Java Code Examples for backtype.storm.Config#get()

The following examples show how to use backtype.storm.Config#get(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: JStormHelper.java    From jstorm with Apache License 2.0 6 votes vote down vote up
/**
 * Submits {@code topology} to the remote cluster and, unless the config flags it
 * as long-running, sleeps for at least two minutes (or {@code runtimeInSeconds}
 * if larger), invokes the optional callback, and finally kills the topology.
 *
 * @param topology         the topology to submit
 * @param topologyName     name under which the topology is submitted
 * @param conf             topology configuration; a default worker count of 3 is
 *                         applied when {@code Config.TOPOLOGY_WORKERS} is unset
 * @param runtimeInSeconds requested runtime; values below 120 are raised to 120
 * @param callback         optional hook executed before the topology is killed
 */
public static void runTopologyRemotely(StormTopology topology, String topologyName, Config conf,
                                       int runtimeInSeconds, Callback callback) throws Exception {
    // Default to 3 workers when the caller did not configure a worker count.
    if (conf.get(Config.TOPOLOGY_WORKERS) == null) {
        conf.setNumWorkers(3);
    }

    StormSubmitter.submitTopology(topologyName, conf, topology);

    // A topology flagged as long-running is left alive; nothing more to do here.
    boolean runForever = JStormUtils.parseBoolean(conf.get("RUN_LONG_TIME"), false);
    if (runForever) {
        LOG.info(topologyName + " will run long time");
        return;
    }

    // Let the topology run for at least two minutes before tearing it down.
    int effectiveRuntimeSeconds = Math.max(runtimeInSeconds, 120);
    JStormUtils.sleepMs(effectiveRuntimeSeconds * 1000);

    if (callback != null) {
        callback.execute(topologyName);
    }

    killTopology(conf, topologyName);
}
 
Example 2
Source File: SequenceTopologyTool.java    From jstorm with Apache License 2.0 6 votes vote down vote up
/**
 * Builds the sequence-test topology and submits it to a distributed cluster.
 * The topology name comes from {@code Config.TOPOLOGY_NAME} (falling back to
 * "SequenceTest"), and the messaging transport is chosen by whether the name
 * mentions "zeromq".
 */
public void SetRemoteTopology() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException {
    Config conf = getConf();
    StormTopology topology = buildTopology();

    conf.put(Config.STORM_CLUSTER_MODE, "distributed");

    // Fall back to a default name when none is configured.
    String configuredName = (String) conf.get(Config.TOPOLOGY_NAME);
    String streamName = (configuredName != null) ? configuredName : "SequenceTest";

    // Pick the messaging transport implementation based on the topology name.
    String transport = streamName.contains("zeromq")
            ? "com.alibaba.jstorm.message.zeroMq.MQContext"
            : "com.alibaba.jstorm.message.netty.NettyContext";
    conf.put(Config.STORM_MESSAGING_TRANSPORT, transport);

    StormSubmitter.submitTopology(streamName, conf, topology);
}
 
Example 3
Source File: BenchmarkUtils.java    From storm-benchmark with Apache License 2.0 5 votes vote down vote up
/**
 * Returns {@code true} when the config explicitly sets a positive number of
 * acker executors. A missing {@code Config.TOPOLOGY_ACKER_EXECUTORS} entry is
 * logged and treated as acking disabled.
 */
public static boolean ifAckEnabled(Config config) {
  Object ackerCount = config.get(Config.TOPOLOGY_ACKER_EXECUTORS);
  if (ackerCount != null) {
    return Utils.getInt(ackerCount) > 0;
  }
  LOG.warn("acker executors are null");
  return false;
}
 
Example 4
Source File: StormTopologySubmitter.java    From incubator-samoa with Apache License 2.0 4 votes vote down vote up
/**
 * Command-line entry point: reads the deployment properties, builds the SAMOA
 * topology from the remaining CLI arguments, serializes the merged Storm config
 * to JSON, and submits the topology to Nimbus using the previously uploaded jar.
 *
 * Fix: error reporting previously mixed System.out.println / printStackTrace
 * with the class's SLF4J-style logger (see the existing logger.error call);
 * all reporting now goes through the logger, and exception causes are preserved.
 */
public static void main(String[] args) throws IOException {
  Properties props = StormSamoaUtils.getProperties();

  // The jar must already have been uploaded; its cluster-side location is
  // carried in the properties file under UPLOADED_JAR_LOCATION_KEY.
  String uploadedJarLocation = props.getProperty(StormJarSubmitter.UPLOADED_JAR_LOCATION_KEY);
  if (uploadedJarLocation == null) {
    logger.error("Invalid properties file. It must have key {}",
        StormJarSubmitter.UPLOADED_JAR_LOCATION_KEY);
    return;
  }

  // numWorkers(tmpArgs) consumes its option from the list, so args is rebuilt
  // from what remains before building the topology.
  List<String> tmpArgs = new ArrayList<String>(Arrays.asList(args));
  int numWorkers = StormSamoaUtils.numWorkers(tmpArgs);

  args = tmpArgs.toArray(new String[0]);
  StormTopology stormTopo = StormSamoaUtils.argsToTopology(args);

  // Merge cluster config with command-line overrides.
  Config conf = new Config();
  conf.putAll(Utils.readStormConfig());
  conf.putAll(Utils.readCommandLineOpts());
  conf.setDebug(false);
  conf.setNumWorkers(numWorkers);

  // Optionally append YourKit profiler options to the worker JVM child opts,
  // preserving any opts that were already configured.
  String profilerOption =
      props.getProperty(StormTopologySubmitter.YJP_OPTIONS_KEY);
  if (profilerOption != null) {
    String topoWorkerChildOpts = (String) conf.get(Config.TOPOLOGY_WORKER_CHILDOPTS);
    StringBuilder optionBuilder = new StringBuilder();
    if (topoWorkerChildOpts != null) {
      optionBuilder.append(topoWorkerChildOpts);
      optionBuilder.append(' ');
    }
    optionBuilder.append(profilerOption);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, optionBuilder.toString());
  }

  // Nimbus expects the per-topology config as a JSON string.
  Map<String, Object> myConfigMap = new HashMap<String, Object>(conf);
  StringWriter out = new StringWriter();

  try {
    JSONValue.writeJSONString(myConfigMap, out);
  } catch (IOException e) {
    logger.error("Error in writing JSONString", e);
    return;
  }

  Config config = new Config();
  config.putAll(Utils.readStormConfig());

  NimbusClient nc = NimbusClient.getConfiguredClient(config);
  String topologyName = stormTopo.getTopologyName();
  try {
    logger.info("Submitting topology with name: {}", topologyName);
    nc.getClient().submitTopology(topologyName, uploadedJarLocation,
        out.toString(), stormTopo.getStormBuilder().createTopology());
    logger.info("{} is successfully submitted", topologyName);

  } catch (AlreadyAliveException aae) {
    logger.error("Fail to submit {}\nError message: {}", topologyName, aae.get_msg());
  } catch (InvalidTopologyException ite) {
    logger.error("Invalid topology for {}", topologyName, ite);
  } catch (TException te) {
    logger.error("TException for {}", topologyName, te);
  }
}
 
Example 5
Source File: DRPC.java    From storm-benchmark with Apache License 2.0 4 votes vote down vote up
/**
 * Builds the DRPC "reach" benchmark topology. A Trident stream persistently
 * aggregates incoming page views into a url -> distinct-user-set MemoryMapState,
 * a static state maps users to their followers, and a DRPC stream answers reach
 * queries by expanding url -> users -> followers and counting distinct followers.
 *
 * Required config entries: SERVER (drpc host) and PORT (drpc port); both are
 * copied into Config.DRPC_SERVERS / Config.DRPC_PORT.
 *
 * @param config benchmark configuration; also mutated with the DRPC settings
 * @return the assembled Trident topology
 * @throws IllegalArgumentException when SERVER or PORT is missing
 */
@Override
  public StormTopology getTopology(Config config) {

    // DRPC server host is mandatory; propagate it into the Storm config.
    Object sObj = config.get(SERVER);
    if (null == sObj) {
      throw new IllegalArgumentException("must set a drpc server");
    }
    server = (String) sObj;
    config.put(Config.DRPC_SERVERS, Lists.newArrayList(server));

    // DRPC port is mandatory as well.
    Object pObj = config.get(PORT);
    if (null == pObj) {
      throw new IllegalArgumentException("must set a drpc port");
    }
    port = Utils.getInt(pObj);
    config.put(Config.DRPC_PORT, port);

    LOG.info("drpc server: " + server + "; drpc port: " + port);

    // Parallelism hints for each stage, with benchmark defaults.
    final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
    final int pageNum = BenchmarkUtils.getInt(config, PAGE_NUM, DEFAULT_PAGE_BOLT_NUM);
    final int viewNum = BenchmarkUtils.getInt(config, VIEW_NUM, DEFAULT_VIEW_BOLT_NUM);
    final int userNum = BenchmarkUtils.getInt(config, USER_NUM, DEFAULT_USER_BOLT_NUM);
    final int followerNum = BenchmarkUtils.getInt(config, FOLLOWER_NUM, DEFAULT_FOLLOWER_BOLT_NUM);

    // Page views arrive from Kafka as strings (StringScheme).
    spout = new TransactionalTridentKafkaSpout(
            KafkaUtils.getTridentKafkaConfig(config, new SchemeAsMultiScheme(new StringScheme())));

    TridentTopology trident = new TridentTopology();
    // Persistent state: url -> set of distinct users who viewed it.
    TridentState urlToUsers =
            trident.newStream("drpc", spout).parallelismHint(spoutNum).shuffle()
            .each(new Fields(StringScheme.STRING_SCHEME_KEY), new Extract(Arrays.asList(Item.URL, Item.USER)),
                    new Fields("url", "user")).parallelismHint(pageNum)
            .groupBy(new Fields("url"))
            .persistentAggregate(new MemoryMapState.Factory(), new Fields("url", "user"), new Distinct(), new Fields("user_set"))
            .parallelismHint(viewNum);
/** debug
 *  1. this proves that the aggregated result has successfully persisted
    urlToUsers.newValuesStream()
            .each(new Fields("url", "user_set"), new Print("(url, user_set)"), new Fields("url2", "user_set2"));
 */
    // Static read-only state: user -> followers, backed by a generated DB.
    PageViewGenerator generator = new PageViewGenerator();
    TridentState userToFollowers = trident.newStaticState(new StaticSingleKeyMapState.Factory(generator.genFollowersDB()));
/** debug
  * 2. this proves that MemoryMapState could be read correctly
   trident.newStream("urlToUsers", new PageViewSpout(false))
            .each(new Fields("page_view"), new Extract(Arrays.asList(Item.URL)), new Fields("url"))
            .each(new Fields("url"), new Print("url"), new Fields("url2"))
            .groupBy(new Fields("url2"))
            .stateQuery(urlToUsers, new Fields("url2"),  new MapGet(), new Fields("users"))
            .each(new Fields("users"), new Print("users"), new Fields("users2"));
*/
/** debug
 *  3. this proves that StaticSingleKeyMapState could be read correctly
    trident.newStream("userToFollowers", new PageViewSpout(false))
            .each(new Fields("page_view"), new Extract(Arrays.asList(Item.USER)), new Fields("user"))
            .each(new Fields("user"), new Print("user"), new Fields("user2"))
            .stateQuery(userToFollowers, new Fields("user2"), new MapGet(), new Fields("followers"))
            .each(new Fields("followers"), new Print("followers"), new Fields("followers2"));
 */
    // DRPC query path: url arg -> users -> followers -> count distinct followers.
    // The One/Sum pair first collapses duplicate followers (one per group),
    // then sums the ones to produce the reach count.
    trident.newDRPCStream(FUNCTION, null)
            .each(new Fields("args"), new Print("args"), new Fields("url"))
            .groupBy(new Fields("url"))
            .stateQuery(urlToUsers, new Fields("url"), new MapGet(), new Fields("users"))
            .each(new Fields("users"), new Expand(), new Fields("user")).parallelismHint(userNum)
            .groupBy(new Fields("user"))
            .stateQuery(userToFollowers, new Fields("user"), new MapGet(), new Fields("followers"))
            .each(new Fields("followers"), new Expand(), new Fields("follower")).parallelismHint(followerNum)
            .groupBy(new Fields("follower"))
            .aggregate(new One(), new Fields("one"))
            .aggregate(new Fields("one"), new Sum(), new Fields("reach"));
    return trident.build();
  }
 
Example 6
Source File: StormTopologySubmitter.java    From samoa with Apache License 2.0 4 votes vote down vote up
/**
 * Command-line entry point: reads deployment properties, builds the SAMOA
 * topology from the CLI arguments, serializes the merged Storm config to JSON,
 * and submits the topology to Nimbus using the previously uploaded jar.
 */
public static void main(String[] args) throws IOException{
	Properties props = StormSamoaUtils.getProperties();
	
	// The jar must have been uploaded beforehand; its cluster-side location
	// is carried in the properties file under this key.
	String uploadedJarLocation = props.getProperty(StormJarSubmitter.UPLOADED_JAR_LOCATION_KEY);
	if(uploadedJarLocation == null){
		logger.error("Invalid properties file. It must have key {}", 
				StormJarSubmitter.UPLOADED_JAR_LOCATION_KEY);
		return;
	}
	
	// numWorkers(tmpArgs) appears to consume its option from the list, so
	// args is rebuilt from what remains — TODO confirm in StormSamoaUtils.
	List<String> tmpArgs = new ArrayList<String>(Arrays.asList(args));
	int numWorkers = StormSamoaUtils.numWorkers(tmpArgs);
	
	args = tmpArgs.toArray(new String[0]);
	StormTopology stormTopo = StormSamoaUtils.argsToTopology(args);

	// Merge cluster config with command-line overrides.
	Config conf = new Config();
	conf.putAll(Utils.readStormConfig());
	conf.putAll(Utils.readCommandLineOpts());
	conf.setDebug(false);
	conf.setNumWorkers(numWorkers);
	
	// Optionally append profiler (YJP) options to the worker JVM child opts,
	// preserving any opts that were already configured.
	String profilerOption = 
			props.getProperty(StormTopologySubmitter.YJP_OPTIONS_KEY);
	if(profilerOption != null){
		String topoWorkerChildOpts =  (String) conf.get(Config.TOPOLOGY_WORKER_CHILDOPTS);
		StringBuilder optionBuilder = new StringBuilder();
		if(topoWorkerChildOpts != null){
			optionBuilder.append(topoWorkerChildOpts);	
			optionBuilder.append(' ');
		}
		optionBuilder.append(profilerOption);
		conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, optionBuilder.toString());
	}

	// Nimbus expects the per-topology config as a JSON string.
	Map<String, Object> myConfigMap = new HashMap<String, Object>(conf);
	StringWriter out = new StringWriter();

	try {
		JSONValue.writeJSONString(myConfigMap, out);
	} catch (IOException e) {
		System.out.println("Error in writing JSONString");
		e.printStackTrace();
		return;
	}
	
	Config config = new Config();
	config.putAll(Utils.readStormConfig());
	
	// NOTE(review): only NIMBUS_HOST is read here, so the client presumably
	// connects on its default thrift port; the other submitter variant uses
	// NimbusClient.getConfiguredClient(config) instead — verify against the
	// NimbusClient API of this samoa/storm version.
	String nimbusHost = (String) config.get(Config.NIMBUS_HOST);
			
	NimbusClient nc = new NimbusClient(nimbusHost);
	String topologyName = stormTopo.getTopologyName();
	try {
		System.out.println("Submitting topology with name: " 
				+ topologyName);
		nc.getClient().submitTopology(topologyName, uploadedJarLocation,
				out.toString(), stormTopo.getStormBuilder().createTopology());
		System.out.println(topologyName + " is successfully submitted");

	} catch (AlreadyAliveException aae) {
		System.out.println("Fail to submit " + topologyName
				+ "\nError message: " + aae.get_msg());
	} catch (InvalidTopologyException ite) {
		System.out.println("Invalid topology for " + topologyName);
		ite.printStackTrace();
	} catch (TException te) {
		System.out.println("Texception for " + topologyName);
		te.printStackTrace();
	} 		
}