Java Code Examples for backtype.storm.generated.StormTopology

The following are the top-voted examples showing how to use backtype.storm.generated.StormTopology. These examples are extracted from open source projects. You can vote up the examples you like; your votes will be used in our system to surface more good examples.
Example 1
Project: storm-trident-example   File: ExampleTopology.java   (7 votes)
public static StormTopology buildTopology() {
	TridentTopology topology = new TridentTopology();
	RandomWordSpout spout1 = new RandomWordSpout();
	
	Stream inputStream = topology.newStream("faltu", spout1); // "faltu" is the stream's unique id (used for the spout's metadata); it isn't referenced elsewhere in this code.
	
	/**
	 * partitionPersist: the partitionPersist operation updates a source of state.
	 * It returns a TridentState object, which can then be used in stateQuery operations elsewhere in the topology.
	 * Args:
	 * StateFactory instance - the factory implements the makeState API and should return an instance of State.
	 * Fields list - the fields to persist; these fields must be present in the input stream.
	 * StateUpdater instance - the StateUpdater updates the underlying State.
	 */
	 inputStream
	    .partitionPersist(new RedisStoreStateFactory(), new Fields("randomWord"), new RedisStoreStateUpdater());
	 
	 return topology.build();
}
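
A minimal sketch of the factory/updater pair that partitionPersist expects is shown below. Only the Trident interfaces (StateFactory, State, BaseStateUpdater) are real API; the Redis internals are hypothetical stand-ins for this project's RedisStoreState classes.

import java.util.List;
import java.util.Map;

import backtype.storm.task.IMetricsContext;
import storm.trident.operation.TridentCollector;
import storm.trident.state.BaseStateUpdater;
import storm.trident.state.State;
import storm.trident.state.StateFactory;
import storm.trident.tuple.TridentTuple;

// Hypothetical state object; beginCommit/commit mark Trident's batch boundaries.
class RedisStoreState implements State {
	public void beginCommit(Long txid) { /* e.g. open a Redis pipeline */ }
	public void commit(Long txid) { /* e.g. flush the pipeline */ }
	void store(String word) { /* e.g. jedis.incr(word) */ }
}

// makeState is called once per state partition when the topology starts.
class RedisStoreStateFactory implements StateFactory {
	public State makeState(Map conf, IMetricsContext metrics, int partitionIndex, int numPartitions) {
		return new RedisStoreState();
	}
}

// updateState receives each batch's tuples for this partition.
class RedisStoreStateUpdater extends BaseStateUpdater<RedisStoreState> {
	public void updateState(RedisStoreState state, List<TridentTuple> tuples, TridentCollector collector) {
		for (TridentTuple t : tuples) {
			state.store(t.getStringByField("randomWord"));
		}
	}
}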
 
Example 2
Project: storm-trident-example   File: ExampleTopology.java   (7 votes)
public static StormTopology buildTopology() {
	TridentTopology topology = new TridentTopology();
	RandomPhraseSpout spout1 = new RandomPhraseSpout();
	
	Stream inputStream = topology.newStream("dumbo", spout1).parallelismHint(20); // "dumbo" is the stream's unique id (used for the spout's metadata); it isn't referenced elsewhere in this code.
	
	/**
	 * persistentAggregate: the persistentAggregate operation updates a source of state.
	 * persistentAggregate is an additional abstraction built on top of partitionPersist that knows how to take a
	 * Trident aggregator and use it to apply updates to the source of state.
	 * Args:
	 * StateFactory instance - the factory implements the makeState API and should return an instance of State.
	 * Fields list - the fields to persist; these fields must be present in the input stream.
	 * StateUpdater instance - the StateUpdater updates the underlying State.
	 */
	 inputStream
	    //input stream generated by spout1 has a field called randomPhrase.
	    //RandomPhraseSplitter takes a randomPhrase and additionally emits a field called randomWord into the stream.
	    .each(new Fields("randomPhrase"), new RandomPhraseSplitter(), new Fields("randomWord")).parallelismHint(6);
	    
	 return topology.build();
}
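
The comments above say RandomPhraseSplitter consumes the randomPhrase field and emits a randomWord field. A plausible sketch as a Trident function follows (the project's actual split logic may differ):

import backtype.storm.tuple.Values;
import storm.trident.operation.BaseFunction;
import storm.trident.operation.TridentCollector;
import storm.trident.tuple.TridentTuple;

// Emits one tuple per word; Trident appends the new "randomWord" field
// to the input tuple's existing fields.
class RandomPhraseSplitter extends BaseFunction {
	public void execute(TridentTuple tuple, TridentCollector collector) {
		for (String word : tuple.getStringByField("randomPhrase").split(" ")) {
			collector.emit(new Values(word));
		}
	}
}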
 
Example 3
Project: hadooparchitecturebook   File: MovingAvgLocalTopologyRunner.java   (6 votes)
public static void main(String[] args) {

    Config config = new Config();
    config.setDebug(true);

    StormTopology topology = buildTopology();
    // Run locally:
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);
    // Un-comment to run as part of a Storm cluster:
    // try {
    //   StormSubmitter.submitTopology("cluster-moving-average",
    //  				    config,
    // 				    topology);
    // } catch(AlreadyAliveException e) {
    //   e.printStackTrace();
    // } catch(InvalidTopologyException e) {
    //   e.printStackTrace();
    //}
  }
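
Rather than toggling comments, a common pattern (a sketch, not part of this project) picks local or cluster mode from a command-line argument:

// Submit to a real cluster when a topology name is passed, otherwise run locally.
if (args.length > 0) {
    try {
        StormSubmitter.submitTopology(args[0], config, topology);
    } catch (AlreadyAliveException | InvalidTopologyException e) {
        e.printStackTrace();
    }
} else {
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);
}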
 
Example 4
Project: es-hadoop-v2.2.0   File: AbstractStormSuite.java   (6 votes)
public static void run(final String name, final StormTopology topo, final Counter hasCompleted) throws Exception {
    Thread th = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                start(name, topo);
                hasCompleted.waitForZero(TimeValue.timeValueSeconds(15));
            } finally {
                stop(name);
            }

        }
    }, "test-storm-runner");
    th.setDaemon(true);

    copyPropertiesIntoCfg(cfg);
    th.start();
}
 
Example 5
Project: Tstream   File: TopologyContext.java   (6 votes)
public TopologyContext(StormTopology topology, Map stormConf,
		Map<Integer, String> taskToComponent,
		Map<String, List<Integer>> componentToSortedTasks,
		Map<String, Map<String, Fields>> componentToStreamToFields,
		String topologyId, String codeDir, String pidDir, Integer taskId,
		Integer workerPort, List<Integer> workerTasks,
		Map<String, Object> defaultResources,
		Map<String, Object> userResources,
		Map<String, Object> executorData, Map registeredMetrics,
		clojure.lang.Atom openOrPrepareWasCalled) {
	super(topology, stormConf, taskToComponent, componentToSortedTasks,
			componentToStreamToFields, topologyId, codeDir, pidDir,
			workerPort, workerTasks, defaultResources, userResources);
	_taskId = taskId;
	_executorData = executorData;
	_registeredMetrics = registeredMetrics;
	_openOrPrepareWasCalled = openOrPrepareWasCalled;
}
 
Example 6
Project: jstorm-0.9.6.3-   File: Common.java   (6 votes)
public static StormTopology add_metrics_component(StormTopology topology) {

		/**
		 * @@@ TODO Add metrics consumer bolt
		 */
		//		(defn metrics-consumer-bolt-specs [storm-conf topology]
		//				  (let [component-ids-that-emit-metrics (cons SYSTEM-COMPONENT-ID (keys (all-components topology)))
		//				        inputs (->> (for [comp-id component-ids-that-emit-metrics]
		//				                      {[comp-id METRICS-STREAM-ID] :shuffle})
		//				                    (into {}))
		//				        
		//				        mk-bolt-spec (fn [class arg p]
		//				                       (thrift/mk-bolt-spec*
		//				                        inputs
		//				                        (backtype.storm.metric.MetricsConsumerBolt. class arg)
		//				                        {} :p p :conf {TOPOLOGY-TASKS p}))]
		//				    
		//				    (map
		//				     (fn [component-id register]           
		//				       [component-id (mk-bolt-spec (get register "class")
		//				                                   (get register "argument")
		//				                                   (or (get register "parallelism.hint") 1))])
		//				     
		//				     (metrics-consumer-register-ids storm-conf)
		//				     (get storm-conf TOPOLOGY-METRICS-CONSUMER-REGISTER))))
		return topology;
	}
 
Example 7
Project: jstorm-0.9.6.3-   File: LocalCluster.java   (6 votes)
@Override
public void submitTopologyWithOpts(String topologyName, Map conf,
		StormTopology topology, SubmitOptions submitOpts){
	
	if (!Utils.isValidConf(conf))
		throw new RuntimeException("Topology conf is not json-serializable");
	JStormUtils.setLocalMode(true);
	
	try {
		if (submitOpts == null) {
			state.getNimbus().submitTopology(topologyName, null,
					Utils.to_json(conf), topology);
		}else {
			state.getNimbus().submitTopologyWithOpts(topologyName, null,
					Utils.to_json(conf), topology, submitOpts);
		}
		
	} catch (Exception e) {
		
		LOG.error("Failed to submit topology " + topologyName, e);
		throw new RuntimeException(e);
	} 
}
 
Example 8
Project: learn_jstorm   File: WorkerTopologyContext.java   (6 votes)
public WorkerTopologyContext(StormTopology topology, Map stormConf,
		Map<Integer, String> taskToComponent,
		Map<String, List<Integer>> componentToSortedTasks,
		Map<String, Map<String, Fields>> componentToStreamToFields,
		String topologyId, String codeDir, String pidDir, Integer workerPort,
		List<Integer> workerTasks, Map<String, Object> defaultResources,
		Map<String, Object> userResources) {
	super(topology, stormConf, taskToComponent, componentToSortedTasks,
			componentToStreamToFields, topologyId);
	_codeDir = codeDir;
	_defaultResources = defaultResources;
	_userResources = userResources;
	try {
		if (pidDir != null) {
			_pidDir = new File(pidDir).getCanonicalPath();
		} else {
			_pidDir = null;
		}
	} catch (IOException e) {
		throw new RuntimeException("Could not get canonical path for "
				+ _pidDir, e);
	}
	_workerPort = workerPort;
	_workerTasks = workerTasks;
}
 
Example 9
Project: spiderz   File: WikiCrawlerTopology.java   (6 votes)
public static StormTopology buildTopology(String redisIp, String redisPort) {
	// topology to build
	TopologyBuilder topology = new TopologyBuilder();

	// create a spout
	WikiCrawlerSpout wikiSpout = new WikiCrawlerSpout(redisIp, redisPort);

	// create a bolt
	WikiCrawlerExplorerBolt wikiBolt = new WikiCrawlerExplorerBolt(redisIp, redisPort);

	// set up the DAG
	// the spout is lightweight, so a single executor (parallelism hint 1) is enough
	topology.setSpout("wikiSpout", wikiSpout, 1)
			.setNumTasks(2)
			.setMaxSpoutPending(5);
	// this bolt uses as many executors(threads) as the cores available
	topology.setBolt("wikiBolt", wikiBolt, numCores)
			.setNumTasks(numCores * 4) // 4 tasks per executor
			.shuffleGrouping("wikiSpout");

	return topology.createTopology();
}
 
Example 10
Project: Tstream   File: ThriftTopologyUtils.java   (6 votes)
public static ComponentCommon getComponentCommon(StormTopology topology,
		String componentId) {
	for (StormTopology._Fields f : StormTopology.metaDataMap.keySet()) {
		Map<String, Object> componentMap = (Map<String, Object>) topology
				.getFieldValue(f);
		if (componentMap.containsKey(componentId)) {
			Object component = componentMap.get(componentId);
			if (component instanceof Bolt) {
				return ((Bolt) component).get_common();
			}
			if (component instanceof SpoutSpec) {
				return ((SpoutSpec) component).get_common();
			}
			if (component instanceof StateSpoutSpec) {
				return ((StateSpoutSpec) component).get_common();
			}
			throw new RuntimeException(
					"Unreachable code! No get_common conversion for component "
							+ component);
		}
	}
	throw new IllegalArgumentException(
			"Could not find component common for " + componentId);
}
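
The ComponentCommon returned here carries the component's declared streams (Example 48 reads them the same way). A quick usage sketch, with the component id "wikiBolt" borrowed from Example 9 purely as an illustration and imports as in the surrounding examples:

// List the output streams and fields declared by one component.
ComponentCommon common = ThriftTopologyUtils.getComponentCommon(topology, "wikiBolt");
for (Map.Entry<String, StreamInfo> entry : common.get_streams().entrySet()) {
	System.out.println(entry.getKey() + " -> " + entry.getValue().get_output_fields());
}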
 
Example 11
Project: big-data-system   File: TridentWordCount.java   (6 votes)
public static StormTopology buildTopology(LocalDRPC drpc) {
  FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3, new Values("the cow jumped over the moon"),
      new Values("the man went to the store and bought some candy"), new Values("four score and seven years ago"),
      new Values("how many apples can you eat"), new Values("to be or not to be the person"));
  spout.setCycle(true);

  TridentTopology topology = new TridentTopology();
  TridentState wordCounts = topology.newStream("spout1", spout).parallelismHint(16).each(new Fields("sentence"),
      new Split(), new Fields("word")).groupBy(new Fields("word")).persistentAggregate(new MemoryMapState.Factory(),
      new Count(), new Fields("count")).parallelismHint(16);

  topology.newDRPCStream("words", drpc).each(new Fields("args"), new Split(), new Fields("word")).groupBy(new Fields(
      "word")).stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count")).each(new Fields("count"),
      new FilterNull()).aggregate(new Fields("count"), new Sum(), new Fields("sum"));
  return topology.build();
}
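
A driver for this topology, mirroring the storm-starter main (the topology and function names match the code above; the loop body is illustrative):

// Inside a main(String[] args) throws Exception:
Config conf = new Config();
conf.setMaxSpoutPending(20);
LocalDRPC drpc = new LocalDRPC();
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
for (int i = 0; i < 10; i++) {
	// prints the summed count for the queried words, e.g. DRPC RESULT: [[8]]
	System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
	Thread.sleep(1000);
}
cluster.shutdown();
drpc.shutdown();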
 
Example 12
Project: learn_jstorm   File: StormSubmitter.java   (6 votes)
public static void submitTopology(String name, Map stormConf,
		StormTopology topology, SubmitOptions opts, List<File> jarFiles)
		throws AlreadyAliveException, InvalidTopologyException {
	if (jarFiles == null) {
		jarFiles = new ArrayList<File>();
	}
	Map<String, String> jars = new HashMap<String, String>(jarFiles.size());
	List<String> names = new ArrayList<String>(jarFiles.size());
	
	for (File f : jarFiles) {
		if (!f.exists()) {
			LOG.info(f.getName() + " does not exist: "
					+ f.getAbsolutePath());
			continue;
		}
		jars.put(f.getName(), f.getAbsolutePath());
		names.add(f.getName());
	}
	LOG.info("Files: " + names + " will be loaded");
	stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_PATH, jars);
	stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_NAME, names);
	submitTopology(name, stormConf, topology, opts);
}
 
Example 13
Project: LearnStorm   File: TridentWordCount.java   (6 votes)
public static StormTopology buildTopology(LocalDRPC drpc) {
	FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence"), 3,
			new Values("the$$cow$$jumped$$over$$the$$moon"),
			new Values("the$$man$$went$$to$$the$$store$$and$$bought$$some$$candy"),
			new Values("four$$score$$and$$seven$$years$$ago"),
			new Values("how$$many$$apples$$can$$you$$eat"),
			new Values("to$$be$$or$$not$$to$$be$$the$$person"));
	spout.setCycle(true);

	TridentTopology topology = new TridentTopology();

	TridentState wordCounts = topology.newStream("spout1", spout)
			.each(new Fields("sentence"), new Split(), new Fields("word"))
			.groupBy(new Fields("word"))
			.persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
			.parallelismHint(6);

	topology.newDRPCStream("words", drpc).each(new Fields("args"), new Split(), new Fields("word"))
			.groupBy(new Fields("word"))
			.stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
			.each(new Fields("count"), new FilterNull())
			.aggregate(new Fields("count"), new Sum(), new Fields("sum"));

	return topology.build();
}
 
Example 14
Project: jstrom   File: WorkerTopologyContext.java   (6 votes)
public WorkerTopologyContext(StormTopology topology, Map stormConf, Map<Integer, String> taskToComponent,
        Map<String, List<Integer>> componentToSortedTasks, Map<String, Map<String, Fields>> componentToStreamToFields, String stormId, String codeDir,
        String pidDir, Integer workerPort, List<Integer> workerTasks, Map<String, Object> defaultResources, Map<String, Object> userResources) {
    super(topology, stormConf, taskToComponent, componentToSortedTasks, componentToStreamToFields, stormId);
    _codeDir = codeDir;
    _defaultResources = defaultResources;
    _userResources = userResources;
    try {
        if (pidDir != null) {
            _pidDir = new File(pidDir).getCanonicalPath();
        } else {
            _pidDir = null;
        }
    } catch (IOException e) {
        throw new RuntimeException("Could not get canonical path for " + _pidDir, e);
    }
    _workerPort = workerPort;
    _workerTasks = workerTasks;
}
 
Example 15
Project: Tstream   File: StormSubmitter.java   (6 votes)
public static void submitTopology(String name, Map stormConf,
		StormTopology topology, SubmitOptions opts, List<File> jarFiles)
		throws AlreadyAliveException, InvalidTopologyException {
	if (jarFiles == null)
		jarFiles = new ArrayList<File>();
	Map<String, String> jars = new HashMap<String, String>(jarFiles.size());
	List<String> names = new ArrayList<String>(jarFiles.size());
	for (File f : jarFiles) {
		if (!f.exists()) {
			LOG.info(f.getName() + " does not exist: "
					+ f.getAbsolutePath());
			continue;
		}
		jars.put(f.getName(), f.getAbsolutePath());
		names.add(f.getName());
	}
	LOG.info("Files: " + names + " will be loaded");
	stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_PATH, jars);
	stormConf.put(GenericOptionsParser.TOPOLOGY_LIB_NAME, names);
	submitTopology(name, stormConf, topology, opts);
}
 
Example 16
Project: Tstream   File: WorkerTopologyContext.java   (6 votes)
public WorkerTopologyContext(StormTopology topology, Map stormConf,
		Map<Integer, String> taskToComponent,
		Map<String, List<Integer>> componentToSortedTasks,
		Map<String, Map<String, Fields>> componentToStreamToFields,
		String topologyId, String codeDir, String pidDir, Integer workerPort,
		List<Integer> workerTasks, Map<String, Object> defaultResources,
		Map<String, Object> userResources) {
	super(topology, stormConf, taskToComponent, componentToSortedTasks,
			componentToStreamToFields, topologyId);
	_codeDir = codeDir;
	_defaultResources = defaultResources;
	_userResources = userResources;
	try {
		if (pidDir != null) {
			_pidDir = new File(pidDir).getCanonicalPath();
		} else {
			_pidDir = null;
		}
	} catch (IOException e) {
		throw new RuntimeException("Could not get canonical path for "
				+ _pidDir, e);
	}
	_workerPort = workerPort;
	_workerTasks = workerTasks;
}
 
Example 17
Project: learn_jstorm   File: ServiceHandler.java   (6 votes)
/**
 * get the StormTopology by deserializing local files
 * 
 * @param id
 *            String: topology id
 * @return StormTopology
 */
@Override
public StormTopology getTopology(String id) throws NotAliveException,
		TException {
	StormTopology topology = null;
	try {
		StormTopology stormtopology = StormConfig
				.read_nimbus_topology_code(conf, id);
		if (stormtopology == null) {
			throw new TException("topology: " + id + " is null");
		}

		Map<Object, Object> topologyConf = (Map<Object, Object>) StormConfig
				.read_nimbus_topology_conf(conf, id);

		topology = Common.system_topology(topologyConf, stormtopology);
	} catch (Exception e) {
		LOG.error("Failed to get topology " + id + ",", e);
		throw new TException("Failed to get system_topology");
	}
	return topology;
}
 
Example 18
Project: learn_jstorm   File: ServiceHandler.java   (6 votes)
/**
 * generate a task id (Integer) for every task
 * 
 * @param conf
 * @param topologyid
 * @return Map<Integer, String>: from taskid to componentid
 * @throws IOException
 * @throws InvalidTopologyException
 */
public Map<Integer, TaskInfo> mkTaskComponentAssignments(
		Map<Object, Object> conf, String topologyid) throws IOException,
		InvalidTopologyException {

	// @@@ a minor issue here:
	// we could pass stormConf directly from the Submit method
	Map<Object, Object> stormConf = StormConfig.read_nimbus_topology_conf(
			conf, topologyid);

	StormTopology stopology = StormConfig.read_nimbus_topology_code(conf,
			topologyid);

	// use a TreeMap to keep the tasks in sequence
	Map<Integer, TaskInfo> rtn = new TreeMap<Integer, TaskInfo>();

	StormTopology topology = Common.system_topology(stormConf, stopology);

	Integer count = 0;
	count = mkTaskMaker(stormConf, topology.get_bolts(), rtn, count);
	count = mkTaskMaker(stormConf, topology.get_spouts(), rtn, count);
	count = mkTaskMaker(stormConf, topology.get_state_spouts(), rtn, count);

	return rtn;
}
 
Example 19
Project: cognition   File: ConfigurableIngestTopologyTest.java   (6 votes)
@Test
public void testSubmitLocal(
    @Injectable String topologyName,
    @Injectable Config stormConfig,
    @Injectable TopologyBuilder builder,
    @Injectable ILocalCluster cluster,
    @Injectable StormTopology stormTopology
) throws Exception {
  topology.topologyName = topologyName;
  topology.stormConfig = stormConfig;
  topology.builder = builder;

  new Expectations() {{
    builder.createTopology();
    result = stormTopology;
    cluster.submitTopology(topologyName, stormConfig, stormTopology);
  }};

  topology.submitLocal(cluster);
}
 
Example 20
Project: heron   File: WorkerTopologyContext.java   (6 votes)
@SuppressWarnings("rawtypes")
public WorkerTopologyContext(
    StormTopology topology,
    Map stormConf,
    Map<Integer, String> taskToComponent,
    Map<String, List<Integer>> componentToSortedTasks,
    Map<String, Map<String, Fields>> componentToStreamToFields,
    String stormId,
    String codeDir,
    String pidDir,
    Integer workerPort,
    List<Integer> workerTasks,
    Map<String, Object> defaultResources,
    Map<String, Object> userResources
) {
  super(topology, stormConf, taskToComponent,
      componentToSortedTasks, componentToStreamToFields, stormId);
  throw new RuntimeException("WorkerTopologyContext should never be initialized this way");
}
 
Example 21
Project: learn_jstorm   File: TopologyContext.java   (6 votes)
public TopologyContext(StormTopology topology, Map stormConf,
		Map<Integer, String> taskToComponent,
		Map<String, List<Integer>> componentToSortedTasks,
		Map<String, Map<String, Fields>> componentToStreamToFields,
		String topologyId, String codeDir, String pidDir, Integer taskId,
		Integer workerPort, List<Integer> workerTasks,
		Map<String, Object> defaultResources,
		Map<String, Object> userResources,
		Map<String, Object> executorData, Map registeredMetrics,
		clojure.lang.Atom openOrPrepareWasCalled) {
	super(topology, stormConf, taskToComponent, componentToSortedTasks,
			componentToStreamToFields, topologyId, codeDir, pidDir,
			workerPort, workerTasks, defaultResources, userResources);
	_taskId = taskId;
	_executorData = executorData;
	_registeredMetrics = registeredMetrics;
	_openOrPrepareWasCalled = openOrPrepareWasCalled;
}
 
Example 22
Project: Mastering-Mesos   File: DefaultSchedulerTest.java   (5 votes)
private TopologyDetails constructTopologyDetails(String topologyName, int numWorkers) {
  Map<String, TopologyDetails> topologyConf1 = new HashMap<>();

  StormTopology stormTopology = new StormTopology();
  TopologyDetails topologyDetails= new TopologyDetails(topologyName, topologyConf1, stormTopology, numWorkers);

  return topologyDetails;
}
 
Example 23
Project: Mastering-Mesos   File: TestUtils.java   (5 votes)
public static TopologyDetails constructTopologyDetails(String topologyName, int numWorkers, double numCpus, double memSize) {
  Map<String, TopologyDetails> topologyConf = new HashMap<>();

  StormTopology stormTopology = new StormTopology();
  TopologyDetails topologyDetails= new TopologyDetails(topologyName, topologyConf, stormTopology, numWorkers);
  topologyDetails.getConf().put(MesosCommon.WORKER_CPU_CONF, Double.valueOf(numCpus));
  topologyDetails.getConf().put(MesosCommon.WORKER_MEM_CONF, Double.valueOf(memSize));

  return topologyDetails;
}
 
Example 24
Project: es-hadoop-v2.2.0   File: AbstractStormSuite.java   (5 votes)
public static void start(String name, StormTopology topo) {
    try {
        topologies.add(name);
        stormCluster.submitTopology(name, cfg, topo);
    } catch (Exception ex) {
        throw new RuntimeException("Cannot submit topology " + name, ex);
    }
}
 
Example 25
Project: streaming_outliers   File: StormTopologyComponent.java   (5 votes)
public void submitFluxTopology(String topologyName, File topologyLoc, Properties properties) throws IOException, ClassNotFoundException, NoSuchMethodException, InvocationTargetException, InstantiationException, IllegalAccessException, TException {
    TopologyDef topologyDef = loadYaml(topologyName, topologyLoc, properties);
    Config conf = FluxBuilder.buildConfig(topologyDef);
    ExecutionContext context = new ExecutionContext(topologyDef, conf);
    StormTopology topology = FluxBuilder.buildTopology(context);
    Assert.assertNotNull(topology);
    topology.validate();
    stormCluster.submitTopology(topologyName, conf, topology);
}
 
Example 26
Project: storm-trident-example   File: ExampleTopology.java   (5 votes)
public static StormTopology buildTopology() {
	TridentTopology topology = new TridentTopology();
	RandomPhraseSpout spout1 = new RandomPhraseSpout();
	
	Stream inputStream = topology.newStream("dumbo", spout1); // "dumbo" is the stream's unique id (used for the spout's metadata); it isn't referenced elsewhere in this code.
	
	/**
	 * persistentAggregate: the persistentAggregate operation updates a source of state.
	 * persistentAggregate is an additional abstraction built on top of partitionPersist that knows how to take a
	 * Trident aggregator and use it to apply updates to the source of state.
	 * Args:
	 * StateFactory instance - the factory implements the makeState API and should return an instance of State.
	 * Fields list - the fields to persist; these fields must be present in the input stream.
	 * StateUpdater instance - the StateUpdater updates the underlying State.
	 */
	 inputStream
	    //input stream generated by spout1 has a field called randomPhrase.
	    //RandomPhraseSplitter takes a randomPhrase and additionally emits a field called randomWord into the stream.
	    .each(new Fields("randomPhrase"), new RandomPhraseSplitter(), new Fields("randomWord"))
	    // the input stream is grouped by randomWord - similar to Storm's fields grouping.
	    .groupBy(new Fields("randomWord"))
	    // count the occurrences of randomWord using the Count aggregator, which adds a field called count to the stream.
	    //persist the count in Redis.
	    .persistentAggregate(new RedisStoreStateFactory(), new Count(), new Fields("count"));
	 
	 return topology.build();
}
 
Example 27
Project: Tstream   File: StormConfig.java   (5 votes)
public static StormTopology read_supervisor_topology_code(Map conf,
		String topologyId) throws IOException {
	String topologyRoot = StormConfig.supervisor_stormdist_root(conf,
			topologyId);
	String codePath = StormConfig.stormcode_path(topologyRoot);
	return (StormTopology) readLocalObject(topologyId, codePath);
}
 
Example 28
Project: storm-cassandra-cql   File: WordCountTopology.java   (5 votes)
@SuppressWarnings("unchecked")
public static StormTopology buildWordCountAndSourceTopology(LocalDRPC drpc) {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();

    String source1 = "spout1";
    String source2 = "spout2";
    FixedBatchSpout spout1 = new FixedBatchSpout(new Fields("sentence", "source"), 3,
            new Values("the cow jumped over the moon", source1),
            new Values("the man went to the store and bought some candy", source1),
            new Values("four score and four years ago", source2),
            new Values("how much wood can a wood chuck chuck", source2));
    spout1.setCycle(true);

    TridentState wordCounts =
            topology.newStream("spout1", spout1)
                    .each(new Fields("sentence"), new Split(), new Fields("word"))
                    .groupBy(new Fields("word", "source"))
                    .persistentAggregate(CassandraCqlMapState.nonTransactional(new WordCountAndSourceMapper()),
                            new IntegerCount(), new Fields("count"))
                    .parallelismHint(6);

    topology.newDRPCStream("words", drpc)
            .each(new Fields("args"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .aggregate(new Fields("count"), new Sum(), new Fields("sum"));

    return topology.build();
}
 
Example 29
Project: yuzhouwan   File: LocalTopologyRunner.java   (5 votes)
public static void main(String[] args) {

        StormTopology topology = CreditCardTopologyBuilder.build();
        Config config = new Config();
        config.setDebug(true);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("local-topology", config, topology);
        Utils.sleep(30000);

        cluster.killTopology("local-topology");
        cluster.shutdown();
    }
 
Example 30
Project: jstorm-0.9.6.3-   File: Common.java   (5 votes)
public static List<Object> all_components(StormTopology topology) {
	List<Object> rtn = new ArrayList<Object>();
	for (StormTopology._Fields field : Thrift.STORM_TOPOLOGY_FIELDS) {
		Object fields = topology.getFieldValue(field);
		if (fields != null) {
			rtn.addAll(((Map) fields).values());
		}
	}
	return rtn;
}
 
Example 31
Project: Tstream   File: Common.java   (5 votes)
public static List<Object> all_components(StormTopology topology) {
	List<Object> rtn = new ArrayList<Object>();
	for (StormTopology._Fields field : Thrift.STORM_TOPOLOGY_FIELDS) {
		Object fields = topology.getFieldValue(field);
		if (fields != null) {
			rtn.addAll(((Map) fields).values());
		}
	}
	return rtn;
}
 
Example 32
Project: openimaj   File: LocalTopologyMode.java   (5 votes)
@Override
public void submitTopology(StormToolOptions options) throws Exception {
	logger.debug("Configuring topology");
	Config conf = options.prepareConfig();
	logger.debug("Instantiating cluster");
	this.cluster = new LocalCluster();
	logger.debug("Constructing topology");
	StormTopology topology = options.constructTopology();
	logger.debug("Submitting topology");
	cluster.submitTopology(options.topologyName(), conf, topology);
}
 
Example 33
Project: jstrom   File: DoRebalanceTransitionCallback.java   (5 votes)
private void setTaskInfo(StormTopology oldTopology, StormTopology newTopology) throws Exception {
    StormClusterState clusterState = data.getStormClusterState();
    // Retrieve the max task ID
    TreeSet<Integer> taskIds = new TreeSet<Integer>(clusterState.task_ids(topologyid));
    int cnt = taskIds.descendingIterator().next();

    cnt = setBoltInfo(oldTopology, newTopology, cnt, clusterState);
    cnt = setSpoutInfo(oldTopology, newTopology, cnt, clusterState);
}
 
Example 34
Project: Tstream   File: GeneralTopologyContext.java   (5 votes)
public GeneralTopologyContext(StormTopology topology, Map stormConf,
		Map<Integer, String> taskToComponent,
		Map<String, List<Integer>> componentToSortedTasks,
		Map<String, Map<String, Fields>> componentToStreamToFields,
		String topologyId) {
	_topology = topology;
	_stormConf = stormConf;
	_taskToComponent = taskToComponent;
	_topologyId = topologyId;
	_componentToTasks = componentToSortedTasks;
	_componentToStreamToFields = componentToStreamToFields;
}
 
Example 35
Project: jstorm-0.9.6.3-   File: TopologyPage.java   (5 votes)
/**
 * build the spout and bolt List<Components>
 *
 * @param ts
 * @param topology
 * @throws NotAliveException
 */
private void getComponents(List<TaskSummary> ts, StormTopology topology)
		throws NotAliveException {
	if (ts == null) {
		LOG.error("Task list is empty");
		throw new NotAliveException("Task list is empty");
	}

	Map<String, List<TaskSummary>> spoutTasks = new HashMap<String, List<TaskSummary>>();
	Map<String, List<TaskSummary>> boltTasks = new HashMap<String, List<TaskSummary>>();

	for (TaskSummary t : ts) {
		if (t == null) {
			continue;
		}

		String componentid = t.get_component_id();
		String componentType = UIUtils.componentType(topology, componentid);
		if (componentType.equals(UIUtils.SPOUT_STR)) {

			UIUtils.addTask(spoutTasks, t, componentid);
		} else if (componentType.equals(UIUtils.BOLT_STR)) {
			UIUtils.addTask(boltTasks, t, componentid);
		}

	}

	scom = getComponents(spoutTasks, UIUtils.SPOUT_STR);
	bcom = getComponents(boltTasks, UIUtils.BOLT_STR);
}
 
Example 36
Project: Infrastructure   File: StormTests.java   (5 votes)
/**
 * Performs a test-run of the profiling functionality.
 */
@Test
public void testProfilingPipeline() {
    EndOfDataEventHandler handler = new EndOfDataEventHandler(TestTopology.PIP_NAME);
    EventManager.register(handler);
    boolean localSer = IntSerializer.registerIfNeeded();
    String pathDFS = configureDfsPathTemp();
    LocalStormEnvironment env = new LocalStormEnvironment();
    // build the test topology
    RecordingTopologyBuilder builder = new RecordingTopologyBuilder();
    TestTopology.createTopology(builder);
    StormTopology topology = builder.createTopology();
    Map<String, TopologyTestInfo> topologies = new HashMap<String, TopologyTestInfo>();
    @SuppressWarnings("rawtypes")
    Map topoCfg = createTopologyConfiguration();
    topologies.put(TestTopology.PIP_NAME, new TopologyTestInfo(topology, 
        new File(Utils.getTestdataDir(), "pipeline.xml"), topoCfg));
    env.setTopologies(topologies);
    clear();
   
    sleep(1000);
    
    ProfileAlgorithmCommand cmd = new ProfileAlgorithmCommand(Naming.NODE_PROCESS_FAMILY, Naming.NODE_PROCESS_ALG1);
    cmd.execute();

    // wait for end-of-data event but at maximum 10s
    long now = System.currentTimeMillis();
    while (!handler.received() && System.currentTimeMillis() - now < 10000) {
        sleep(1000);
    }
    env.shutdown();
    env.cleanup();
    configureDfsPath(pathDFS);
    IntSerializer.unregisterIfNeeded(localSer);
    EventManager.unregister(handler);
}
 
Example 37
Project: jstorm-0.9.6.3-   File: ThriftTopologyUtils.java   (5 votes)
public static Map<String, Object> getComponents(StormTopology topology) {
	Map<String, Object> ret = new HashMap<String, Object>();
	for (StormTopology._Fields f : StormTopology.metaDataMap.keySet()) {
		Map<String, Object> componentMap = (Map<String, Object>) topology
				.getFieldValue(f);
		ret.putAll(componentMap);
	}
	return ret;
}
 
Example 38
Project: jstorm-0.9.6.3-   File: Utils.java   (5 votes)
public static ComponentCommon getComponentCommon(StormTopology topology,
		String id) {
	if (topology.get_spouts().containsKey(id)) {
		return topology.get_spouts().get(id).get_common();
	}
	if (topology.get_bolts().containsKey(id)) {
		return topology.get_bolts().get(id).get_common();
	}
	if (topology.get_state_spouts().containsKey(id)) {
		return topology.get_state_spouts().get(id).get_common();
	}
	throw new IllegalArgumentException("Could not find component with id "
			+ id);
}
 
Example 39
Project: storm-trident-example   File: ExampleTopology.java   (5 votes)
public static StormTopology buildTopology() {
	TridentTopology topology = new TridentTopology();
	RandomPhraseSpout spout1 = new RandomPhraseSpout();
	
	Stream inputStream = topology.newStream("dumbo", spout1); // "dumbo" is the stream's unique id (used for the spout's metadata); it isn't referenced elsewhere in this code.
	
	/**
	 * persistentAggregate: the persistentAggregate operation updates a source of state. Unlike partitionPersist, it is used for grouping operations.
	 * persistentAggregate is an additional abstraction built on top of partitionPersist that knows how to take a
	 * Trident aggregator and use it to apply updates to the source of state.
	 * Args:
	 * StateFactory instance - the factory implements the makeState API and should return an instance of State.
	 * Fields list - the fields to persist; these fields must be present in the input stream.
	 * StateUpdater instance - the StateUpdater updates the underlying State.
	 */
	 inputStream
	    //input stream generated by spout1 has a field called randomPhrase.
	    //RandomPhraseSplitter takes a randomPhrase and additionally emits a field called randomWord into the stream.
	    .each(new Fields("randomPhrase"), new RandomPhraseSplitter(), new Fields("randomWord"))
	    // the input stream is grouped by randomWord - similar to Storm's fields grouping.
	    .groupBy(new Fields("randomWord"))
	    // count the occurrences of randomWord using the Count aggregator, which adds a field called count to the stream.
	    //persist the count in Redis.
	    .persistentAggregate(new RedisStoreStateFactory(), new Count(), new Fields("count"));
	 
	 return topology.build();
}
 
Example 40
Project: jstorm-0.9.6.3-   File: GeneralTopologyContext.java   (5 votes)
public GeneralTopologyContext(StormTopology topology, Map stormConf,
		Map<Integer, String> taskToComponent,
		Map<String, List<Integer>> componentToSortedTasks,
		Map<String, Map<String, Fields>> componentToStreamToFields,
		String topologyId) {
	_topology = topology;
	_stormConf = stormConf;
	_taskToComponent = taskToComponent;
	_topologyId = topologyId;
	_componentToTasks = componentToSortedTasks;
	_componentToStreamToFields = componentToStreamToFields;
}
 
Example 41
Project: aeolus   File: AbstractQuery.java   (5 votes)
/**
 * Partial topology set up (adding spout and dispatcher bolt).
 */
private final StormTopology createTopology(OptionSet options, boolean realtime) {
	MonitoringTopoloyBuilder builder = new MonitoringTopoloyBuilder(options.has(measureThroughputOption),
		options.has(measureThroughputOption) ? options.valueOf(measureThroughputOption) : -1,
		options.has(measureLatencyOption),
		options.has(measureLatencyOption) ? options.valueOf(measureLatencyOption) : -1);
	
	IRichSpout spout = new FileReaderSpout();
	if(realtime) {
		spout = new DataDrivenStreamRateDriverSpout<Long>(spout, 0, TimeUnit.SECONDS);
	}
	final Integer dop = OperatorParallelism.get(TopologyControl.SPOUT_NAME);
	if(dop.intValue() > 1 && !options.has(highwaysOption)) {
		throw new IllegalArgumentException(
			"You configured a Spout parallelism greater than one, but provide only one input file "
				+ "(this would lead to data duplication as all Spout instances read the same file).");
	}
	builder.setSpout(TopologyControl.SPOUT_NAME, spout, dop);
	
	builder
		.setBolt(TopologyControl.SPLIT_STREAM_BOLT_NAME, new TimestampMerger(new DispatcherBolt(), 0),
			OperatorParallelism.get(TopologyControl.SPLIT_STREAM_BOLT_NAME))
		.shuffleGrouping(TopologyControl.SPOUT_NAME)
		.allGrouping(TopologyControl.SPOUT_NAME, TimestampMerger.FLUSH_STREAM_ID);
	
	this.addBolts(builder, options);
	
	return builder.createTopology();
}
 
Example 42
Project: Tstream   File: ServiceHandler.java   (5 votes)
@Override
public void submitTopology(String name, String uploadedJarLocation,
		String jsonConf, StormTopology topology)
		throws AlreadyAliveException, InvalidTopologyException,
		TopologyAssignException, TException {
	SubmitOptions options = new SubmitOptions(TopologyInitialStatus.ACTIVE);

	submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology,
			options);
}
 
Example 43
Project: netty-storm   File: StormRunner.java   (5 votes)
public static void runTopologyLocally(StormTopology topology,
		String topologyName, Config conf, int runtimeInSeconds)
		throws InterruptedException {
	LocalCluster cluster = new LocalCluster();
	cluster.submitTopology(topologyName, conf, topology);
	System.out
			.println("\n\n==================================\n STORM TOPOLOGY INITIALIZING \n==================================\n\n");
	// If the runtime is 0, it will run indefinitely
	if (runtimeInSeconds != 0) {
		Thread.sleep((long) runtimeInSeconds * MILLIS_IN_SEC);
		cluster.killTopology(topologyName);
		cluster.shutdown();
	}
}
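
A hypothetical caller (the buildTopology helper and the topology name here are assumptions, not part of this project):

Config conf = new Config();
conf.setDebug(true);
StormTopology topology = buildTopology(); // assumed helper on the caller's side
// run locally for 60 seconds, then kill the topology and shut down the cluster
StormRunner.runTopologyLocally(topology, "netty-storm-demo", conf, 60);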
 
Example 44
Project: storm-cassandra-cql   File: SimpleUpdateTopology.java   (5 votes)
@SuppressWarnings({ "rawtypes", "unchecked" })
public static StormTopology buildTopology() {
    LOG.info("Building topology.");
    TridentTopology topology = new TridentTopology();
    SimpleUpdateSpout spout = new SimpleUpdateSpout();
    Stream inputStream = topology.newStream("test", spout);
    SimpleUpdateMapper mapper = new SimpleUpdateMapper();
    inputStream.partitionPersist(new CassandraCqlStateFactory(ConsistencyLevel.ONE), new Fields("test"), new CassandraCqlStateUpdater(mapper));
    // inputStream.each(new Fields("test"), new Debug());
    return topology.build();
}
 
Example 45
Project: learn_jstorm   File: Worker.java   (5 votes)
/**
 * get the output task list for the current worker's tasks
 */
public Set<Integer> worker_output_tasks() {

	ContextMaker context_maker = workerData.getContextMaker();
	Set<Integer> task_ids = workerData.getTaskids();
	StormTopology topology = workerData.getSysTopology();

	Set<Integer> rtn = new HashSet<Integer>();

	for (Integer taskid : task_ids) {
		TopologyContext context = context_maker.makeTopologyContext(
				topology, taskid, null);

		// <StreamId, <ComponentId, Grouping>>
		Map<String, Map<String, Grouping>> targets = context
				.getThisTargets();
		for (Map<String, Grouping> e : targets.values()) {
			for (String componentId : e.keySet()) {
				List<Integer> tasks = context
						.getComponentTasks(componentId);
				rtn.addAll(tasks);
			}
		}
	}

	return rtn;
}
 
Example 46
Project: learn_jstorm   File: ContextMaker.java   (5 votes)
public TopologyContext makeTopologyContext(StormTopology topology,
		Integer taskId, clojure.lang.Atom openOrPrepareWasCalled) {

	Map stormConf = workerData.getStormConf();
	String topologyId = workerData.getTopologyId();

	HashMap<String, Map<String, Fields>> componentToStreamToFields = new HashMap<String, Map<String, Fields>>();

	Set<String> components = ThriftTopologyUtils.getComponentIds(topology);
	for (String component : components) {

		Map<String, Fields> streamToFieldsMap = new HashMap<String, Fields>();

		Map<String, StreamInfo> streamInfoMap = ThriftTopologyUtils
				.getComponentCommon(topology, component).get_streams();
		for (Entry<String, StreamInfo> entry : streamInfoMap.entrySet()) {
			String streamId = entry.getKey();
			StreamInfo streamInfo = entry.getValue();

			streamToFieldsMap.put(streamId,
					new Fields(streamInfo.get_output_fields()));
		}

		componentToStreamToFields.put(component, streamToFieldsMap);
	}

	return new TopologyContext(topology, stormConf,
			workerData.getTasksToComponent(),
			workerData.getComponentToSortedTasks(),
			componentToStreamToFields, topologyId, resourcePath, pidDir,
			taskId, workerData.getPort(), workerTasks,
			workerData.getDefaultResources(),
			workerData.getUserResources(), workerData.getExecutorData(),
			workerData.getRegisteredMetrics(), openOrPrepareWasCalled);

}
 
Example 47
Project: learn_jstorm   File: TopologyDetails.java   (5 votes)
public TopologyDetails(String topologyId, Map topologyConf,
		StormTopology topology, int numWorkers,
		Map<ExecutorDetails, String> executorToComponents) {
	this(topologyId, topologyConf, topology, numWorkers);
	this.executorToComponent = new HashMap<ExecutorDetails, String>(0);
	if (executorToComponents != null) {
		this.executorToComponent.putAll(executorToComponents);
	}
}
 
Example 48
Project: learn_jstorm   File: Common.java   (5 votes)
public static List<Object> all_components(StormTopology topology) {
	List<Object> rtn = new ArrayList<Object>();
	for (StormTopology._Fields field : Thrift.STORM_TOPOLOGY_FIELDS) {
		Object fields = topology.getFieldValue(field);
		if (fields != null) {
			rtn.addAll(((Map) fields).values());
		}
	}
	return rtn;
}
 
Example 49
Project: learn_jstorm   File: Common.java   (5 votes)
public static StormTopology add_system_components(StormTopology topology) {
	// generate inputs
	Map<GlobalStreamId, Grouping> inputs = new HashMap<GlobalStreamId, Grouping>();

	// generate outputs
	HashMap<String, StreamInfo> outputs = new HashMap<String, StreamInfo>();
	ArrayList<String> fields = new ArrayList<String>();

	outputs.put(Constants.SYSTEM_TICK_STREAM_ID,
			Thrift.outputFields(JStormUtils.mk_list("rate_secs")));
	outputs.put(Constants.METRICS_TICK_STREAM_ID,
			Thrift.outputFields(JStormUtils.mk_list("interval")));
	outputs.put(Constants.CREDENTIALS_CHANGED_STREAM_ID,
			Thrift.outputFields(JStormUtils.mk_list("creds")));

	ComponentCommon common = new ComponentCommon(inputs, outputs);

	IBolt ackerbolt = new SystemBolt();

	Bolt bolt = Thrift.mkBolt(inputs, ackerbolt, outputs,
			Integer.valueOf(0));

	topology.put_to_bolts(Constants.SYSTEM_COMPONENT_ID, bolt);

	add_system_streams(topology);

	return topology;

}
 
Example 50
Project: learn_jstorm   File: Common.java   (5 votes)
public static StormTopology add_metrics_component(StormTopology topology) {

		/**
		 * @@@ TODO Add metrics consumer bolt
		 */
		//		(defn metrics-consumer-bolt-specs [storm-conf topology]
		//				  (let [component-ids-that-emit-metrics (cons SYSTEM-COMPONENT-ID (keys (all-components topology)))
		//				        inputs (->> (for [comp-id component-ids-that-emit-metrics]
		//				                      {[comp-id METRICS-STREAM-ID] :shuffle})
		//				                    (into {}))
		//				        
		//				        mk-bolt-spec (fn [class arg p]
		//				                       (thrift/mk-bolt-spec*
		//				                        inputs
		//				                        (backtype.storm.metric.MetricsConsumerBolt. class arg)
		//				                        {} :p p :conf {TOPOLOGY-TASKS p}))]
		//				    
		//				    (map
		//				     (fn [component-id register]           
		//				       [component-id (mk-bolt-spec (get register "class")
		//				                                   (get register "argument")
		//				                                   (or (get register "parallelism.hint") 1))])
		//				     
		//				     (metrics-consumer-register-ids storm-conf)
		//				     (get storm-conf TOPOLOGY-METRICS-CONSUMER-REGISTER))))
		return topology;
	}
 
Example 51
Project: jstrom   File: SequenceTopologyTool.java   (5 votes)
public void SetLocalTopology() throws Exception {
	Config conf = getConf();

	StormTopology topology = buildTopology();
	LocalCluster cluster = new LocalCluster();
	cluster.submitTopology("SplitMerge", conf, topology);
	Thread.sleep(60000);
	cluster.shutdown();
}
 
Example 52
Project: learn_jstorm   File: StormConfig.java   (5 votes)
public static StormTopology read_supervisor_topology_code(Map conf,
		String topologyId) throws IOException {
	String topologyRoot = StormConfig.supervisor_stormdist_root(conf,
			topologyId);
	String codePath = StormConfig.stormcode_path(topologyRoot);
	return (StormTopology) readLocalObject(topologyId, codePath);
}
 
Example 53
Project: jstrom   File: TopologyDetails.java   (5 votes)
public TopologyDetails(String topologyId, Map topologyConf, StormTopology topology, int numWorkers, Map<ExecutorDetails, String> executorToComponents) {
    this(topologyId, topologyConf, topology, numWorkers);
    this.executorToComponent = new HashMap<ExecutorDetails, String>(0);
    if (executorToComponents != null) {
        this.executorToComponent.putAll(executorToComponents);
    }
}
 
Example 54
Project: Tstream   File: TopologyDetails.java   (5 votes)
public TopologyDetails(String topologyId, Map topologyConf,
		StormTopology topology, int numWorkers) {
	this.topologyId = topologyId;
	this.topologyConf = topologyConf;
	this.topology = topology;
	this.numWorkers = numWorkers;
}
 
Example 55
Project: learn_jstorm   File: TopologyPage.java   (5 votes)
/**
 * build the spout and bolt List<Components>
 *
 * @param ts
 * @param topology
 * @throws NotAliveException
 */
private void getComponents(List<TaskSummary> ts, StormTopology topology)
		throws NotAliveException {
	if (ts == null) {
		LOG.error("Task list is empty");
		throw new NotAliveException("Task list is empty");
	}

	Map<String, List<TaskSummary>> spoutTasks = new HashMap<String, List<TaskSummary>>();
	Map<String, List<TaskSummary>> boltTasks = new HashMap<String, List<TaskSummary>>();

	for (TaskSummary t : ts) {
		if (t == null) {
			continue;
		}

		String componentid = t.get_component_id();
		String componentType = UIUtils.componentType(topology, componentid);
		if (componentType.equals(UIUtils.SPOUT_STR)) {

			UIUtils.addTask(spoutTasks, t, componentid);
		} else if (componentType.equals(UIUtils.BOLT_STR)) {
			UIUtils.addTask(boltTasks, t, componentid);
		}

	}

	scom = getComponents(spoutTasks, UIUtils.SPOUT_STR);
	bcom = getComponents(boltTasks, UIUtils.BOLT_STR);
}
 
Example 56
Project: learn_jstorm   File: UIUtils.java   (5 votes)
public static String componentType(StormTopology topology, String id) {
	Map<String, Bolt> bolts = topology.get_bolts();
	Map<String, SpoutSpec> spouts = topology.get_spouts();
	String type = "";
	if (bolts.containsKey(id)) {
		type = BOLT_STR;
	} else if (spouts.containsKey(id)) {
		type = SPOUT_STR;
	}
	return type;
}
 
Example 57
Project: Tstream   File: ThriftTopologyUtils.java   (5 votes)
public static Map<String, Object> getComponents(StormTopology topology) {
	Map<String, Object> ret = new HashMap<String, Object>();
	for (StormTopology._Fields f : StormTopology.metaDataMap.keySet()) {
		Map<String, Object> componentMap = (Map<String, Object>) topology
				.getFieldValue(f);
		ret.putAll(componentMap);
	}
	return ret;
}
 
Example 58
Project: Tstream   File: TopologyPage.java   (5 votes)
/**
 * build the spout and bolt List<Components>
 *
 * @param ts
 * @param topology
 * @throws NotAliveException
 */
private void getComponents(List<TaskSummary> ts, StormTopology topology)
		throws NotAliveException {
	if (ts == null) {
		LOG.error("Task list is empty");
		throw new NotAliveException("Task list is empty");
	}

	Map<String, List<TaskSummary>> spoutTasks = new HashMap<String, List<TaskSummary>>();
	Map<String, List<TaskSummary>> boltTasks = new HashMap<String, List<TaskSummary>>();

	for (TaskSummary t : ts) {
		if (t == null) {
			continue;
		}

		String componentid = t.get_component_id();
		String componentType = UIUtils.componentType(topology, componentid);
		if (componentType.equals(UIUtils.SPOUT_STR)) {

			UIUtils.addTask(spoutTasks, t, componentid);
		} else if (componentType.equals(UIUtils.BOLT_STR)) {
			UIUtils.addTask(boltTasks, t, componentid);
		}

	}

	scom = getComponents(spoutTasks, UIUtils.SPOUT_STR);
	bcom = getComponents(boltTasks, UIUtils.BOLT_STR);
}