Java Code Examples for backtype.storm.utils.Utils

The following are the top-voted examples showing how to use backtype.storm.utils.Utils. These examples are extracted from open-source projects. You can vote up the examples you find useful; your votes help our system surface better examples.
Example 1
Project: Mastering-Mesos   File: MesosSupervisor.java   View source code 6 votes vote down vote up
@Override
public void run() {
  // Watchdog loop: periodically checks whether this supervisor still has any
  // assigned ports, and kills the process if it has been idle too long.
  try {
    while (true) {
      long now = System.currentTimeMillis();
      if (!_supervisorViewOfAssignedPorts.get().isEmpty()) {
        _lastTime = now; // something is assigned; reset the idle timer
      }
      if ((now - _lastTime) > 1000L * _timeoutSecs) {
        LOG.info("Supervisor has not had anything assigned for {} secs. Committing suicide...", _timeoutSecs);
        Runtime.getRuntime().halt(0);
      }
      Utils.sleep(5000);
    }
  } catch (Throwable t) {
    // Log the full throwable: the original logged only t.getMessage(), which
    // drops the stack trace and may itself be null.
    LOG.error("Supervisor suicide watchdog failed", t);
    Runtime.getRuntime().halt(2);
  }
}
 
Example 2
Project: storm-scheduler   File: UuidFixedRatePayloadSpout.java   View source code 6 votes vote down vote up
@Override
public void nextTuple() {
    long thisSecond;

    // Detect the start of a new wall-clock second and refill the quota.
    thisSecond = System.currentTimeMillis() / 1000;
    if (thisSecond != this.lastSecond) {
        this.lastSecond = thisSecond;
        this.tuplesRemainingThisSecond = this.tuplesPerSecond;
    }

    // Check before decrementing: the original decremented first and tested
    // "> 0" afterwards, emitting only (tuplesPerSecond - 1) tuples per second.
    if (this.tuplesRemainingThisSecond > 0) {
        this.tuplesRemainingThisSecond--;          // bookkeeping
        super.nextTuple();
    } else {
        /** we should wait 1ms. {@see backtype.storm.spout.ISpout#nextTuple()} */
        Utils.sleep(1);
    }
}
 
Example 3
Project: splice-community-sample-code   File: MySqlToSpliceTopology.java   View source code 6 votes vote down vote up
public static void main(String[] args) throws SQLException {

        // Splice target table and server instance for the streamed rows.
        String tableName = "students";
        String server = "localhost";

        // Wire the topology: MySQL spout feeding a single Splice-writer bolt.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("seedDataFromMySql", new MySqlSpout());
        builder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1)
                .shuffleGrouping("seedDataFromMySql");

        Config conf = new Config();
        conf.setDebug(true);

        // Run in-process for three seconds, then tear the cluster down.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("mysql-splice-topology", conf, builder.createTopology());
        Utils.sleep(3000);
        cluster.shutdown();
    }
 
Example 4
Project: splice-community-sample-code   File: SpliceIntegerSpout.java   View source code 6 votes vote down vote up
@Override
public void nextTuple() {
    // Emit roughly one (word, number) pair per second.
    Utils.sleep(1000);
    String[] words = new String[]{"splice", "machine", "hadoop", "rdbms", "acid", "sql", "transactions"};
    Integer[] numbers = new Integer[]{
            1, 2, 3, 4, 5, 6, 7
    };
    // Read the current slot first, then advance cyclically. The original
    // pre-incremented `count`, so index 0 was never emitted after start-up
    // and the arrays' first entries were effectively dead.
    int number = numbers[count];
    String word = words[count];
    count = (count + 1) % numbers.length;
    int randomNum = (int) (Math.random() * 1000);
    System.out.println("Random Number: " + randomNum);
    System.out.println("SpliceIntegerSpout emitting: " + number);
    _collector.emit(new Values(word, number));
}
 
Example 5
Project: splice-community-sample-code   File: MySqlSpout.java   View source code 6 votes vote down vote up
public void nextTuple() {
    // Refill the buffer from MySQL when it runs dry; otherwise drain one row.
    if (bufferQueue.isEmpty()) {
        // pass in mysql server, db name, user, password
        seedBufferQueue("localhost", "test", "root", "");
        Utils.sleep(100);
        return;
    }
    // The query is expected to return only a "name" column, so that is the
    // single field emitted. To emit more fields, add them to the Values
    // instance before calling emit.
    String polled = bufferQueue.poll();
    if (polled != null) {
        Values out = new Values();
        out.add(polled);
        _collector.emit(out);
    }
    Utils.sleep(50);
}
 
Example 6
Project: aeolus   File: IncSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testExecuteUnique() {
	IncSpout spout = new IncSpout(1);
	TestSpoutOutputCollector collector = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(collector));
	
	// After five calls, the default stream must contain <0L> .. <4L> in order.
	List<List<Object>> expected = new LinkedList<List<Object>>();
	for(int i = 0; i < 5; ++i) {
		ArrayList<Object> tuple = new ArrayList<Object>();
		tuple.add(Long.valueOf(i));
		expected.add(tuple);
		spout.nextTuple();
	}
	
	Assert.assertEquals(expected, collector.output.get(Utils.DEFAULT_STREAM_ID));
}
 
Example 7
Project: Tstream   File: SerializationFactory.java   View source code 6 votes vote down vote up
public IdDictionary(StormTopology topology) {
	// Gather every component id in the topology: spouts, bolts, state spouts.
	List<String> componentNames = new ArrayList<String>(topology.get_spouts().keySet());
	componentNames.addAll(topology.get_bolts().keySet());
	componentNames.addAll(topology.get_state_spouts().keySet());

	// For each component, assign numeric ids to its stream names and keep
	// both the name->id and id->name mappings.
	for (String name : componentNames) {
		ComponentCommon common = Utils.getComponentCommon(topology, name);
		List<String> streams = new ArrayList<String>(common.get_streams().keySet());
		streamNametoId.put(name, idify(streams));
		streamIdToName.put(name, Utils.reverseMap(streamNametoId.get(name)));
	}
}
 
Example 8
Project: jstorm-0.9.6.3-   File: StormZkClusterState.java   View source code 6 votes vote down vote up
@Override
public void update_storm(String topologyId, StormStatus newElems)
		throws Exception {
	// Load the stored storm base for this topology, apply the new status, and
	// write it back to the cluster state. (Equivalent of the original Clojure:
	// storm-base -> merge new-elems -> Utils/serialize -> set-data.)
	StormBase base = this.storm_base(topologyId, null);
	if (base == null) {
		return; // nothing stored for this topology; nothing to update
	}
	base.setStatus(newElems);
	cluster_state.set_data(Cluster.storm_path(topologyId), Utils.serialize(base));
}
 
Example 9
Project: Tstream   File: GeneralTopologyContext.java   View source code 6 votes vote down vote up
public int maxTopologyMessageTimeout() {
	// Start from the topology-level timeout, then take the maximum over any
	// per-spout override found in each spout's JSON configuration.
	Integer max = Utils.getInt(_stormConf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS));
	for (String spoutId : getRawTopology().get_spouts().keySet()) {
		String jsonConf = getComponentCommon(spoutId).get_json_conf();
		if (jsonConf == null) {
			continue; // spout has no private config
		}
		Map conf = (Map) Utils.from_json(jsonConf);
		Object override = conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS);
		if (override != null) {
			max = Math.max(Utils.getInt(override), max);
		}
	}
	return max;
}
 
Example 10
Project: jstrom   File: rebalance.java   View source code 6 votes vote down vote up
/**
 * Submits a rebalance request for the named topology to Nimbus.
 * Any entries in {@code conf} override the locally read storm config.
 *
 * @throws Exception if reading the config or contacting Nimbus fails.
 */
public static void submitRebalance(String topologyName, RebalanceOptions options, Map conf) throws Exception {
    Map stormConf = Utils.readStormConfig();
    if (conf != null) {
        stormConf.putAll(conf);
    }

    // The original wrapped the call in `catch (Exception e) { throw e; }`,
    // which is a no-op; the try/finally alone guarantees the client closes.
    NimbusClient client = null;
    try {
        client = NimbusClient.getConfiguredClient(stormConf);
        client.getClient().rebalance(topologyName, options);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
 
Example 11
Project: learn_jstorm   File: DRPCSpout.java   View source code 6 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_collector = collector;
	if (_local_drpc_id != null) {
		return; // local DRPC mode needs no remote invocation clients
	}

	int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
	int index = context.getThisTaskIndex();
	int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));

	List<String> servers = (List<String>) conf.get(Config.DRPC_SERVERS);
	if (servers == null || servers.isEmpty()) {
		throw new RuntimeException(
				"No DRPC servers configured for topology");
	}

	if (numTasks < servers.size()) {
		// Fewer tasks than servers: each task connects to every server so no
		// server is left without a listener.
		for (String server : servers) {
			_clients.add(new DRPCInvocationsClient(server, port));
		}
	} else {
		// Otherwise spread tasks across servers round-robin by task index.
		_clients.add(new DRPCInvocationsClient(servers.get(index % servers.size()), port));
	}
}
 
Example 12
Project: learn_jstorm   File: TridentTopology.java   View source code 6 votes vote down vote up
/**
 * Assigns each non-spout group a unique bolt id of the form
 * "b-&lt;counter&gt;[-&lt;groupName&gt;]".
 */
private static Map<Group, String> genBoltIds(Collection<Group> groups) {
    // Typed constructors: the original used raw `new HashMap()` / `new
    // ArrayList()`, producing unchecked-assignment warnings.
    Map<Group, String> ret = new HashMap<Group, String>();
    int ctr = 0;
    for(Group g: groups) {
        if(!isSpoutGroup(g)) {
            List<String> name = new ArrayList<String>();
            name.add("b");
            name.add("" + ctr);
            String groupName = getGroupName(g);
            if(groupName != null && !groupName.isEmpty()) {
                name.add(groupName); // reuse the value instead of recomputing it
            }
            ret.put(g, Utils.join(name, "-"));
            ctr++;
        }
    }
    return ret;
}
 
Example 13
Project: jstorm-0.9.6.3-   File: TridentBoltExecutor.java   View source code 6 votes vote down vote up
/**
 * Finishes the given tracked batch: lets the wrapped bolt complete it,
 * broadcasts coordination counts to the downstream target tasks, and releases
 * any delayed ack. Returns false if the bolt threw a FailedException (the
 * batch is then failed instead). The batch is removed from tracking either way.
 */
private boolean finishBatch(TrackedBatch tracked, Tuple finishTuple) {
    boolean success = true;
    try {
        _bolt.finishBatch(tracked.info);
        String stream = COORD_STREAM(tracked.info.batchGroup);
        // Tell each target task how many tuples this task emitted to it for
        // this batch, so the receiver knows when the batch is complete.
        for(Integer task: tracked.condition.targetTasks) {
            _collector.emitDirect(task, stream, finishTuple, new Values(tracked.info.batchId, Utils.get(tracked.taskEmittedTuples, task, 0)));
        }
        // Ack that was deferred until the batch finished.
        if(tracked.delayedAck!=null) {
            _collector.ack(tracked.delayedAck);
            tracked.delayedAck = null;
        }
    } catch(FailedException e) {
        failBatch(tracked, e);
        success = false;
    }
    // Stop tracking the batch regardless of outcome.
    _batches.remove(tracked.info.batchId.getId());
    return success;
}
 
Example 14
Project: cdh-storm   File: ExclamationTopology.java   View source code 6 votes vote down vote up
public static void main(String[] args) throws Exception {
  // Topology: word spout feeding two chained exclamation bolts.
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new TestWordSpout(), 10);
  builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  boolean remote = args != null && args.length > 0;
  if (remote) {
    // Submit to a real cluster under the name given on the command line.
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    // No arguments: run in-process for ten seconds, then clean up.
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example 15
Project: aeolus   File: RandomSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testExecuteMultipleStreams() {
	int numberOfAttributes = 1 + this.r.nextInt(10);
	String[] streamIds = new String[] {Utils.DEFAULT_STREAM_ID, "myStreamId"};
	RandomSpout spout = new RandomSpout(numberOfAttributes, 100, streamIds);
	
	TestSpoutOutputCollector collector = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(collector));
	
	// Each call must append exactly one tuple per declared stream, with every
	// attribute an Integer in the range (0, 100].
	for(int call = 0; call < 50; ++call) {
		spout.nextTuple();
		for(String stream : streamIds) {
			List<List<Object>> emitted = collector.output.get(stream);
			Assert.assertTrue(emitted.size() == call + 1);
			List<Object> tuple = emitted.get(call);
			Assert.assertTrue(tuple.size() == numberOfAttributes);
			for(int a = 0; a < numberOfAttributes; ++a) {
				int value = ((Integer)tuple.get(a)).intValue();
				Assert.assertTrue(0 < value);
				Assert.assertTrue(100 >= value);
			}
		}
	}
}
 
Example 16
Project: aeolus   File: ForwardBoltTest.java   View source code 6 votes vote down vote up
@Test
public void testExecute() {
	ForwardBolt bolt = new ForwardBolt(new Fields("dummy"));
	
	TestOutputCollector collector = new TestOutputCollector();
	bolt.prepare(null, null, new OutputCollector(collector));
	
	// Feed three mocked tuples; each must be forwarded unchanged and acked
	// immediately.
	LinkedList<Tuple> input = new LinkedList<Tuple>();
	List<List<Object>> expected = new LinkedList<List<Object>>();
	for(int i = 0; i < 3; ++i) {
		ArrayList<Object> attributes = new ArrayList<Object>();
		attributes.add(Integer.valueOf(i));
		
		Tuple tuple = mock(Tuple.class);
		when(tuple.getValues()).thenReturn(attributes);
		input.add(tuple);
		expected.add(attributes);
		
		bolt.execute(tuple);
		Assert.assertEquals(input, collector.acked);
	}
	
	Assert.assertEquals(expected, collector.output.get(Utils.DEFAULT_STREAM_ID));
}
 
Example 17
Project: aeolus   File: IncSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testExecuteUniqueMultipleStreams() {
	String[] streamIds = new String[] {Utils.DEFAULT_STREAM_ID, "myStreamId"};
	IncSpout spout = new IncSpout(streamIds);
	
	TestSpoutOutputCollector collector = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(collector));
	
	// Expected output is <0L> .. <4L>, duplicated onto every declared stream.
	List<List<Object>> expected = new LinkedList<List<Object>>();
	for(int i = 0; i < 5; ++i) {
		ArrayList<Object> tuple = new ArrayList<Object>();
		tuple.add(Long.valueOf(i));
		expected.add(tuple);
		spout.nextTuple();
	}
	
	for(String stream : streamIds) {
		Assert.assertEquals(expected, collector.output.get(stream));
	}
}
 
Example 18
Project: rb-bi   File: ZkState.java   View source code 5 votes vote down vote up
/**
 * Builds a Curator client from the transactional ZooKeeper settings in
 * {@code stateConf}.
 */
private CuratorFramework newCurator(Map stateConf) throws Exception {
    Integer port = (Integer) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT);
    // Build "host1:port,host2:port" — the original appended a separator after
    // every entry, leaving a trailing comma in the connect string.
    StringBuilder serverPorts = new StringBuilder();
    for (String server : (List<String>) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS)) {
        if (serverPorts.length() > 0) {
            serverPorts.append(",");
        }
        serverPorts.append(server).append(":").append(port);
    }
    return CuratorFrameworkFactory.newClient(serverPorts.toString(),
            Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
            15000, // connection timeout in ms; NOTE(review): hard-coded, consider making configurable
            new RetryNTimes(Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
                    Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
}
 
Example 19
Project: rb-bi   File: KafkaUtils.java   View source code 5 votes vote down vote up
public static Iterable<List<Object>> generateTuples(KafkaConfig kafkaConfig, Message msg) {
    // A message with no payload yields no tuples.
    ByteBuffer payload = msg.payload();
    if (payload == null) {
        return null;
    }
    // Use the key-aware scheme when a key is present and the configured scheme
    // supports key/value deserialization; otherwise deserialize payload only.
    ByteBuffer key = msg.key();
    if (key != null && kafkaConfig.scheme instanceof KeyValueSchemeAsMultiScheme) {
        return ((KeyValueSchemeAsMultiScheme) kafkaConfig.scheme)
                .deserializeKeyAndValue(Utils.toByteArray(key), Utils.toByteArray(payload));
    }
    return kafkaConfig.scheme.deserialize(Utils.toByteArray(payload));
}
 
Example 20
Project: rb-bi   File: DynamicBrokersReader.java   View source code 5 votes vote down vote up
public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
    _zkPath = zkPath;
    _topic = topic;
    try {
        _curator = CuratorFrameworkFactory.newClient(
                zkStr,
                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)),
                new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
        _curator.start();
    } catch (Exception ex) {
        LOG.error("Couldn't connect to zookeeper", ex);
    }
}
 
Example 21
Project: storm-scheduler   File: SchedulingMetricsCollectionHook.java   View source code 5 votes vote down vote up
/**
 * Returns the configured interval in which metrics should be sent to the
 * metrics consumer, falling back to {@code DEFAULT_INTERVAL_SECS} when the
 * key is absent.
 *
 * @param stormConf the configuration object to check.
 * @return the interval value in seconds.
 */
public static int getConfiguredSchedulingIntervalSecs(Map stormConf) {
    if (!stormConf.containsKey(CONF_SCHEDULING_METRICS_INTERVAL_SECS)) {
        return DEFAULT_INTERVAL_SECS;
    }
    return Utils.getInt(stormConf.get(CONF_SCHEDULING_METRICS_INTERVAL_SECS)).intValue();
}
 
Example 22
Project: storm-scheduler   File: UuidSpout.java   View source code 5 votes vote down vote up
@Override
public void nextTuple() {
    if (!this.disableAniello) {
        taskMonitor.checkThreadId();
    }

    // Message ids start at 1.
    this.emitCount++;
    this.collector.emit(new Values(this.uuid), this.emitCount);

    // Every 100k tuples: log progress and yield for a millisecond.
    if ((emitCount % 100000) == 0) {
        LOG.info("Emitted {} tuples", this.emitCount);
        Utils.sleep(1);
    }
}
 
Example 23
Project: storm-scheduler   File: UuidPayloadSpout.java   View source code 5 votes vote down vote up
@Override
public void nextTuple() {
    if (!this.disableAniello) {
        taskMonitor.checkThreadId();
    }

    // Emit the uuid plus payload, using the running count (starting at 1) as
    // the message id.
    this.emitCount++;
    this.collector.emit(new Values(this.uuid, this.payload), this.emitCount);

    // Log and briefly yield every 100k emitted tuples.
    if ((emitCount % 100000) == 0) {
        LOG.info("Emitted {} tuples", this.emitCount);
        Utils.sleep(1);
    }
}
 
Example 24
Project: fiware-sinfonier   File: DynamicTopology.java   View source code 5 votes vote down vote up
// Builds a topology entirely from a JSON description and either submits it to
// a cluster (when a "name" property is configured) or runs it locally.
public static void main(String[] args) throws Exception {

        LOG.info("Reading JSON file configuration...");
        JSONProperties config = new JSONProperties("/topology.json");
        TopologyBuilder builder = new TopologyBuilder();

        /* Spout Configuration */
        JSONArray spouts = config.getSpouts();
        configureSpouts(builder, spouts);

        /* Bolt Configuration */
        JSONArray bolts = config.getBolts();
        configureBolts(builder, bolts);

        /* Drain Configuration */
        JSONArray drains = config.getDrains();
        configureDrains(builder, drains);

        /* Configure more Storm options */
        Config conf = setTopologyStormConfig(config.getProperties());


        // A configured "name" selects remote submission; otherwise run locally.
        if(config.getProperty("name") != null){
            StormSubmitter.submitTopology((String)config.getProperty("name"), conf, builder.createTopology());
        } else {
            conf.setDebug(true);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", conf, builder.createTopology());
            Utils.sleep(1000000); // keep alive for 1,000,000 ms = 1000 seconds (original comment wrongly said 100 s)
            cluster.killTopology("test");
            cluster.shutdown();
        }

    }
 
Example 25
Project: splice-community-sample-code   File: SpliceDumperTopology.java   View source code 5 votes vote down vote up
public static void main(String[] args) throws SQLException {

        // Target table, which must already exist in Splice:
        //   create table testTable (word varchar(100), number int);
        String tableName = "testTable";
        String server = "localhost";

        // Column names for the dumped stream.
        ArrayList<String> columnNames = new ArrayList<String>();
        columnNames.add("word");
        columnNames.add("number");

        // Matching column types.
        ArrayList<String> columnTypes = new ArrayList<String>();
        columnTypes.add("varchar (100)");
        columnTypes.add("int");

        // Integer spout feeding a Splice dumper bolt.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new SpliceIntegerSpout(), 10);
        SpliceDumperBolt dumperBolt = new SpliceDumperBolt(server, tableName);
        builder.setBolt("dumperBolt", dumperBolt, 1).shuffleGrouping("spout");

        Config conf = new Config();
        conf.setDebug(true);

        // Run locally for ten seconds, then shut down.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("splice-topology", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.shutdown();
    }
 
Example 26
Project: preliminary.demo   File: RaceSentenceSpout.java   View source code 5 votes vote down vote up
public void nextTuple() {
    // Emit sendNumPerNexttuple random sentences, pausing 10 ms before each.
    for (int remaining = sendNumPerNexttuple; remaining > 0; remaining--) {
        Utils.sleep(10);
        String sentence = CHOICES[_rand.nextInt(CHOICES.length)];
        _collector.emit(new Values(sentence));
    }
    updateSendTps();
}
 
Example 27
Project: preliminary.demo   File: SpoutLocal.java   View source code 5 votes vote down vote up
public void nextTuple() {
    // Emit a batch of randomly chosen sentences, one every 10 ms, then update
    // the throughput counter.
    for (int i = 0; i < sendNumPerNexttuple; i++) {
        Utils.sleep(10);
        _collector.emit(new Values(CHOICES[_rand.nextInt(CHOICES.length)]));
    }
    updateSendTps();
}
 
Example 28
Project: miner   File: EmitMessageSpout.java   View source code 5 votes vote down vote up
public void nextTuple() {
        // Initial pause, then emit one profile URL per uid in [22, 30], one
        // second apart, using the uid as the message id.
        Utils.sleep(5000);
        for (int uid = 22; uid <= 30; uid++) {
            Utils.sleep(1000);
            String url = "https://account.wandoujia.com/v4/api/simple/profile?uid=" + String.valueOf(uid);
            _collector.emit(new Values(url), uid);
        }
    }
 
Example 29
Project: miner   File: ProduceRecordSpout.java   View source code 5 votes vote down vote up
// Emits one randomly chosen record (about two per second) tagged with its
// configured type; the emitted values themselves double as the message id.
public void nextTuple(){
    Utils.sleep(500);
    String record = recordLines[rand.nextInt(recordLines.length)];
    List<Object> values = new Values(type, record);
    // Second argument is the message id for ack/fail tracking.
    collector.emit(values, values);
    System.out.println("Record emitted: type=" + type + ", record=" + record);
}
 
Example 30
Project: miner   File: ExclaimBasicTopo.java   View source code 5 votes vote down vote up
public static void main(String[] args) throws Exception {
    // Pipeline: random spout -> proxy (exclaim) bolt -> print bolt.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSpout());
    builder.setBolt("exclaim", new ProxyBolt()).shuffleGrouping("spout");
    builder.setBolt("print", new PrintBolt()).shuffleGrouping("exclaim");

    Config conf = new Config();
    conf.setDebug(false);

    /* Redis connection settings are carried inside the Storm config. */
    conf.put("ip", "127.0.0.1");
    conf.put("port", "6379");
    conf.put("password", "password");

    if (args == null || args.length == 0) {
        // Local mode: run for ten seconds, then tear everything down.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(10 * 1000);
        cluster.killTopology("test");
        cluster.shutdown();
    } else {
        // Remote mode: submit under the name supplied on the command line.
        conf.setNumWorkers(1);
        StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
    }
}
 
Example 31
Project: miner   File: EmitMessageSpout.java   View source code 5 votes vote down vote up
public void nextTuple() {
        // Pause, then crawl the profile page for each uid in [22, 30] and emit
        // the downloaded content, one fetch per second.
        Utils.sleep(1000);
        for (int uid = 22; uid <= 30; uid++) {
            Utils.sleep(1000);
            String url = "https://account.wandoujia.com/v4/api/simple/profile?uid=" + String.valueOf(uid);
            String page = Crawl4HttpClient.downLoadPage(url);
            _collector.emit(new Values(page));
        }
    }
 
Example 32
Project: erad2016-streamprocessing   File: TwitterSpout.java   View source code 5 votes vote down vote up
public void nextTuple() {
    // Drain one item from the queue if available; otherwise back off briefly
    // so the spout does not busy-spin.
    String item = queue.poll();
    if (item != null) {
        collector.emit(new Values(item));
    } else {
        Utils.sleep(50);
    }
}
 
Example 33
Project: Get-ENVS   File: RandomSentenceSpout.java   View source code 5 votes vote down vote up
@Override
public void nextTuple() {
  // Throttle the emission rate.
  Utils.sleep(100);
  // Candidate sentences; one is chosen uniformly at random per call.
  final String[] sentences = { "the cow jumped over the moon", "an apple a day keeps the doctor away",
      "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
  _collector.emit(new Values(sentences[_rand.nextInt(sentences.length)]));
}
 
Example 34
Project: es-hadoop-v2.2.0   File: RandomSentenceSpout.java   View source code 5 votes vote down vote up
@Override
public void nextTuple() {
    // Brief pause so the spout does not spin.
    Utils.sleep(100);
    // Pick one of the fixed sentences uniformly at random and emit it.
    final String[] sentences = { "the cow jumped over the moon", "an apple a day keeps the doctor away",
            "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
    _collector.emit(new Values(sentences[_rand.nextInt(sentences.length)]));
}
 
Example 35
Project: ignite-book-code-samples   File: RandomSentenceSpout.java   View source code 5 votes vote down vote up
@Override
public void nextTuple() {
    // Throttle the spout.
    Utils.sleep(100);
    // The pool of sentences emitted by this spout.
    String[] sentences = new String[]{"the cow jumped over the moon", "an apple a day keeps the doctor away",
            "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature"};
    // Select one at random and emit it.
    int pick = _rand.nextInt(sentences.length);
    _collector.emit(new Values(sentences[pick]));
}
 
Example 36
Project: sourcevirtues-samples   File: RandomSentenceTestSpout.java   View source code 5 votes vote down vote up
@Override
public void nextTuple() {
    // Wait the configured delay, then emit one randomly chosen sentence.
    Utils.sleep(sleepMillis);
    _collector.emit(new Values(sentences[_rand.nextInt(sentences.length)]));
}
 
Example 37
Project: java   File: DeliveryTopology.java   View source code 5 votes vote down vote up
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  LOGGER.info("Starting..");

  // Trade spout feeds an eligibility bolt, which splits tuples onto odd/even
  // streams consumed by their respective bolts.
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("trade", new DeliveryCheckSpout(), 1);
  builder.setBolt("eligibility", new DeliveryCheckBolt(), 10).shuffleGrouping("trade");
  builder.setBolt("odd", new DeliveryCheckOddBolt(), 10).shuffleGrouping("eligibility",
      "oddstream");
  builder.setBolt("even", new DeliveryCheckEvenBolt(), 10).shuffleGrouping("eligibility",
      "evenstream");

  Config conf = new Config();
  conf.setDebug(false);
  conf.setMaxSpoutPending(5);

  boolean remote = args != null && args.length > 0;
  if (remote) {
    conf.setNumWorkers(1);
    LOGGER.info("Submitting DeliveryTopology");
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    // Local mode: run for a very long time, then clean up.
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("DeliveryTopology", conf, builder.createTopology());
    Utils.sleep(100000000);
    cluster.killTopology("DeliveryTopology");
    cluster.shutdown();
  }
}
 
Example 38
Project: java   File: TradeReportPersistenceBolt.java   View source code 5 votes vote down vote up
/**
 * Persists an eligible trade to the reporting file, appending receive
 * timestamps and the measured latency, then optionally acks the tuple.
 * Any failure fails the tuple so Storm can replay it.
 */
@Override
public void execute(Tuple tuple) {
  LOGGER.info("Processing ELIGIBLE Trade");
  long newTime = 0;
  try {
    if (CONFIG.is("REPORTING_TIME_DELAY_ON")) {
      Utils.sleep(CONFIG.getLong("REPORTING_PERSISTENCE_TIME"));
    }
    // try-with-resources guarantees the writer is closed even when a write
    // throws; the original leaked the FileWriter/BufferedWriter on error.
    try (BufferedWriter bufferedWriter =
        new BufferedWriter(new FileWriter(CONFIG.get("REPT_PERSISTENCE_PATH"), true))) {
      bufferedWriter.write(tuple.getString(0));
      bufferedWriter.write(COMMA_SEPARATOR);
      bufferedWriter.write(String.valueOf(new Date()));
      bufferedWriter.write(COMMA_SEPARATOR);
      newTime = new Date().getTime();
      bufferedWriter.write(String.valueOf(newTime));
      bufferedWriter.write(COMMA_SEPARATOR);
      // Latency: now minus the send timestamp embedded in field 4 of the CSV.
      bufferedWriter.write(
          String.valueOf(newTime - Long.parseLong(tuple.getString(0).split(COMMA_SEPARATOR)[4])));
      bufferedWriter.newLine();
    }
    // Checking and Performing Ack
    if (CONFIG.is("ACK_ON")) {
      _collector.ack(tuple);
    }
  } catch (Throwable e) {
    LOGGER.error(EXEC_EXCP_MSG, e);
    _collector.fail(tuple);
  }
}
 
Example 39
Project: java   File: TradeExclusionPersistenceBolt.java   View source code 5 votes vote down vote up
/**
 * Persists an ineligible (excluded) trade to the exclusion file, appending
 * receive timestamps and the measured latency, then optionally acks the
 * tuple. Any failure fails the tuple so Storm can replay it.
 */
@Override
public void execute(Tuple tuple) {
  LOGGER.info("Processing INELIGIBLE Trade");
  try {
    if (CONFIG.is("EXCLUSION_TIME_DELAY_ON")) {
      Utils.sleep(CONFIG.getLong("EXCLUSION_PERSISTENCE_TIME"));
    }
    long newTime = 0;
    // try-with-resources guarantees the writer is closed even when a write
    // throws; the original leaked the FileWriter/BufferedWriter on error.
    try (BufferedWriter bufferedWriter =
        new BufferedWriter(new FileWriter(CONFIG.get("EXCL_PERSISTENCE_PATH"), true))) {
      bufferedWriter.write(tuple.getString(0));
      bufferedWriter.write(COMMA_SEPARATOR);
      bufferedWriter.write(String.valueOf(new Date()));
      bufferedWriter.write(COMMA_SEPARATOR);
      newTime = new Date().getTime();
      bufferedWriter.write(String.valueOf(newTime));
      bufferedWriter.write(COMMA_SEPARATOR);
      // Latency: now minus the send timestamp embedded in field 4 of the CSV.
      bufferedWriter.write(
          String.valueOf(newTime - Long.parseLong(tuple.getString(0).split(COMMA_SEPARATOR)[4])));
      bufferedWriter.newLine();
    }
    // Checking and Performing Ack
    if (CONFIG.is("ACK_ON")) {
      _collector.ack(tuple);
    }
  } catch (Throwable e) {
    LOGGER.error(EXEC_EXCP_MSG, e);
    _collector.fail(tuple);
  }
}
 
Example 40
Project: java   File: TradeEligibilityBolt.java   View source code 5 votes vote down vote up
@Override
public void execute(Tuple tuple) {
  try {
    // The incoming trade arrives as a single CSV string in field 0.
    String[] tradeDetails = tuple.getString(0).split(COMMA_SEPARATOR);
    boolean validFormat = tradeDetails != null && tradeDetails.length > 1;
    if (validFormat) {
      // Optional artificial delay used to simulate eligibility-check cost.
      if (CONFIG.is("ELIGIBILITY_TIME_DELAY_ON")) {
        Utils.sleep(CONFIG.getLong("ELIGIBILITY_CHECK_TIME"));
      }
      // Route the trade onto the reporting or exclusion stream.
      if (this.isTradeEligible(tradeDetails[1])) {
        LOGGER.info("Emitting Trade as ELIGIBLE");
        _collector.emit(REPORT_STREAM, tuple.getValues());
      } else {
        LOGGER.info("Emitting Trade as INELIGIBLE");
        _collector.emit(EXCLUDE_STREAM, tuple.getValues());
      }
    }
    // Acks are optional and config-driven.
    if (CONFIG.is("ACK_ON")) {
      _collector.ack(tuple);
    }
  } catch (Throwable e) {
    LOGGER.error(EXEC_EXCP_MSG, e);
    _collector.fail(tuple);
  }
}