Java Code Examples for backtype.storm.spout.SpoutOutputCollector

The following are top-voted examples showing how to use backtype.storm.spout.SpoutOutputCollector. These examples are extracted from open-source projects. You can vote up the examples you find useful; your votes help our system surface better examples.
Example 1
Project: rb-bi   File: PartitionManager.java   View source code 6 votes vote down vote up
/**
 * Emits the tuples generated from the next buffered Kafka message.
 *
 * @param collector collector used to emit the tuples, anchored by Kafka message id
 * @return NO_EMITTED when the buffer is exhausted, otherwise EMITTED_MORE_LEFT
 *         or EMITTED_END depending on whether buffered messages remain
 */
public EmitState next(SpoutOutputCollector collector) {
    // Refill the emit buffer from Kafka once it has been drained.
    if (_waitingToEmit.isEmpty()) {
        fill();
    }
    for (;;) {
        MessageAndRealOffset candidate = _waitingToEmit.pollFirst();
        if (candidate == null) {
            // Nothing buffered: nothing was emitted this round.
            return EmitState.NO_EMITTED;
        }
        Iterable<List<Object>> tuples = KafkaUtils.generateTuples(_spoutConfig, candidate.msg);
        if (tuples == null) {
            // Message produced no tuples: treat it as processed and keep scanning.
            ack(candidate.offset);
            continue;
        }
        // Anchor every tuple to its partition/offset so acks map back to the message.
        for (List<Object> tuple : tuples) {
            collector.emit(tuple, new KafkaMessageId(_partition, candidate.offset));
        }
        break;
    }
    // Report whether more buffered messages remain after this emit.
    return _waitingToEmit.isEmpty() ? EmitState.EMITTED_END : EmitState.EMITTED_MORE_LEFT;
}
 
Example 2
Project: reddit-sentiment-storm   File: SubredditCommentsSpout.java   View source code 6 votes vote down vote up
/**
 * Initializes the spout from the topology configuration: captures the
 * collector, creates the processed-item history, and parses the subreddit
 * name and comments feed URL.
 */
public void open(Map conf, TopologyContext ctx, SpoutOutputCollector collector) {
	this.collector = collector;
	this.history = new ProcessedHistory();
	this.subreddit = (String) conf.get("subreddit");

	String feedUrl = (String) conf.get("feedURL");
	try {
		this.subredditCommentsfeedURL = new URL(feedUrl);
	} catch (MalformedURLException e) {
		// A bad feed URL is fatal for this spout; fail fast at startup.
		throw new RuntimeException(e);
	}
	LOG.info("Spout subreddit:{} feedURL:{}", this.subreddit, this.subredditCommentsfeedURL);

	if (conf.containsKey("sentimentData")) {
		LOG.info("Spouts can also see sentimentData");
	}
}
 
Example 3
Project: hadooparchitecturebook   File: StockTicksSpout.java   View source code 6 votes vote down vote up
/**
 * Opens the stock tick data file from the classpath and reads it into the
 * {@code ticks} list, one line per element.
 *
 * <p>Fix: the stream returned by {@code getSystemResourceAsStream} was never
 * closed; it is now managed with try-with-resources so it is released even
 * when reading fails.
 */
@Override
public void open(Map map,
                 TopologyContext context,
                 SpoutOutputCollector outputCollector) {
  this.outputCollector = outputCollector;

  try (java.io.InputStream in =
           ClassLoader.getSystemResourceAsStream("NASDAQ_daily_prices_A.csv")) {
    ticks = IOUtils.readLines(in, Charset.defaultCharset().name());
  } catch (IOException e) {
    // A missing or unreadable data file is fatal for this spout.
    throw new RuntimeException(e);
  }
}
 
Example 4
Project: miner   File: BeginSpout.java   View source code 6 votes vote down vote up
/**
 * Initializes the spout: stores the collector, connects to Redis, starts the
 * Quartz scheduler with the registered project jobs, and creates the HBase
 * tables used by the topology.
 *
 * <p>Fix: the original caught every exception, logged it and returned
 * normally, leaving a half-initialized spout running in the topology.
 * Initialization failures are now rethrown (with the cause preserved) so the
 * worker fails fast instead of running broken.
 */
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
	try {
		_collector = collector;

		ru = new RedisUtil();
		redis = ru.getJedisInstance();

		SchedulerFactory schedulerFactory = new StdSchedulerFactory();
		Scheduler scheduler = schedulerFactory.getScheduler();
		_qManager = new QuartzManager();
		_qManager.setScheduler(scheduler);
		PlatformUtils.initRegisterProject(_qManager);
		scheduler.start();

		// init Hbase tables
		CreateTable.initHbaseTable();
	} catch (Exception ex) {
		logger.error("error:" + MySysLogger.formatException(ex));
		// Fail fast: a spout that cannot reach Redis/Quartz/HBase must not start.
		throw new RuntimeException("Failed to open BeginSpout", ex);
	}
}
 
Example 5
Project: erad2016-streamprocessing   File: TwitterSpout.java   View source code 6 votes vote down vote up
// Sets up the Twitter stream: statuses arrive on a listener thread and are
// buffered as raw JSON in a bounded queue for nextTuple() to drain.
public void open(Map conf, TopologyContext context,
                 SpoutOutputCollector collector) {
    // Bounded handoff between the Twitter callback thread and the spout;
    // offer() silently drops statuses when the topology falls behind.
    queue = new LinkedBlockingQueue<String>(1000);
    this.collector = collector;

    // Only onStatus is acted on; all other stream events are intentionally ignored.
    StatusListener listener = new StatusListener() {
        public void onStatus(Status status) {
            queue.offer(TwitterObjectFactory.getRawJSON(status));
        }

        public void onDeletionNotice(StatusDeletionNotice sdn) { }
        public void onTrackLimitationNotice(int i) { }
        public void onScrubGeo(long l, long l1) { }
        public void onStallWarning(StallWarning stallWarning) { }
        public void onException(Exception e) { }
    };

    // JSON store must be enabled for getRawJSON() above to return data.
    ConfigurationBuilder cb = new ConfigurationBuilder();
    cb.setJSONStoreEnabled(true);

    TwitterStreamFactory factory = new TwitterStreamFactory(cb.build());
    twitterStream = factory.getInstance();
    twitterStream.addListener(listener);
    // Stream only English tweets matching the "trump" keyword.
    twitterStream.filter(new FilterQuery().language("en").track("trump"));
}
 
Example 6
Project: java   File: DeliveryCheckSpout.java   View source code 6 votes vote down vote up
/**
 * Stores the collector and opens the JMS connection this spout consumes from.
 *
 * <p>Fix: removed a stale block of commented-out file-reading code.
 * NOTE(review): a JMSException here is only logged, so a spout whose
 * connection failed will silently emit nothing — consider rethrowing; the
 * log-only behavior is preserved as-is.
 */
@Override
public void open(Map arg0, TopologyContext arg1, SpoutOutputCollector arg2) {
  try {
    _collector = arg2;
    setUpConnection();
  } catch (JMSException e) {
    LOGGER.error(e);
  }
}
 
Example 7
Project: Tstream   File: BatchSpoutTrigger.java   View source code 6 votes vote down vote up
/**
 * Initializes the batch trigger: creates the in-memory batch queue, records
 * the task identity, connects to ZooKeeper, and restores the current batch
 * message id.
 *
 * <p>Fix: the RuntimeException thrown on init failure discarded the caught
 * exception; the cause is now chained so failures are diagnosable.
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
	this.collector = collector;
	this.conf = conf;
	taskName = context.getThisComponentId() + "_" + context.getThisTaskId();

	intervalCheck = new IntervalCheck();

	try {
		zkClient = BatchCommon.getZkClient(conf);

		initMsgId();

	} catch (Exception e) {
		LOG.error("", e);
		// Chain the cause so the real failure shows in the stack trace.
		throw new RuntimeException("Failed to init", e);
	}
	LOG.info("Successfully open " + taskName);
}
 
Example 8
Project: jstorm-0.9.6.3-   File: SpoutExecutors.java   View source code 6 votes vote down vote up
// Builds the spout's output collector and invokes the user spout's open()
// under the worker classloader; open() errors are recorded and reported
// rather than rethrown.
public void prepare(TaskSendTargets sendTargets, TaskTransfer transferFn,
		TopologyContext topologyContext) {

	
	// collector, in fact it call send_spout_msg
	this.output_collector = new SpoutCollector(taskId, spout, task_stats,
			sendTargets, storm_conf, transferFn, pending, topologyContext,
			exeQueue, report_error);

	try {
		// Run user spout code under its own classloader context.
		WorkerClassLoader.switchThreadContext();
		this.spout.open(storm_conf, userTopologyCtx,
				new SpoutOutputCollector(output_collector));
	} catch (Throwable e) {
		// Record the error for the executor loop and report it; not rethrown.
		error = e;
		LOG.error("spout open error ", e);
		report_error.report(e);
	} finally {
		// Always restore the thread's original classloader context.
		WorkerClassLoader.restoreThreadContext();
	}

	LOG.info("Successfully create SpoutExecutors " + idStr);

}
 
Example 9
Project: jstorm-0.9.6.3-   File: MasterBatchCoordinator.java   View source code 6 votes vote down vote up
// Restores coordinator state for all managed transactional spouts and
// prepares throttling and per-spout coordinators.
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Limits how often new transaction batches may be emitted.
    _throttler = new WindowedTimeThrottler((Number)conf.get(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS), 1);
    // One durable coordinator state per managed spout id.
    for(String spoutId: _managedSpoutIds) {
        _states.add(TransactionalState.newCoordinatorState(conf, spoutId));
    }
    _currTransaction = getStoredCurrTransaction();

    _collector = collector;
    // Max concurrently active transactions; defaults to 1 when unset.
    Number active = (Number) conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
    if(active==null) {
        _maxTransactionActive = 1;
    } else {
        _maxTransactionActive = active.intValue();
    }
    // Restore attempt ids for transactions in flight before a restart.
    _attemptIds = getStoredCurrAttempts(_currTransaction, _maxTransactionActive);

    
    for(int i=0; i<_spouts.size(); i++) {
        String txId = _managedSpoutIds.get(i);
        _coordinators.add(_spouts.get(i).getCoordinator(txId, conf, context));
    }
}
 
Example 10
Project: jstorm-0.9.6.3-   File: DRPCSpout.java   View source code 6 votes vote down vote up
/**
 * Initializes the DRPC spout. In distributed mode (no local DRPC id) it
 * builds invocation clients for the configured DRPC servers: one client per
 * server when tasks are scarce, otherwise a single client chosen by task
 * index modulo the server count.
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_collector = collector;
	if (_local_drpc_id == null) {
		int taskCount = context.getComponentTasks(
				context.getThisComponentId()).size();
		int taskIndex = context.getThisTaskIndex();

		int invocationsPort = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
		List<String> servers = (List<String>) conf.get(Config.DRPC_SERVERS);
		if (servers == null || servers.isEmpty()) {
			throw new RuntimeException(
					"No DRPC servers configured for topology");
		}

		if (taskCount < servers.size()) {
			// Fewer tasks than servers: each task talks to every server.
			for (String server : servers) {
				_clients.add(new DRPCInvocationsClient(server, invocationsPort));
			}
		} else {
			// Enough tasks: spread them across servers round-robin by index.
			String chosen = servers.get(taskIndex % servers.size());
			_clients.add(new DRPCInvocationsClient(chosen, invocationsPort));
		}
	}

}
 
Example 11
Project: jstorm-0.9.6.3-   File: FixedTupleSpout.java   View source code 6 votes vote down vote up
/**
 * Records the task context and selects this task's share of the fixed tuple
 * set: starting at this task's position in the component task list, every
 * n-th tuple (n = task count) is served by this instance.
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_context = context;
	List<Integer> tasks = context.getComponentTasks(context.getThisComponentId());

	// Locate this task's index within its component's task list.
	int myIndex = 0;
	while (myIndex < tasks.size() && tasks.get(myIndex) != context.getThisTaskId()) {
		myIndex++;
	}

	_collector = collector;
	_pending = new HashMap<String, FixedTuple>();
	_serveTuples = new ArrayList<FixedTuple>();
	// Round-robin partition of the shared tuple list across tasks.
	for (int i = myIndex; i < _tuples.size(); i += tasks.size()) {
		_serveTuples.add(_tuples.get(i));
	}
}
 
Example 12
Project: Tstream   File: FixedTupleSpout.java   View source code 6 votes vote down vote up
/**
 * Stores the context and collector and partitions the shared fixed tuple
 * list across this component's tasks: this instance serves the tuples at
 * positions congruent to its own task position (mod task count).
 */
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_context = context;
	List<Integer> taskIds = context.getComponentTasks(context.getThisComponentId());

	// Find this task's position among its component's tasks.
	int position = 0;
	for (Integer taskId : taskIds) {
		if (taskId == context.getThisTaskId()) {
			break;
		}
		position++;
	}

	_collector = collector;
	_pending = new HashMap<String, FixedTuple>();
	_serveTuples = new ArrayList<FixedTuple>();
	// Take every n-th tuple (n = task count), offset by this task's position.
	for (int i = position; i < _tuples.size(); i += taskIds.size()) {
		_serveTuples.add(_tuples.get(i));
	}
}
 
Example 13
Project: StreamCQL   File: StormSpout.java   View source code 6 votes vote down vote up
/**
 * {@inheritDoc}
 *
 * <p>Creates the emitters backed by the given collector and hands them to the
 * input stream for initialization.
 *
 * <p>Fix: the thrown exception's message said "output stream" while this
 * method initializes the <em>input</em> stream (as the adjacent log line
 * correctly states); the message now matches.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector)
{
    LOG.info("Start to open storm spout.");
    Map<String, IEmitter> emitters = createEmitters(collector);
    
    try
    {
        input.initialize(emitters);
    }
    catch (StreamingException e)
    {
        LOG.error("Failed to initialize input stream.");
        throw new RuntimeException("Failed to initialize input stream", e);
    }
    
}
 
Example 14
Project: jstorm-0.9.6.3-   File: BatchSpoutTrigger.java   View source code 6 votes vote down vote up
/**
 * Sets up the batch trigger: batch queue, task name, interval check,
 * ZooKeeper client, and restoration of the current batch message id.
 *
 * <p>Fix: the RuntimeException thrown on init failure dropped the original
 * exception, hiding the real cause; it is now chained.
 */
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
	this.collector = collector;
	this.conf = conf;
	taskName = context.getThisComponentId() + "_" + context.getThisTaskId();

	intervalCheck = new IntervalCheck();

	try {
		zkClient = BatchCommon.getZkClient(conf);

		initMsgId();

	} catch (Exception e) {
		LOG.error("", e);
		// Preserve the cause for diagnosis.
		throw new RuntimeException("Failed to init", e);
	}
	LOG.info("Successfully open " + taskName);
}
 
Example 15
Project: senti-storm   File: DatasetSpout.java   View source code 6 votes vote down vote up
/**
 * Loads the SemEval 2013 test tweets and reads the optional per-tuple sleep
 * settings used to pace emission; optionally sleeps once at startup so bolts
 * can finish preparing before the spout starts emitting.
 */
public void open(Map config, TopologyContext context,
    SpoutOutputCollector collector) {
  this.m_collector = collector;
  this.m_dataset = Configuration.getDataSetSemEval2013();
  this.m_tweets = m_dataset.getTestTweets();

  // Optional sleep between tuples emitting
  Object sleepMs = config.get(CONF_TUPLE_SLEEP_MS);
  m_tupleSleepMs = (sleepMs != null) ? (Long) sleepMs : 0;

  Object sleepNs = config.get(CONF_TUPLE_SLEEP_NS);
  m_tupleSleepNs = (sleepNs != null) ? (Long) sleepNs : 0;

  // Optional startup sleep to finish bolt preparation
  // before spout starts emitting
  Object startupSleep = config.get(CONF_STARTUP_SLEEP_MS);
  if (startupSleep != null) {
    TimeUtils.sleepMillis((Long) startupSleep);
  }
}
 
Example 16
Project: storm-mqtt   File: MQTTSpout.java   View source code 6 votes vote down vote up
/**
 * Sets up the MQTT spout: captures topology name, collector, context and
 * config, creates the incoming-message queue and pending map, then connects
 * to the broker. Connection failures are reported to Storm and rethrown.
 */
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    this.collector = collector;
    this.context = context;
    this.conf = conf;
    this.incoming = new LinkedBlockingQueue<MQTTMessage>();
    this.pending = new HashMap<Long, MQTTMessage>();

    try {
        connectMqtt();
    } catch (Exception e) {
        // Surface the failure in the Storm UI, then abort spout startup.
        this.collector.reportError(e);
        throw new RuntimeException("MQTT Connection failed.", e);
    }
}
 
Example 17
Project: learn_jstorm   File: SpoutExecutors.java   View source code 6 votes vote down vote up
// Builds the spout's output collector and calls the user spout's open()
// under the worker classloader; open() failures are reported, not rethrown.
public void prepare(TaskSendTargets sendTargets, TaskTransfer transferFn,
		TopologyContext topologyContext) {

	
	// collector, in fact it call send_spout_msg
	this.output_collector = new SpoutCollector(taskId, spout, task_stats,
			sendTargets, storm_conf, transferFn, pending, topologyContext,
			exeQueue, report_error);

	try {
		// Switch to the worker classloader before running user code.
		WorkerClassLoader.switchThreadContext();
		this.spout.open(storm_conf, userTopologyCtx,
				new SpoutOutputCollector(output_collector));
	} catch (Throwable e) {
		// Record the error for the executor loop and report it upstream.
		error = e;
		LOG.error("spout open error ", e);
		report_error.report(e);
	} finally {
		// Always restore the original classloader context.
		WorkerClassLoader.restoreThreadContext();
	}

	LOG.info("Successfully create SpoutExecutors " + idStr);

}
 
Example 18
Project: learn_jstorm   File: MasterBatchCoordinator.java   View source code 6 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _throttler = new WindowedTimeThrottler((Number)conf.get(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS), 1);
    for(String spoutId: _managedSpoutIds) {
        _states.add(TransactionalState.newCoordinatorState(conf, spoutId));
    }
    _currTransaction = getStoredCurrTransaction();

    _collector = collector;
    Number active = (Number) conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
    if(active==null) {
        _maxTransactionActive = 1;
    } else {
        _maxTransactionActive = active.intValue();
    }
    _attemptIds = getStoredCurrAttempts(_currTransaction, _maxTransactionActive);

    
    for(int i=0; i<_spouts.size(); i++) {
        String txId = _managedSpoutIds.get(i);
        _coordinators.add(_spouts.get(i).getCoordinator(txId, conf, context));
    }
}
 
Example 19
Project: jstrom   File: MasterBatchCoordinator.java   View source code 6 votes vote down vote up
// Restores the coordinator's transactional state and wires throttling and
// per-spout coordinators for each managed spout.
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Throttles how frequently new transaction batches are emitted.
    _throttler = new WindowedTimeThrottler((Number)conf.get(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS), 1);
    // Create durable state for every managed spout id.
    for(String spoutId: _managedSpoutIds) {
        _states.add(TransactionalState.newCoordinatorState(conf, spoutId));
    }
    _currTransaction = getStoredCurrTransaction();

    _collector = collector;
    // Concurrent-transaction cap; 1 when TOPOLOGY_MAX_SPOUT_PENDING is unset.
    Number active = (Number) conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
    if(active==null) {
        _maxTransactionActive = 1;
    } else {
        _maxTransactionActive = active.intValue();
    }
    // Recover attempt ids of transactions that were active before restart.
    _attemptIds = getStoredCurrAttempts(_currTransaction, _maxTransactionActive);

    
    for(int i=0; i<_spouts.size(); i++) {
        String txId = _managedSpoutIds.get(i);
        _coordinators.add(_spouts.get(i).getCoordinator(txId, conf, context));
    }
}
 
Example 20
Project: learn_jstorm   File: TransactionalSpoutCoordinator.java   View source code 6 votes vote down vote up
// Restores the transactional coordinator's durable state and configuration.
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	// Securely seeded so generated ids differ across restarts.
	_rand = new Random(Utils.secureRandomLong());
	// Durable coordinator state keyed by the topology's transactional id.
	_state = TransactionalState.newCoordinatorState(conf,
			(String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID),
			_spout.getComponentConfiguration());
	_coordinatorState = new RotatingTransactionalState(_state, META_DIR,
			true);
	_collector = collector;
	_coordinator = _spout.getCoordinator(conf, context);
	// Resume from the transaction id persisted before any restart.
	_currTransaction = getStoredCurrTransaction(_state);
	// Max concurrently active transactions; defaults to 1 when unset.
	Object active = conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
	if (active == null) {
		_maxTransactionActive = 1;
	} else {
		_maxTransactionActive = Utils.getInt(active);
	}
	_initializer = new StateInitializer();
}
 
Example 21
Project: learn_jstorm   File: BatchSpoutTrigger.java   View source code 6 votes vote down vote up
/**
 * Initializes the batch trigger: batch queue, task identity, interval check,
 * ZooKeeper client, and restores the current batch message id.
 *
 * <p>Fix: the RuntimeException thrown on init failure discarded the caught
 * exception; the cause is now chained so the failure is diagnosable.
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
	this.collector = collector;
	this.conf = conf;
	taskName = context.getThisComponentId() + "_" + context.getThisTaskId();

	intervalCheck = new IntervalCheck();

	try {
		zkClient = BatchCommon.getZkClient(conf);

		initMsgId();

	} catch (Exception e) {
		LOG.error("", e);
		// Chain the cause so the real failure shows in the stack trace.
		throw new RuntimeException("Failed to init", e);
	}
	LOG.info("Successfully open " + taskName);
}
 
Example 22
Project: Tstream   File: DRPCSpout.java   View source code 6 votes vote down vote up
/**
 * Opens the DRPC spout. When running against remote DRPC (no local id), this
 * task connects to the configured DRPC servers — all of them if there are
 * more servers than tasks, otherwise the single server matching this task's
 * index modulo the server count.
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_collector = collector;
	if (_local_drpc_id == null) {
		int totalTasks = context.getComponentTasks(
				context.getThisComponentId()).size();
		int myIndex = context.getThisTaskIndex();

		int invocationsPort = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
		List<String> serverList = (List<String>) conf.get(Config.DRPC_SERVERS);
		if (serverList == null || serverList.isEmpty()) {
			throw new RuntimeException(
					"No DRPC servers configured for topology");
		}

		if (totalTasks < serverList.size()) {
			// Not enough tasks to cover all servers: connect to each one.
			for (String server : serverList) {
				_clients.add(new DRPCInvocationsClient(server, invocationsPort));
			}
		} else {
			// One server per task, assigned round-robin by task index.
			_clients.add(new DRPCInvocationsClient(
					serverList.get(myIndex % serverList.size()), invocationsPort));
		}
	}

}
 
Example 23
Project: alfresco-apache-storm-demo   File: FileSpout.java   View source code 6 votes vote down vote up
/**
 * Reads every configured input file line by line and queues each non-blank
 * line (as UTF-8 bytes) for later emission. Failure to read one file is
 * reported and the remaining files are still processed.
 */
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context,
        SpoutOutputCollector collector) {
    _collector = collector;

    for (String inputFile : _inputFiles) {
        try (BufferedReader reader = Files.newBufferedReader(
                Paths.get(inputFile), StandardCharsets.UTF_8)) {
            for (String line = reader.readLine(); line != null; line = reader.readLine()) {
                // Skip empty and whitespace-only lines.
                if (!StringUtils.isBlank(line)) {
                    toPut.add(line.getBytes(StandardCharsets.UTF_8));
                }
            }
        } catch (IOException x) {
            // Best effort per file: report and continue with the next input.
            System.err.format("IOException: %s%n", x);
        }
    }
}
 
Example 24
Project: Tstream   File: SpoutExecutors.java   View source code 6 votes vote down vote up
// Creates the spout's output collector and runs the user spout's open()
// under the worker classloader; errors are reported rather than rethrown.
public void prepare(TaskSendTargets sendTargets, TaskTransfer transferFn,
		TopologyContext topologyContext) {

	
	// collector, in fact it call send_spout_msg
	this.output_collector = new SpoutCollector(taskId, spout, task_stats,
			sendTargets, storm_conf, transferFn, pending, topologyContext,
			exeQueue, report_error);

	try {
		// User code must run under the worker classloader.
		WorkerClassLoader.switchThreadContext();
		this.spout.open(storm_conf, userTopologyCtx,
				new SpoutOutputCollector(output_collector));
	} catch (Throwable e) {
		// Keep the error for the executor loop; report instead of rethrowing.
		error = e;
		LOG.error("spout open error ", e);
		report_error.report(e);
	} finally {
		// Restore the previous classloader context in all cases.
		WorkerClassLoader.restoreThreadContext();
	}

	LOG.info("Successfully create SpoutExecutors " + idStr);

}
 
Example 25
Project: aeolus   File: OrderedFileInputSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testOpenSinglePartition() throws Exception {
	// Spout configured with a single input file, i.e. a single partition.
	TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();
	Config config = new Config();
	config.put(TestOrderedFileInputSpout.INPUT_FILE_NAME, "dummyFileName");

	// Stub out file access so no real file is required.
	FileReader readerMock = PowerMockito.mock(FileReader.class);
	PowerMockito.whenNew(FileReader.class).withArguments("dummyFileName").thenReturn(readerMock);
	BufferedReader bufferedMock = PowerMockito.mock(BufferedReader.class);
	PowerMockito.whenNew(BufferedReader.class).withArguments(readerMock).thenReturn(bufferedMock);

	spout.open(config, mock(TopologyContext.class), mock(SpoutOutputCollector.class));

	// Partition 0 exists and can be closed; partition 1 must be rejected.
	Assert.assertTrue(spout.closePartition(new Integer(0)));
	try {
		spout.closePartition(new Integer(1));
		Assert.fail();
	} catch(RuntimeException e) {
		// expected
	}
}
 
Example 26
Project: aeolus   File: OrderedFileInputSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testSingleEmptyPartition() {
	TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();

	Config config = new Config();
	config.put(TestOrderedFileInputSpout.NUMBER_OF_PARTITIONS, new Integer(1));

	// Collector that records everything the spout emits.
	TestSpoutOutputCollector recorder = new TestSpoutOutputCollector();
	spout.open(config, mock(TopologyContext.class), new SpoutOutputCollector(recorder));

	// Repeated polls of an empty partition must emit nothing.
	for(int i = 0; i < 3; ++i) {
		spout.nextTuple();
	}

	Assert.assertEquals(0, recorder.output.size());
}
 
Example 27
Project: jstrom   File: BatchSpoutTrigger.java   View source code 6 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
    this.collector = collector;
    this.conf = conf;
    taskName = context.getThisComponentId() + "_" + context.getThisTaskId();

    intervalCheck = new IntervalCheck();

    try {
        zkClient = BatchCommon.getZkClient(conf);

        initMsgId();

    } catch (Exception e) {
        LOG.error("", e);
        throw new RuntimeException("Failed to init");
    }
    LOG.info("Successfully open " + taskName);
}
 
Example 28
Project: aeolus   File: OrderedInputSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testSingleEmptyPartition() {
	// One partition containing no data at all.
	@SuppressWarnings("unchecked")
	List<Deque<String>> partitions = new LinkedList<Deque<String>>(Arrays.asList(new LinkedList<String>()));
	TestOrderedInputSpout spout = new TestOrderedInputSpout(partitions, this.r);

	Config config = new Config();
	config.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, new Integer(1));

	TestSpoutOutputCollector recorder = new TestSpoutOutputCollector();
	spout.open(config, mock(TopologyContext.class), new SpoutOutputCollector(recorder));

	// Polling an empty partition repeatedly must never emit a tuple.
	for(int i = 0; i < 3; ++i) {
		spout.nextTuple();
	}

	Assert.assertEquals(0, recorder.output.size());
}
 
Example 29
Project: aeolus   File: OrderedInputSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testAllPartitionsEmpty() {
	// Three partitions, all without data.
	@SuppressWarnings("unchecked")
	List<Deque<String>> partitions = new LinkedList<Deque<String>>(Arrays.asList(new LinkedList<String>(),
		new LinkedList<String>(), new LinkedList<String>()));
	TestOrderedInputSpout spout = new TestOrderedInputSpout(partitions, this.r);

	Config config = new Config();
	config.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, new Integer(3));

	TestSpoutOutputCollector recorder = new TestSpoutOutputCollector();
	spout.open(config, mock(TopologyContext.class), new SpoutOutputCollector(recorder));

	// No partition has data, so repeated polls must emit nothing.
	for(int i = 0; i < 5; ++i) {
		spout.nextTuple();
	}

	Assert.assertEquals(0, recorder.output.size());
}
 
Example 30
Project: aeolus   File: SpoutDataDrivenStreamRateDriverTest.java   View source code 6 votes vote down vote up
@Test
public void testNextTupleFixedSecond() {
	// Wrap an IncSpout in a data-driven rate driver configured in seconds.
	IRichSpout wrapped = new IncSpout();
	DataDrivenStreamRateDriverSpout<Integer> driver = new DataDrivenStreamRateDriverSpout<Integer>(wrapped, 0,
		TimeUnit.SECONDS);

	driver.open(mock(Config.class), mock(TopologyContext.class), mock(SpoutOutputCollector.class));

	// Emitting 5 tuples at the driven rate should take about 5 seconds
	// (tolerance of 1 second in the assertion).
	long begin = System.nanoTime();
	driver.activate();
	for(int i = 0; i < 5; ++i) {
		driver.nextTuple();
	}
	long end = System.nanoTime();

	Assert.assertEquals(5, (end - begin) / 1000 / 1000 / 1000, 1);
}
 
Example 31
Project: incubator-samoa   File: StormEntranceProcessingItem.java   View source code 6 votes vote down vote up
/**
 * Stores the collector and notifies the entrance processor that its task has
 * been created.
 *
 * <p>Fix: removed several blocks of stale commented-out code (queue creation,
 * stream wiring and spout-executor startup) that no longer reflect this
 * class's behavior.
 */
@Override
public void open(@SuppressWarnings("rawtypes") Map conf, TopologyContext context, SpoutOutputCollector collector) {
  this.collector = collector;

  this.entranceProcessor.onCreate(context.getThisTaskId());
}
 
Example 32
Project: aeolus   File: IncSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testExecuteUnique() {
	IncSpout spout = new IncSpout(1);

	TestSpoutOutputCollector recorder = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(recorder));

	// Emit five tuples while building the expected output [0], [1], ... [4].
	List<List<Object>> expected = new LinkedList<List<Object>>();
	for(int i = 0; i < 5; ++i) {
		ArrayList<Object> tuple = new ArrayList<Object>();
		tuple.add(new Long(i));
		expected.add(tuple);

		spout.nextTuple();
	}

	Assert.assertEquals(expected, recorder.output.get(Utils.DEFAULT_STREAM_ID));
}
 
Example 33
Project: aeolus   File: IncSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testExecuteUniqueMultipleStreams() {
	// Spout declared on the default stream plus one custom stream.
	String[] streamIds = new String[] {Utils.DEFAULT_STREAM_ID, "myStreamId"};
	IncSpout spout = new IncSpout(streamIds);

	TestSpoutOutputCollector recorder = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(recorder));

	// Emit five tuples while building the expected output [0], [1], ... [4].
	List<List<Object>> expected = new LinkedList<List<Object>>();
	for(int i = 0; i < 5; ++i) {
		ArrayList<Object> tuple = new ArrayList<Object>();
		tuple.add(new Long(i));
		expected.add(tuple);

		spout.nextTuple();
	}

	// Every declared stream must have received the identical sequence.
	for(String stream : streamIds) {
		Assert.assertEquals(expected, recorder.output.get(stream));
	}
}
 
Example 34
Project: aeolus   File: IncSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testExecute() {
	IncSpout spout = new IncSpout(this.r.nextDouble(), 1);

	TestSpoutOutputCollector recorder = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(recorder));

	for(int i = 0; i < 50; ++i) {
		spout.nextTuple();
	}

	// Walk consecutive output pairs and check the first attribute never decreases.
	List<Object> previous = recorder.output.get(Utils.DEFAULT_STREAM_ID).removeFirst();
	for(List<Object> current : recorder.output.get(Utils.DEFAULT_STREAM_ID)) {
		Assert.assertTrue(((Long)previous.get(0)).longValue() <= ((Long)current.get(0)).longValue());
		previous = current;
	}
}
 
Example 35
Project: alfresco-apache-storm-demo   File: ElasticSearchSpout.java   View source code 6 votes vote down vote up
/**
 * Reads the ElasticSearch index settings from the topology configuration,
 * opens the ES client (fatal on failure), and prepares the URL partitioner
 * before storing the collector.
 */
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName, "status");
    docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName, "status");
    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf, ESStatusMaxInflightParamName, 1);

    try {
        client = ElasticSearchConnection.getClient(stormConf, ESBoltType);
    } catch (Exception e1) {
        // No ES connection means the spout cannot function at all.
        LOG.error("Can't connect to ElasticSearch", e1);
        throw new RuntimeException(e1);
    }

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    _collector = collector;
}
 
Example 36
Project: aeolus   File: RandomSpoutTest.java   View source code 6 votes vote down vote up
@Test
public void testExecute() {
	// Random tuple width between 1 and 10 attributes; value bound 100.
	int width = 1 + this.r.nextInt(10);
	RandomSpout spout = new RandomSpout(width, 100);

	TestSpoutOutputCollector recorder = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(recorder));

	for(int i = 0; i < 50; ++i) {
		spout.nextTuple();

		// Each call must append exactly one tuple of the configured width.
		Assert.assertEquals(i + 1, recorder.output.get(Utils.DEFAULT_STREAM_ID).size());
		Assert.assertEquals(width, recorder.output.get(Utils.DEFAULT_STREAM_ID).get(i).size());

		// Every attribute must lie in (0, 100].
		for(int j = 0; j < width; ++j) {
			int value = ((Integer)recorder.output.get(Utils.DEFAULT_STREAM_ID).get(i).get(j)).intValue();
			Assert.assertTrue(0 < value);
			Assert.assertTrue(100 >= value);
		}
	}

}
 
Example 37
Project: jstrom   File: FixedTupleSpout.java   View source code 6 votes vote down vote up
/**
 * Captures context and collector, then picks this task's slice of the shared
 * tuple list: the tuples at positions congruent to this task's index
 * (mod task count).
 */
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _context = context;
    List<Integer> tasks = context.getComponentTasks(context.getThisComponentId());

    // Scan for this task id's position in the component's task list.
    int offset = 0;
    while (offset < tasks.size() && tasks.get(offset) != context.getThisTaskId()) {
        offset++;
    }

    _collector = collector;
    _pending = new HashMap<String, FixedTuple>();
    _serveTuples = new ArrayList<FixedTuple>();
    for (int i = offset; i < _tuples.size(); i += tasks.size()) {
        _serveTuples.add(_tuples.get(i));
    }
}
 
Example 38
Project: spiderz   File: WikiCrawlerSpout.java   View source code 6 votes vote down vote up
/**
 * Connects to Redis and, when the crawl queue is empty, seeds it with a few
 * starting article titles to crawl from.
 */
@Override
public void open(Map arg0, TopologyContext topCtx,
		SpoutOutputCollector collector) {
	_collector = collector;

	// connect to redis to access queue which
	// contains unexplored titles
	jedis = new Jedis(this.redisIp, Integer.parseInt(this.redisPort), 2000);

	// set the starting point to crawl if already not set
	if (jedis.llen(queueId) == 0) {
		// give a variety of links to start crawling from
		String[] seeds = { "Computer science", "Botany", "Physics", "Mathematics" };
		for (String seed : seeds) {
			jedis.rpush(queueId, seed);
		}
	}
}
 
Example 39
Project: cloud-computing-specialization   File: FileReaderSpout.java   View source code 6 votes vote down vote up
/**
 * Initializes the file reader for this spout and stores the task context and
 * collector.
 *
 * <p>Fix: a missing input file was previously only printed via
 * {@code printStackTrace()}, leaving {@code in} null and guaranteeing a
 * NullPointerException on the first nextTuple(); the failure is now fatal at
 * open() time with the cause preserved.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    try {
        in = new BufferedReader(new FileReader(filePath));
    } catch (FileNotFoundException e) {
        throw new RuntimeException("Cannot open input file: " + filePath, e);
    }

    this.context = context;
    this.collector = collector;
}
 
Example 40
Project: Practical-Real-time-Processing-and-Analytics   File: FileSpout.java   View source code 5 votes vote down vote up
/**
 * Stores the collector and opens a buffered reader over the configured file;
 * any failure to open the file aborts spout startup.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
	//fileName = (String) conf.get("file");
	this.collector = collector;

	BufferedReader opened;
	try {
		opened = new BufferedReader(new FileReader(fileName));
	} catch (Exception e) {
		// An unreadable input file is fatal for this spout.
		throw new RuntimeException(e);
	}
	reader = opened;
}