Java Code Examples for backtype.storm.spout.SpoutOutputCollector

The following are top-voted examples showing how to use backtype.storm.spout.SpoutOutputCollector. These examples are extracted from open-source projects. You can vote up the examples you find useful; your votes help our system surface more good examples.
Example 1
Project: rb-bi   File: PartitionManager.java   Source Code and License 6 votes vote down vote up
/**
 * Emits tuples generated from the next Kafka message waiting in the queue.
 *
 * @param collector collector used to emit the generated tuples
 * @return NO_EMITTED when nothing was available, EMITTED_MORE_LEFT when
 *         more messages remain queued, EMITTED_END when the queue is drained
 */
public EmitState next(SpoutOutputCollector collector) {
    // Refill the local queue from Kafka once it has been drained.
    if (_waitingToEmit.isEmpty()) {
        fill();
    }
    while (true) {
        MessageAndRealOffset toEmit = _waitingToEmit.pollFirst();
        if (toEmit == null) {
            return EmitState.NO_EMITTED;
        }
        Iterable<List<Object>> tups = KafkaUtils.generateTuples(_spoutConfig, toEmit.msg);
        if (tups != null) {
            // Anchor each tuple to its Kafka offset so it can be acked/failed.
            for (List<Object> tup : tups) {
                collector.emit(tup, new KafkaMessageId(_partition, toEmit.offset));
            }
            break;
        } else {
            // Message produced no tuples; ack the offset so it is not replayed.
            ack(toEmit.offset);
        }
    }
    if (!_waitingToEmit.isEmpty()) {
        return EmitState.EMITTED_MORE_LEFT;
    } else {
        return EmitState.EMITTED_END;
    }
}
 
Example 2
Project: reddit-sentiment-storm   File: SubredditCommentsSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Prepares the spout: stores the collector, sets up the processed-item
 * history and reads the subreddit name and feed URL from the configuration.
 */
public void open(Map conf, TopologyContext ctx, SpoutOutputCollector collector) {
	this.collector = collector;
	this.history = new ProcessedHistory();
	this.subreddit = (String) conf.get("subreddit");

	String feedUrlValue = (String) conf.get("feedURL");
	try {
		this.subredditCommentsfeedURL = new URL(feedUrlValue);
	} catch (MalformedURLException e) {
		// A malformed feed URL makes the spout useless; fail fast.
		throw new RuntimeException(e);
	}
	LOG.info("Spout subreddit:{} feedURL:{}", this.subreddit, this.subredditCommentsfeedURL);

	if (conf.containsKey("sentimentData")) {
		LOG.info("Spouts can also see sentimentData");
	}
}
 
Example 3
Project: hadooparchitecturebook   File: StockTicksSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Opens the spout: loads the stock tick CSV from the classpath into the
 * {@code ticks} list and keeps the collector for emitting later.
 *
 * @param map             topology configuration (unused)
 * @param context         topology context (unused)
 * @param outputCollector collector used to emit tick tuples
 */
@Override
public void open(Map map,
                 TopologyContext context,
                 SpoutOutputCollector outputCollector) {
  this.outputCollector = outputCollector;

  try {
    // Read the whole file into memory using the platform default charset.
    // NOTE(review): the default charset is platform-dependent — confirm the
    // CSV is ASCII/UTF-8 so this behaves the same everywhere.
    ticks = 
      IOUtils.readLines(ClassLoader.getSystemResourceAsStream(
 "NASDAQ_daily_prices_A.csv"),
        Charset.defaultCharset().name());
  } catch (IOException e) {
      // Without the data the spout cannot emit anything; abort startup.
      throw new RuntimeException(e);
  }
}
 
Example 4
Project: miner   File: BeginSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Initializes the spout: stores the collector, connects to Redis, starts the
 * Quartz scheduler with the registered project jobs and creates the HBase
 * tables the topology writes to.
 *
 * @throws RuntimeException if any initialization step fails. The previous
 *         version logged and swallowed the exception, leaving the spout in a
 *         half-initialized state that failed later with confusing NPEs.
 */
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
	try {
		_collector = collector;

		ru = new RedisUtil();
		redis = ru.getJedisInstance();

		SchedulerFactory schedulerFactory = new StdSchedulerFactory();
		Scheduler scheduler = schedulerFactory.getScheduler();
		_qManager = new QuartzManager();
		_qManager.setScheduler(scheduler);
		PlatformUtils.initRegisterProject(_qManager);
		scheduler.start();

		//init Hbase tables
		CreateTable.initHbaseTable();
	}catch(Exception ex){
		logger.error("error:"+MySysLogger.formatException(ex));
		// Fail fast instead of continuing with a partially initialized spout.
		throw new RuntimeException("Failed to open BeginSpout", ex);
	}
}
 
Example 5
Project: erad2016-streamprocessing   File: TwitterSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Opens the spout: creates a bounded buffer for incoming tweets and starts a
 * twitter4j stream that pushes the raw JSON of each status into it.
 */
public void open(Map conf, TopologyContext context,
                 SpoutOutputCollector collector) {
    // Bounded buffer between the twitter4j callback thread and nextTuple().
    queue = new LinkedBlockingQueue<String>(1000);
    this.collector = collector;

    StatusListener listener = new StatusListener() {
        public void onStatus(Status status) {
            // offer() drops tweets when the queue is full instead of blocking
            // the twitter4j dispatcher thread.
            queue.offer(TwitterObjectFactory.getRawJSON(status));
        }

        public void onDeletionNotice(StatusDeletionNotice sdn) { }
        public void onTrackLimitationNotice(int i) { }
        public void onScrubGeo(long l, long l1) { }
        public void onStallWarning(StallWarning stallWarning) { }
        public void onException(Exception e) { }
    };

    // Raw JSON storage must be enabled for getRawJSON() to return data.
    ConfigurationBuilder cb = new ConfigurationBuilder();
    cb.setJSONStoreEnabled(true);

    TwitterStreamFactory factory = new TwitterStreamFactory(cb.build());
    twitterStream = factory.getInstance();
    twitterStream.addListener(listener);
    // Stream English tweets matching the track keyword.
    twitterStream.filter(new FilterQuery().language("en").track("trump"));
}
 
Example 6
Project: java   File: DeliveryCheckSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Stores the collector and opens the JMS connection the spout consumes from.
 *
 * @param arg0 topology configuration (unused)
 * @param arg1 topology context (unused)
 * @param arg2 collector used later to emit tuples
 * @throws RuntimeException if the JMS connection cannot be established; the
 *         previous version only logged the error and continued with a spout
 *         that could never emit. Dead commented-out file-reader code removed.
 */
@Override
public void open(Map arg0, TopologyContext arg1, SpoutOutputCollector arg2) {
  try {
    _collector = arg2;
    setUpConnection();
  } catch (JMSException e) {
    LOGGER.error(e);
    // Without a JMS connection the spout is useless; fail fast.
    throw new RuntimeException("Failed to set up JMS connection", e);
  }
}
 
Example 7
Project: Tstream   File: BatchSpoutTrigger.java   Source Code and License 6 votes vote down vote up
/**
 * Opens the trigger spout: creates the batch queue, records the collector
 * and configuration, derives a unique task name and initializes the batch
 * message id from ZooKeeper.
 *
 * @throws RuntimeException if ZooKeeper initialization fails
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
	this.collector = collector;
	this.conf = conf;
	taskName = context.getThisComponentId() + "_" + context.getThisTaskId();

	intervalCheck = new IntervalCheck();

	try {
		zkClient = BatchCommon.getZkClient(conf);

		initMsgId();

	} catch (Exception e) {
		LOG.error("", e);
		// Preserve the cause so the failure is diagnosable upstream.
		throw new RuntimeException("Failed to init", e);
	}
	LOG.info("Successfully open " + taskName);
}
 
Example 8
Project: jstorm-0.9.6.3-   File: SpoutExecutors.java   Source Code and License 6 votes vote down vote up
/**
 * Creates the internal SpoutCollector and opens the user spout under the
 * worker class loader. Any Throwable from open() is recorded and reported
 * through the task error mechanism rather than rethrown.
 */
public void prepare(TaskSendTargets sendTargets, TaskTransfer transferFn,
		TopologyContext topologyContext) {

	
	// collector, in fact it call send_spout_msg
	this.output_collector = new SpoutCollector(taskId, spout, task_stats,
			sendTargets, storm_conf, transferFn, pending, topologyContext,
			exeQueue, report_error);

	try {
		// Run user code under the user (topology) class loader.
		WorkerClassLoader.switchThreadContext();
		this.spout.open(storm_conf, userTopologyCtx,
				new SpoutOutputCollector(output_collector));
	} catch (Throwable e) {
		error = e;
		LOG.error("spout open error ", e);
		report_error.report(e);
	} finally {
		// Always restore the original context class loader.
		WorkerClassLoader.restoreThreadContext();
	}

	LOG.info("Successfully create SpoutExecutors " + idStr);

}
 
Example 9
Project: jstorm-0.9.6.3-   File: MasterBatchCoordinator.java   Source Code and License 6 votes vote down vote up
/**
 * Initializes the coordinator spout: sets up batch-emission throttling,
 * restores coordinator state for every managed spout, and creates one
 * coordinator per managed spout.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Throttle how frequently new batches may be emitted.
    _throttler = new WindowedTimeThrottler((Number)conf.get(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS), 1);
    for(String spoutId: _managedSpoutIds) {
        _states.add(TransactionalState.newCoordinatorState(conf, spoutId));
    }
    _currTransaction = getStoredCurrTransaction();

    _collector = collector;
    // Max transactions in flight; defaults to 1 when unset.
    Number active = (Number) conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
    if(active==null) {
        _maxTransactionActive = 1;
    } else {
        _maxTransactionActive = active.intValue();
    }
    _attemptIds = getStoredCurrAttempts(_currTransaction, _maxTransactionActive);

    
    for(int i=0; i<_spouts.size(); i++) {
        String txId = _managedSpoutIds.get(i);
        _coordinators.add(_spouts.get(i).getCoordinator(txId, conf, context));
    }
}
 
Example 10
Project: jstorm-0.9.6.3-   File: DRPCSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Connects this spout task to the configured DRPC invocation servers. In
 * local-DRPC mode (non-null _local_drpc_id) no remote clients are created.
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_collector = collector;
	if (_local_drpc_id != null) {
		return;
	}

	int numTasks = context.getComponentTasks(
			context.getThisComponentId()).size();
	int taskIndex = context.getThisTaskIndex();

	int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
	List<String> servers = (List<String>) conf.get(Config.DRPC_SERVERS);
	if (servers == null || servers.isEmpty()) {
		throw new RuntimeException(
				"No DRPC servers configured for topology");
	}

	if (numTasks < servers.size()) {
		// Fewer tasks than servers: this task talks to every server.
		for (String server : servers) {
			_clients.add(new DRPCInvocationsClient(server, port));
		}
	} else {
		// Enough tasks: spread them across the servers round-robin.
		int serverIndex = taskIndex % servers.size();
		_clients.add(new DRPCInvocationsClient(servers.get(serverIndex), port));
	}

}
 
Example 11
Project: jstorm-0.9.6.3-   File: FixedTupleSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Distributes the fixed tuple list across all tasks of this component: each
 * task serves every k-th tuple starting from its own position in the
 * component's task list (k = number of tasks).
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_context = context;
	_collector = collector;
	_pending = new HashMap<String, FixedTuple>();
	_serveTuples = new ArrayList<FixedTuple>();

	List<Integer> tasks = context.getComponentTasks(context
			.getThisComponentId());
	// Locate this task's position within the component's task list.
	int myIndex = 0;
	while (myIndex < tasks.size()
			&& tasks.get(myIndex) != context.getThisTaskId()) {
		myIndex++;
	}
	for (int i = myIndex; i < _tuples.size(); i += tasks.size()) {
		_serveTuples.add(_tuples.get(i));
	}
}
 
Example 12
Project: Tstream   File: FixedTupleSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Distributes the fixed tuple list across all tasks of this component: each
 * task serves every k-th tuple starting from its own position in the
 * component's task list (k = number of tasks).
 */
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_context = context;
	_collector = collector;
	_pending = new HashMap<String, FixedTuple>();
	_serveTuples = new ArrayList<FixedTuple>();

	List<Integer> tasks = context.getComponentTasks(context
			.getThisComponentId());
	// Locate this task's position within the component's task list.
	int myIndex = 0;
	while (myIndex < tasks.size()
			&& tasks.get(myIndex) != context.getThisTaskId()) {
		myIndex++;
	}
	for (int i = myIndex; i < _tuples.size(); i += tasks.size()) {
		_serveTuples.add(_tuples.get(i));
	}
}
 
Example 13
Project: StreamCQL   File: StormSpout.java   Source Code and License 6 votes vote down vote up
/**
 * {@inheritDoc}
 *
 * Opens the CQL spout: builds the emitter map from the collector and
 * initializes the input stream with it.
 *
 * @throws RuntimeException if the input stream cannot be initialized
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector)
{
    LOG.info("Start to open storm spout.");
    Map<String, IEmitter> emitters = createEmitters(collector);
    
    try
    {
        input.initialize(emitters);
    }
    catch (StreamingException e)
    {
        LOG.error("Failed to initialize input stream.");
        // Fixed: the exception message previously said "output stream" while
        // the log line said "input stream"; it is the input being initialized.
        throw new RuntimeException("Failed to initialize input stream", e);
    }
    
}
 
Example 14
Project: jstorm-0.9.6.3-   File: BatchSpoutTrigger.java   Source Code and License 6 votes vote down vote up
/**
 * Opens the trigger spout: creates the batch queue, records the collector
 * and configuration, derives a unique task name and initializes the batch
 * message id from ZooKeeper.
 *
 * @throws RuntimeException if ZooKeeper initialization fails
 */
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
	this.collector = collector;
	this.conf = conf;
	taskName = context.getThisComponentId() + "_" + context.getThisTaskId();

	intervalCheck = new IntervalCheck();

	try {
		zkClient = BatchCommon.getZkClient(conf);

		initMsgId();

	} catch (Exception e) {
		LOG.error("", e);
		// Preserve the cause so the failure is diagnosable upstream.
		throw new RuntimeException("Failed to init", e);
	}
	LOG.info("Successfully open " + taskName);
}
 
Example 15
Project: senti-storm   File: DatasetSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Prepares the spout: loads the SemEval2013 test tweets and reads the
 * optional per-tuple sleep and startup-sleep settings from the config.
 */
public void open(Map config, TopologyContext context,
    SpoutOutputCollector collector) {
  this.m_collector = collector;
  this.m_dataset = Configuration.getDataSetSemEval2013();
  this.m_tweets = m_dataset.getTestTweets();

  // Optional sleep between tuple emissions (millis + nanos), default 0.
  Object sleepMs = config.get(CONF_TUPLE_SLEEP_MS);
  m_tupleSleepMs = (sleepMs != null) ? (Long) sleepMs : 0;

  Object sleepNs = config.get(CONF_TUPLE_SLEEP_NS);
  m_tupleSleepNs = (sleepNs != null) ? (Long) sleepNs : 0;

  // Optional startup sleep so bolts finish preparing before emitting starts.
  Object startupSleep = config.get(CONF_STARTUP_SLEEP_MS);
  if (startupSleep != null) {
    TimeUtils.sleepMillis((Long) startupSleep);
  }
}
 
Example 16
Project: storm-mqtt   File: MQTTSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Stores the topology name, collector, context and configuration, creates
 * the incoming/pending message structures and connects to the MQTT broker.
 */
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.context = context;
    this.conf = conf;
    this.topologyName = (String) conf.get(Config.TOPOLOGY_NAME);

    this.incoming = new LinkedBlockingQueue<MQTTMessage>();
    this.pending = new HashMap<Long, MQTTMessage>();

    try {
        connectMqtt();
    } catch (Exception e) {
        // Report through the collector so the error shows in the Storm UI,
        // then abort startup — the spout is useless without a connection.
        this.collector.reportError(e);
        throw new RuntimeException("MQTT Connection failed.", e);
    }

}
 
Example 17
Project: learn_jstorm   File: SpoutExecutors.java   Source Code and License 6 votes vote down vote up
/**
 * Creates the internal SpoutCollector and opens the user spout under the
 * worker class loader. Any Throwable from open() is recorded and reported
 * through the task error mechanism rather than rethrown.
 */
public void prepare(TaskSendTargets sendTargets, TaskTransfer transferFn,
		TopologyContext topologyContext) {

	
	// collector, in fact it call send_spout_msg
	this.output_collector = new SpoutCollector(taskId, spout, task_stats,
			sendTargets, storm_conf, transferFn, pending, topologyContext,
			exeQueue, report_error);

	try {
		// Run user code under the user (topology) class loader.
		WorkerClassLoader.switchThreadContext();
		this.spout.open(storm_conf, userTopologyCtx,
				new SpoutOutputCollector(output_collector));
	} catch (Throwable e) {
		error = e;
		LOG.error("spout open error ", e);
		report_error.report(e);
	} finally {
		// Always restore the original context class loader.
		WorkerClassLoader.restoreThreadContext();
	}

	LOG.info("Successfully create SpoutExecutors " + idStr);

}
 
Example 18
Project: learn_jstorm   File: MasterBatchCoordinator.java   Source Code and License 6 votes vote down vote up
/**
 * Initializes the coordinator spout: sets up batch-emission throttling,
 * restores coordinator state for every managed spout, and creates one
 * coordinator per managed spout.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Throttle how frequently new batches may be emitted.
    _throttler = new WindowedTimeThrottler((Number)conf.get(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS), 1);
    for(String spoutId: _managedSpoutIds) {
        _states.add(TransactionalState.newCoordinatorState(conf, spoutId));
    }
    _currTransaction = getStoredCurrTransaction();

    _collector = collector;
    // Max transactions in flight; defaults to 1 when unset.
    Number active = (Number) conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
    if(active==null) {
        _maxTransactionActive = 1;
    } else {
        _maxTransactionActive = active.intValue();
    }
    _attemptIds = getStoredCurrAttempts(_currTransaction, _maxTransactionActive);

    
    for(int i=0; i<_spouts.size(); i++) {
        String txId = _managedSpoutIds.get(i);
        _coordinators.add(_spouts.get(i).getCoordinator(txId, conf, context));
    }
}
 
Example 19
Project: jstrom   File: MasterBatchCoordinator.java   Source Code and License 6 votes vote down vote up
/**
 * Initializes the coordinator spout: sets up batch-emission throttling,
 * restores coordinator state for every managed spout, and creates one
 * coordinator per managed spout.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Throttle how frequently new batches may be emitted.
    _throttler = new WindowedTimeThrottler((Number)conf.get(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS), 1);
    for(String spoutId: _managedSpoutIds) {
        _states.add(TransactionalState.newCoordinatorState(conf, spoutId));
    }
    _currTransaction = getStoredCurrTransaction();

    _collector = collector;
    // Max transactions in flight; defaults to 1 when unset.
    Number active = (Number) conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
    if(active==null) {
        _maxTransactionActive = 1;
    } else {
        _maxTransactionActive = active.intValue();
    }
    _attemptIds = getStoredCurrAttempts(_currTransaction, _maxTransactionActive);

    
    for(int i=0; i<_spouts.size(); i++) {
        String txId = _managedSpoutIds.get(i);
        _coordinators.add(_spouts.get(i).getCoordinator(txId, conf, context));
    }
}
 
Example 20
Project: learn_jstorm   File: TransactionalSpoutCoordinator.java   Source Code and License 6 votes vote down vote up
/**
 * Opens the transactional coordinator: restores coordinator state from the
 * transactional state store, creates the user coordinator and determines
 * how many transactions may be active at once.
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_rand = new Random(Utils.secureRandomLong());
	// Per-topology transactional state, keyed by the transactional id.
	_state = TransactionalState.newCoordinatorState(conf,
			(String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID),
			_spout.getComponentConfiguration());
	_coordinatorState = new RotatingTransactionalState(_state, META_DIR,
			true);
	_collector = collector;
	_coordinator = _spout.getCoordinator(conf, context);
	_currTransaction = getStoredCurrTransaction(_state);
	// TOPOLOGY_MAX_SPOUT_PENDING bounds concurrent transactions (default 1).
	Object active = conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
	if (active == null) {
		_maxTransactionActive = 1;
	} else {
		_maxTransactionActive = Utils.getInt(active);
	}
	_initializer = new StateInitializer();
}
 
Example 21
Project: learn_jstorm   File: BatchSpoutTrigger.java   Source Code and License 6 votes vote down vote up
/**
 * Opens the trigger spout: creates the batch queue, records the collector
 * and configuration, derives a unique task name and initializes the batch
 * message id from ZooKeeper.
 *
 * @throws RuntimeException if ZooKeeper initialization fails
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
	this.collector = collector;
	this.conf = conf;
	taskName = context.getThisComponentId() + "_" + context.getThisTaskId();

	intervalCheck = new IntervalCheck();

	try {
		zkClient = BatchCommon.getZkClient(conf);

		initMsgId();

	} catch (Exception e) {
		LOG.error("", e);
		// Preserve the cause so the failure is diagnosable upstream.
		throw new RuntimeException("Failed to init", e);
	}
	LOG.info("Successfully open " + taskName);
}
 
Example 22
Project: Tstream   File: DRPCSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Connects this spout task to the configured DRPC invocation servers. In
 * local-DRPC mode (non-null _local_drpc_id) no remote clients are created.
 */
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	_collector = collector;
	if (_local_drpc_id != null) {
		return;
	}

	int numTasks = context.getComponentTasks(
			context.getThisComponentId()).size();
	int taskIndex = context.getThisTaskIndex();

	int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
	List<String> servers = (List<String>) conf.get(Config.DRPC_SERVERS);
	if (servers == null || servers.isEmpty()) {
		throw new RuntimeException(
				"No DRPC servers configured for topology");
	}

	if (numTasks < servers.size()) {
		// Fewer tasks than servers: this task talks to every server.
		for (String server : servers) {
			_clients.add(new DRPCInvocationsClient(server, port));
		}
	} else {
		// Enough tasks: spread them across the servers round-robin.
		int serverIndex = taskIndex % servers.size();
		_clients.add(new DRPCInvocationsClient(servers.get(serverIndex), port));
	}

}
 
Example 23
Project: alfresco-apache-storm-demo   File: FileSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Loads every non-blank line of each configured input file as a UTF-8 byte
 * record and stores the collector for later emitting.
 */
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context,
        SpoutOutputCollector collector) {
    _collector = collector;

    for (String inputFile : _inputFiles) {
        Path inputPath = Paths.get(inputFile);
        try (BufferedReader reader = Files.newBufferedReader(inputPath,
                StandardCharsets.UTF_8)) {
            for (String line = reader.readLine(); line != null; line = reader.readLine()) {
                if (!StringUtils.isBlank(line)) {
                    toPut.add(line.getBytes(StandardCharsets.UTF_8));
                }
            }
        } catch (IOException x) {
            // Best-effort: skip unreadable files but report them.
            System.err.format("IOException: %s%n", x);
        }
    }
}
 
Example 24
Project: Tstream   File: SpoutExecutors.java   Source Code and License 6 votes vote down vote up
public void prepare(TaskSendTargets sendTargets, TaskTransfer transferFn,
		TopologyContext topologyContext) {

	
	// collector, in fact it call send_spout_msg
	this.output_collector = new SpoutCollector(taskId, spout, task_stats,
			sendTargets, storm_conf, transferFn, pending, topologyContext,
			exeQueue, report_error);

	try {
		WorkerClassLoader.switchThreadContext();
		this.spout.open(storm_conf, userTopologyCtx,
				new SpoutOutputCollector(output_collector));
	} catch (Throwable e) {
		error = e;
		LOG.error("spout open error ", e);
		report_error.report(e);
	} finally {
		WorkerClassLoader.restoreThreadContext();
	}

	LOG.info("Successfully create SpoutExecutors " + idStr);

}
 
Example 25
Project: aeolus   File: OrderedFileInputSpoutTest.java   Source Code and License 6 votes vote down vote up
/**
 * open() with an INPUT_FILE_NAME but no partition count must create exactly
 * one partition (id 0): closing partition 0 succeeds, closing partition 1
 * must throw a RuntimeException.
 */
@Test
public void testOpenSinglePartition() throws Exception {
	TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();
	
	Config conf = new Config();
	conf.put(TestOrderedFileInputSpout.INPUT_FILE_NAME, "dummyFileName");
	
	// Intercept constructor calls so no real file is needed.
	FileReader fileReaderMock = PowerMockito.mock(FileReader.class);
	PowerMockito.whenNew(FileReader.class).withArguments("dummyFileName").thenReturn(fileReaderMock);
	
	BufferedReader bufferedReaderMock = PowerMockito.mock(BufferedReader.class);
	PowerMockito.whenNew(BufferedReader.class).withArguments(fileReaderMock).thenReturn(bufferedReaderMock);
	
	spout.open(conf, mock(TopologyContext.class), mock(SpoutOutputCollector.class));
	Assert.assertTrue(spout.closePartition(new Integer(0)));
	try {
		spout.closePartition(new Integer(1));
		Assert.fail();
	} catch(RuntimeException e) {
		// expected
	}
}
 
Example 26
Project: aeolus   File: OrderedFileInputSpoutTest.java   Source Code and License 6 votes vote down vote up
/**
 * A spout configured with a single, empty partition must not emit anything,
 * no matter how often nextTuple() is called.
 */
@Test
public void testSingleEmptyPartition() {
	TestOrderedFileInputSpout spout = new TestOrderedFileInputSpout();
	
	Config conf = new Config();
	// Integer.valueOf(...) instead of the deprecated new Integer(...) ctor.
	conf.put(TestOrderedFileInputSpout.NUMBER_OF_PARTITIONS, Integer.valueOf(1));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
	
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	
	Assert.assertEquals(0, col.output.size());
}
 
Example 27
Project: jstrom   File: BatchSpoutTrigger.java   Source Code and License 6 votes vote down vote up
/**
 * Opens the trigger spout: creates the batch queue, records the collector
 * and configuration, derives a unique task name and initializes the batch
 * message id from ZooKeeper.
 *
 * @throws RuntimeException if ZooKeeper initialization fails
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    batchQueue = new LinkedBlockingQueue<BatchSpoutMsgId>();
    this.collector = collector;
    this.conf = conf;
    taskName = context.getThisComponentId() + "_" + context.getThisTaskId();

    intervalCheck = new IntervalCheck();

    try {
        zkClient = BatchCommon.getZkClient(conf);

        initMsgId();

    } catch (Exception e) {
        LOG.error("", e);
        // Preserve the cause so the failure is diagnosable upstream.
        throw new RuntimeException("Failed to init", e);
    }
    LOG.info("Successfully open " + taskName);
}
 
Example 28
Project: aeolus   File: OrderedInputSpoutTest.java   Source Code and License 6 votes vote down vote up
/**
 * A single empty partition must never produce output, regardless of how
 * many times nextTuple() is called.
 */
@Test
public void testSingleEmptyPartition() {
	@SuppressWarnings("unchecked")
	List<Deque<String>> data = new LinkedList<Deque<String>>(Arrays.asList(new LinkedList<String>()));
	TestOrderedInputSpout spout = new TestOrderedInputSpout(data, this.r);
	
	Config conf = new Config();
	// Integer.valueOf(...) instead of the deprecated new Integer(...) ctor.
	conf.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, Integer.valueOf(1));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
	
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	
	Assert.assertEquals(0, col.output.size());
}
 
Example 29
Project: aeolus   File: OrderedInputSpoutTest.java   Source Code and License 6 votes vote down vote up
/**
 * Three empty partitions must never produce output, regardless of how many
 * times nextTuple() is called.
 */
@Test
public void testAllPartitionsEmpty() {
	@SuppressWarnings("unchecked")
	List<Deque<String>> data = new LinkedList<Deque<String>>(Arrays.asList(new LinkedList<String>(),
		new LinkedList<String>(), new LinkedList<String>()));
	TestOrderedInputSpout spout = new TestOrderedInputSpout(data, this.r);
	
	Config conf = new Config();
	// Integer.valueOf(...) instead of the deprecated new Integer(...) ctor.
	conf.put(TestOrderedInputSpout.NUMBER_OF_PARTITIONS, Integer.valueOf(3));
	
	TestSpoutOutputCollector col = new TestSpoutOutputCollector();
	spout.open(conf, mock(TopologyContext.class), new SpoutOutputCollector(col));
	
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	spout.nextTuple();
	
	Assert.assertEquals(0, col.output.size());
}
 
Example 30
Project: aeolus   File: SpoutDataDrivenStreamRateDriverTest.java   Source Code and License 6 votes vote down vote up
/**
 * With a data-driven rate of one tuple per second, emitting five tuples
 * must take about five seconds (one-second tolerance).
 */
@Test
public void testNextTupleFixedSecond() {
	IRichSpout worker = new IncSpout();
	DataDrivenStreamRateDriverSpout<Integer> driver = new DataDrivenStreamRateDriverSpout<Integer>(worker, 0,
		TimeUnit.SECONDS);
	
	Config cfg = mock(Config.class);
	TopologyContext c = mock(TopologyContext.class);
	SpoutOutputCollector col = mock(SpoutOutputCollector.class);
	
	driver.open(cfg, c, col);
	
	long start = System.nanoTime();
	driver.activate();
	for(int i = 0; i < 5; ++i) {
		driver.nextTuple();
	}
	long stop = System.nanoTime();
	
	// Convert nanoseconds to whole seconds; allow a delta of 1 second.
	Assert.assertEquals(5, (stop - start) / 1000 / 1000 / 1000, 1);
}
 
Example 31
Project: incubator-samoa   File: StormEntranceProcessingItem.java   Source Code and License 6 votes vote down vote up
/**
 * Stores the collector and notifies the entrance processor that it is now
 * running inside this task. (Stale commented-out scaffolding for streams,
 * tuple queues and spout-starter executors has been removed.)
 */
@Override
public void open(@SuppressWarnings("rawtypes") Map conf, TopologyContext context, SpoutOutputCollector collector) {
  this.collector = collector;
  this.entranceProcessor.onCreate(context.getThisTaskId());
}
 
Example 32
Project: aeolus   File: IncSpoutTest.java   Source Code and License 6 votes vote down vote up
/**
 * IncSpout with step 1 must emit the sequence 0,1,2,... on the default
 * stream, one single-attribute tuple per nextTuple() call.
 */
@Test
public void testExecuteUnique() {
	IncSpout spout = new IncSpout(1);
	
	TestSpoutOutputCollector collector = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(collector));
	
	List<List<Object>> result = new LinkedList<List<Object>>();
	
	for(int i = 0; i < 5; ++i) {
		ArrayList<Object> attributes = new ArrayList<Object>();
		// Long.valueOf(...) instead of the deprecated new Long(...) ctor.
		attributes.add(Long.valueOf(i));
		result.add(attributes);
		
		spout.nextTuple();
	}
	
	Assert.assertEquals(result, collector.output.get(Utils.DEFAULT_STREAM_ID));
}
 
Example 33
Project: aeolus   File: IncSpoutTest.java   Source Code and License 6 votes vote down vote up
/**
 * IncSpout configured with multiple stream ids must emit the same 0,1,2,...
 * sequence on every configured stream.
 */
@Test
public void testExecuteUniqueMultipleStreams() {
	String[] streamIds = new String[] {Utils.DEFAULT_STREAM_ID, "myStreamId"};
	IncSpout spout = new IncSpout(streamIds);
	
	TestSpoutOutputCollector collector = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(collector));
	
	List<List<Object>> result = new LinkedList<List<Object>>();
	
	for(int i = 0; i < 5; ++i) {
		ArrayList<Object> attributes = new ArrayList<Object>();
		// Long.valueOf(...) instead of the deprecated new Long(...) ctor.
		attributes.add(Long.valueOf(i));
		result.add(attributes);
		
		spout.nextTuple();
	}
	
	for(String stream : streamIds) {
		Assert.assertEquals(result, collector.output.get(stream));
	}
}
 
Example 34
Project: aeolus   File: IncSpoutTest.java   Source Code and License 6 votes vote down vote up
/**
 * Tuples emitted by IncSpout must carry non-decreasing counter values in
 * their first attribute.
 */
@Test
public void testExecute() {
	IncSpout spout = new IncSpout(this.r.nextDouble(), 1);
	
	TestSpoutOutputCollector collector = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(collector));
	
	for(int i = 0; i < 50; ++i) {
		spout.nextTuple();
	}
	
	// Walk consecutive tuple pairs and check the counter never decreases.
	List<Object> previous = collector.output.get(Utils.DEFAULT_STREAM_ID).removeFirst();
	for(List<Object> current : collector.output.get(Utils.DEFAULT_STREAM_ID)) {
		Assert.assertTrue(((Long)previous.get(0)).longValue() <= ((Long)current.get(0)).longValue());
		previous = current;
	}
}
 
Example 35
Project: alfresco-apache-storm-demo   File: ElasticSearchSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Reads the index configuration, connects to ElasticSearch, sets up URL
 * partitioning and finally stores the collector.
 */
@Override
public void open(Map stormConf, TopologyContext context,
        SpoutOutputCollector collector) {
    // Index and document-type settings, both defaulting to "status".
    String statusDefault = "status";
    indexName = ConfUtils.getString(stormConf, ESStatusIndexNameParamName,
            statusDefault);
    docType = ConfUtils.getString(stormConf, ESStatusDocTypeParamName,
            statusDefault);
    maxInFlightURLsPerBucket = ConfUtils.getInt(stormConf,
            ESStatusMaxInflightParamName, 1);

    try {
        client = ElasticSearchConnection.getClient(stormConf, ESBoltType);
    } catch (Exception e) {
        // No ES connection means no work source; abort startup.
        LOG.error("Can't connect to ElasticSearch", e);
        throw new RuntimeException(e);
    }

    partitioner = new URLPartitioner();
    partitioner.configure(stormConf);

    _collector = collector;
}
 
Example 36
Project: aeolus   File: RandomSpoutTest.java   Source Code and License 6 votes vote down vote up
/**
 * RandomSpout must emit exactly one tuple per nextTuple() call, each with
 * the configured number of attributes, all within the range (0, 100].
 */
@Test
public void testExecute() {
	int numberOfAttributes = 1 + this.r.nextInt(10);
	RandomSpout spout = new RandomSpout(numberOfAttributes, 100);
	
	TestSpoutOutputCollector collector = new TestSpoutOutputCollector();
	spout.open(null, null, new SpoutOutputCollector(collector));
	
	for(int i = 0; i < 50; ++i) {
		spout.nextTuple();
		// Exactly one more tuple, with the configured attribute count.
		Assert.assertEquals(i + 1, collector.output.get(Utils.DEFAULT_STREAM_ID).size());
		Assert.assertEquals(numberOfAttributes, collector.output.get(Utils.DEFAULT_STREAM_ID).get(i).size());
		for(int j = 0; j < numberOfAttributes; ++j) {
			int value = ((Integer)collector.output.get(Utils.DEFAULT_STREAM_ID).get(i).get(j)).intValue();
			Assert.assertTrue(0 < value);
			Assert.assertTrue(100 >= value);
		}
	}
	
}
 
Example 37
Project: jstrom   File: FixedTupleSpout.java   Source Code and License 6 votes vote down vote up
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _context = context;
    List<Integer> tasks = context.getComponentTasks(context.getThisComponentId());
    int startIndex;
    for (startIndex = 0; startIndex < tasks.size(); startIndex++) {
        if (tasks.get(startIndex) == context.getThisTaskId()) {
            break;
        }
    }
    _collector = collector;
    _pending = new HashMap<String, FixedTuple>();
    _serveTuples = new ArrayList<FixedTuple>();
    for (int i = startIndex; i < _tuples.size(); i += tasks.size()) {
        _serveTuples.add(_tuples.get(i));
    }
}
 
Example 38
Project: spiderz   File: WikiCrawlerSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Connects to the Redis work queue of unexplored article titles and seeds
 * it with a few diverse starting points when it is empty.
 */
@Override
public void open(Map arg0, TopologyContext topCtx,
		SpoutOutputCollector collector) {
	_collector = collector;

	// Redis holds the queue of unexplored titles (2 second timeout).
	jedis = new Jedis(this.redisIp, Integer.parseInt(this.redisPort), 2000);

	// Seed the crawl frontier when nothing is queued yet.
	if (jedis.llen(queueId) == 0) {
		String[] seeds = {"Computer science", "Botany", "Physics", "Mathematics"};
		for (String seed : seeds) {
			jedis.rpush(queueId, seed);
		}
	}
}
 
Example 39
Project: cloud-computing-specialization   File: FileReaderSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Initializes the buffered reader over the configured file path and stores
 * the topology context and collector.
 *
 * @throws RuntimeException if the input file cannot be found; the previous
 *         version only printed the stack trace and left {@code in} null,
 *         causing a NullPointerException on the first nextTuple() call.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {

    try {
        in = new BufferedReader(new FileReader(filePath));
    } catch (FileNotFoundException e) {
        // Fail fast rather than continuing with a null reader.
        throw new RuntimeException("Input file not found: " + filePath, e);
    }

    this.context = context;
    this.collector = collector;
}
 
Example 40
Project: Practical-Real-time-Processing-and-Analytics   File: FileSpout.java   Source Code and License 5 votes vote down vote up
/**
 * Stores the collector and opens a buffered reader over the input file.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
	this.collector = collector;

	try {
		FileReader source = new FileReader(fileName);
		reader = new BufferedReader(source);
	} catch (Exception e) {
		// The spout is useless without its input file; abort startup.
		throw new RuntimeException(e);
	}
}
 
Example 41
Project: Practical-Real-time-Processing-and-Analytics   File: FileSpout.java   Source Code and License 5 votes vote down vote up
/**
 * Stores the collector and opens a buffered reader over the input file.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
	this.collector = collector;

	try {
		FileReader source = new FileReader(fileName);
		reader = new BufferedReader(source);
	} catch (Exception e) {
		// The spout is useless without its input file; abort startup.
		throw new RuntimeException(e);
	}
}
 
Example 42
Project: storm-scheduler   File: UuidSpout.java   Source Code and License 5 votes vote down vote up
/**
 * Opens the spout: derives a UUID string whose hash maps back to this task's
 * own index, then prepares optional scheduler monitoring.
 *
 * NOTE(review): the loop uses this.uuid.hashCode() % this.numSpouts, and
 * String.hashCode() can be negative, so the modulo can be negative too —
 * confirm this matches the partitioning done on the bolt side. Also,
 * new String(bytes) decodes raw MD5 bytes with the platform default
 * charset; verify that matches the hashing used elsewhere.
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    MessageDigest md;
    int counter;

    this.thisTaskIndex = context.getThisTaskIndex();
    this.numSpouts = context.getComponentTasks(context.getThisComponentId()).size();
    counter = 0;

    try {
        md = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException("Couldn't find MD5 algorithm.", e);
    }

    // we want to create a message that hashes to exacly one of the following spouts. As there are the same number
    // of bolts on each level as there are spouts, we just keep looking until we find a uuid whose hash code would
    // be assigned to the id of this spout (if it were a bolt).
    do {
        if (++counter > 1000 * 1000) {
            // Give up rather than spin forever if no matching UUID is found.
            throw new RuntimeException("Unable to generate required UUID in 1 mio tries.");
        }
        byte[] bytes = md.digest(UUID.randomUUID().toString().getBytes());
        this.uuid = new String(bytes);
    } while (this.uuid.hashCode() % this.numSpouts != this.thisTaskIndex);

    this.collector = collector;

    if (!this.disableAniello) {
        // this will create/configure the worker monitor once per worker
        WorkerMonitor.getInstance().setContextInfo(context);

        // this object is used in the emit/execute method to compute the number of inter-node messages
        this.taskMonitor = new TaskMonitor(context.getThisTaskId());
    }
}
 
Example 43
Project: storm-scheduler   File: RandomSpout.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Seed the random generator used by nextTuple() and keep the collector.
    this.rnd = new Random();
    this.collector = collector;

    if (!this.disableAniello) {
        // Configure the per-worker monitor (created once per worker process).
        WorkerMonitor.getInstance().setContextInfo(context);

        // Per-task monitor consulted while emitting to count inter-node messages.
        this.taskMonitor = new TaskMonitor(context.getThisTaskId());
    }
}
 
Example 44
Project: fiware-sinfonier   File: BaseSinfonierSpout.java   Source Code and License 5 votes vote down vote up
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public final void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Parse the component's JSON config into both a JSONObject view and a
    // plain Map view, then hand off to the user-defined open hook.
    mapper = new ObjectMapper();
    _collector = collector;
    this.configParams = (JSONObject) new JSONObject(this.config).get("params");
    try {
        this.configMap = mapper.readValue(this.config, new TypeReference<Map<String, Object>>() {});
        mapParams = (Map<String, Object>) configMap.get("params");
    } catch (IOException e) {
        // Fail fast: the original only printed the stack trace, leaving
        // configMap/mapParams null — a guaranteed NPE later in the spout.
        throw new RuntimeException("Unable to parse spout config JSON", e);
    }
    this.useropen();
}
 
Example 45
Project: Get-ENVS   File: RandomSentenceSpout.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
  // Keep the collector handy for nextTuple() and create the RNG that
  // drives random sentence selection.
  this._collector = collector;
  this._rand = new Random();
}
 
Example 46
Project: es-hadoop-v2.2.0   File: EsSpout.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;

    // Merge the topology conf with the spout-level config (spout settings win).
    // Parameterized map instead of the original raw LinkedHashMap type.
    LinkedHashMap<Object, Object> copy = new LinkedHashMap<Object, Object>(conf);
    copy.putAll(spoutConfig);

    StormSettings settings = new StormSettings(copy);

    InitializationUtils.setValueReaderIfNotSet(settings, JdkValueReader.class, log);

    ackReads = settings.getStormSpoutReliable();

    // Reliable reads need bookkeeping for in-transit tuples, replays and retries.
    if (ackReads) {
        inTransitQueue = new LinkedHashMap<Object, Object>();
        replayQueue = new LinkedList<Object[]>();
        retries = new HashMap<Object, Integer>();
        queueSize = settings.getStormSpoutReliableQueueSize();
        tupleRetries = settings.getStormSpoutReliableRetriesPerTuple();
        tupleFailure = settings.getStormSpoutReliableTupleFailureHandling();
    }

    int totalTasks = context.getComponentTasks(context.getThisComponentId()).size();
    int currentTask = context.getThisTaskIndex();

    // match the partitions based on the current topology
    List<PartitionDefinition> partitions = RestService.findPartitions(settings, log);
    List<PartitionDefinition> assigned = RestService.assignPartitions(partitions, currentTask, totalTasks);
    iterator = RestService.multiReader(settings, assigned, log);
}
 
Example 47
Project: es-hadoop-v2.2.0   File: TestSpout.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Wrap the real collector so emitted tuples can be intercepted in tests.
    InterceptingSpoutOutputCollector wrapped = new InterceptingSpoutOutputCollector(collector);
    this.collector = wrapped;

    // Delegate to the wrapped spout, if one was configured.
    if (spout != null) {
        spout.open(conf, context, wrapped);
    }
}
 
Example 48
Project: ignite-book-code-samples   File: RandomSentenceSpout.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Storm calls this once per task: set up the random generator used to
    // pick sentences in nextTuple(), and stash the collector.
    _rand = new Random();
    _collector = collector;
}
 
Example 49
Project: sourcevirtues-samples   File: RandomSentenceTestSpout.java   Source Code and License 5 votes vote down vote up
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Prepare emission state: RNG, collector, and the pre-generated sentences.
    _rand = new Random();
    _collector = collector;
    genSentences();
}
 
Example 50
Project: java   File: TradeCollectorSpout.java   Source Code and License — 5 votes
@Override
public void open(Map arg0, TopologyContext arg1, SpoutOutputCollector arg2) {
  // Keep the collector, then set up the JMS connection and in-flight cache.
  _collector = arg2;
  try {
    this.setUpJMSConnection();
    inflightCacheDAO = new InflightCacheDAO();
  } catch (Exception e) {
    // Log, then rethrow: the original swallowed the failure, leaving the
    // spout running without a JMS connection or DAO — unable to do anything.
    LOGGER.error(e);
    throw new RuntimeException("Spout initialization failed", e);
  }
}
 
Example 51
Project: jstorm-0.9.6.3-   File: SequenceSpout.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context,
		SpoutOutputCollector collector) {
	this.collector = collector;

	// A configured "spout.max.sending.num" caps how many tuples we send.
	Object maxSendingNum = conf.get("spout.max.sending.num");
	isLimited = (maxSendingNum != null);
	if (isLimited) {
		SPOUT_MAX_SEND_NUM = JStormUtils.parseLong(maxSendingNum);
	}

	isFinished = false;

	// Per-task throughput counter, keyed by "<component>:<taskId>".
	tpsCounter = new TpsCounter(context.getThisComponentId() + ":"
			+ context.getThisTaskId());

	MAX_PENDING_COUNTER = getMaxPending(conf);

	bufferLen = JStormUtils.parseInt(conf.get("byte.buffer.len"), 0);

	// Random payload generator, explicitly re-seeded with the current time.
	random = new Random();
	random.setSeed(System.currentTimeMillis());

	LOG.info("Finish open, buffer Len:"  + bufferLen);
}
 
Example 52
Project: jstorm-0.9.6.3-   File: RichSpoutBatchExecutor.java   Source Code and License 5 votes vote down vote up
/**
 * Emits one batch for transaction {@code tx} by driving the wrapped rich
 * spout's nextTuple() loop, recording the emitted message ids per txid.
 */
@Override
public void emitBatch(TransactionAttempt tx, Object coordinatorMeta, TridentCollector collector) {
    long txid = tx.getTransactionId();
    
    long now = System.currentTimeMillis();
    // Periodically rotate the id map; entries that aged out are failed.
    if(now - lastRotate > rotateTime) {
        Map<Long, List<Object>> failed = idsMap.rotate();
        for(Long id: failed.keySet()) {
            // NOTE(review): acknowledged upstream bug — rotate() already
            // removed the entry, so fail() can no longer look its ids up.
            fail(id);
        }
        lastRotate = now;
    }
    
    // A replayed transaction id means the previous attempt failed.
    if(idsMap.containsKey(txid)) {
        fail(txid);
    }
    
    _collector.reset(collector);
    // Lazily open the wrapped spout on the first batch.
    if(!prepared) {
        _spout.open(_conf, _context, new SpoutOutputCollector(_collector));
        prepared = true;
    }
    for(int i=0; i<_maxBatchSize; i++) {
        _spout.nextTuple();
        // Stop early once a nextTuple() call emits nothing new.
        if(_collector.numEmitted < i) {
            break;
        }
    }
    idsMap.put(txid, _collector.ids);

}
 
Example 53
Project: jstorm-0.9.6.3-   File: RichSpoutBatchTriggerer.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Hand the delegate a collector that rewrites the output stream id.
    _delegate.open(conf, context, new SpoutOutputCollector(new StreamOverrideCollector(collector)));

    // Gather the task ids of every component subscribed to the coordination stream.
    Map<String, Grouping> coordTargets = Utils.get(context.getThisTargets(),
            _coordStream, new HashMap<String, Grouping>());
    _outputTasks = new ArrayList<Integer>();
    for (String component : coordTargets.keySet()) {
        _outputTasks.addAll(context.getComponentTasks(component));
    }

    _rand = new Random(Utils.secureRandomLong());
}
 
Example 54
Project: jstrom   File: PumaSpout.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector _collector) {
    // Wire up the collector and the queues used to hand events to nextTuple()
    // and to track tuples awaiting acknowledgement.
    collector = _collector;
    receiveQueue = new LinkedBlockingQueue<RowChangedEvent>();
    waitingForAck = new ConcurrentHashMap<String, RowChangedEvent>();

    // Build the Puma client configuration: DML events only — no DDL, no
    // transaction boundary events.
    ConfigurationBuilder builder = new ConfigurationBuilder();
    builder.ddl(false);
    builder.dml(true);
    builder.transaction(false);
    if (pumaSeqFileBase != null) {
        builder.seqFileBase(pumaSeqFileBase);
    }
    builder.host(pumaHost);
    builder.port(pumaPort);
    builder.serverId(pumaServerId);
    builder.name(pumaName);
    for (Entry<String, String[]> watched : watchTables.entrySet()) {
        builder.tables(watched.getKey(), watched.getValue());
    }
    builder.target(pumaTarget);

    // Register our listener and start streaming row-change events.
    PumaClient client = new PumaClient(builder.build());
    listener = new PumaEventListener();
    client.register(listener);
    client.start();
}
 
Example 55
Project: jstorm-0.9.6.3-   File: PartitionConsumer.java   Source Code and License 5 votes vote down vote up
/**
 * Drains buffered Kafka messages into the collector, up to the configured
 * batch size. Returns EMIT_END when the buffer is exhausted, EMIT_MORE when
 * messages remain for a later call.
 */
public EmitState emit(SpoutOutputCollector collector) {
    if (emittingMessages.isEmpty()) {
        fillMessages();
    }

    int count = 0;
    while (true) {
        MessageAndOffset toEmitMsg = emittingMessages.pollFirst();
        if (toEmitMsg == null) {
            // Buffer drained before reaching the batch limit.
            return EmitState.EMIT_END;
        }
        count ++;
        Iterable<List<Object>> tups = generateTuples(toEmitMsg.message());

        if (tups != null) {
            for (List<Object> tuple : tups) {
                LOG.debug("emit message {}", new String(Utils.toByteArray(toEmitMsg.message().payload())));
                // Anchor each tuple with partition+offset so ack/fail can
                // resolve it back to the Kafka message.
                collector.emit(tuple, new KafkaMessageId(partition, toEmitMsg.offset()));
            }
            // NOTE(review): the batch cap is only checked for messages that
            // produced tuples; un-decodable messages below don't count.
            if(count>=config.batchSendCount) {
                break;
            }
        } else {
            // Message produced no tuples — ack immediately so the offset advances.
            ack(toEmitMsg.offset());
        }
    }

    if (emittingMessages.isEmpty()) {
        return EmitState.EMIT_END;
    } else {
        return EmitState.EMIT_MORE;
    }
}
 
Example 56
Project: jstrom   File: JStormMetricsReporter.java   Source Code and License 5 votes vote down vote up
public void setOutputCollector(Object outputCollector) {
    // Accept either a spout or a bolt collector; anything else is ignored.
    // (The two classes are unrelated, so branch order is immaterial.)
    if (outputCollector instanceof SpoutOutputCollector) {
        this.spoutOutput = (SpoutOutputCollector) outputCollector;
    } else if (outputCollector instanceof OutputCollector) {
        this.boltOutput = (OutputCollector) outputCollector;
    }
}
 
Example 57
Project: learn_jstorm   File: RichSpoutBatchExecutor.java   Source Code and License 5 votes vote down vote up
/**
 * Emits one batch for transaction {@code tx} by driving the wrapped rich
 * spout's nextTuple() loop, recording the emitted message ids per txid.
 */
@Override
public void emitBatch(TransactionAttempt tx, Object coordinatorMeta, TridentCollector collector) {
    long txid = tx.getTransactionId();
    
    long now = System.currentTimeMillis();
    // Periodically rotate the id map; entries that aged out are failed.
    if(now - lastRotate > rotateTime) {
        Map<Long, List<Object>> failed = idsMap.rotate();
        for(Long id: failed.keySet()) {
            //TODO: this isn't right... it's not in the map anymore —
            // rotate() already removed the entry, so fail() cannot look
            // the ids up again (acknowledged upstream bug).
            fail(id);
        }
        lastRotate = now;
    }
    
    // A replayed transaction id means the previous attempt failed.
    if(idsMap.containsKey(txid)) {
        fail(txid);
    }
    
    _collector.reset(collector);
    // Lazily open the wrapped spout on the first batch.
    if(!prepared) {
        _spout.open(_conf, _context, new SpoutOutputCollector(_collector));
        prepared = true;
    }
    for(int i=0; i<_maxBatchSize; i++) {
        _spout.nextTuple();
        // Stop early once a nextTuple() call emits nothing new.
        if(_collector.numEmitted < i) {
            break;
        }
    }
    idsMap.put(txid, _collector.ids);

}
 
Example 58
Project: learn_jstorm   File: RichSpoutBatchTriggerer.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // The delegate spout emits through a collector that overrides the stream id.
    SpoutOutputCollector wrapped =
            new SpoutOutputCollector(new StreamOverrideCollector(collector));
    _delegate.open(conf, context, wrapped);

    _outputTasks = new ArrayList<Integer>();
    // Every task subscribed to the coordination stream must be notified.
    for (String component : Utils.get(context.getThisTargets(), _coordStream,
            new HashMap<String, Grouping>()).keySet()) {
        _outputTasks.addAll(context.getComponentTasks(component));
    }

    _rand = new Random(Utils.secureRandomLong());
}
 
Example 59
Project: jstrom   File: PythonShellMetricsSpout.java   Source Code and License 5 votes vote down vote up
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Let the shell-spout machinery initialize first.
    super.open(conf, context, collector);

    // Publish a custom counter metric, reported every 5 seconds.
    context.registerMetric("my-custom-shellspout-metric", new CountShellMetric(), 5);
}
 
Example 60
Project: learn_jstorm   File: PartitionConsumer.java   Source Code and License 5 votes vote down vote up
/**
 * Drains buffered Kafka messages into the collector, up to the configured
 * batch size. Returns EMIT_END when the buffer is exhausted, EMIT_MORE when
 * messages remain for a later call.
 */
public EmitState emit(SpoutOutputCollector collector) {
    if (emittingMessages.isEmpty()) {
        fillMessages();
    }

    int count = 0;
    while (true) {
        MessageAndOffset toEmitMsg = emittingMessages.pollFirst();
        if (toEmitMsg == null) {
            // Buffer drained before reaching the batch limit.
            return EmitState.EMIT_END;
        }
        count ++;
        Iterable<List<Object>> tups = generateTuples(toEmitMsg.message());

        if (tups != null) {
            for (List<Object> tuple : tups) {
                LOG.debug("emit message {}", new String(Utils.toByteArray(toEmitMsg.message().payload())));
                // Anchor each tuple with partition+offset so ack/fail can
                // resolve it back to the Kafka message.
                collector.emit(tuple, new KafkaMessageId(partition, toEmitMsg.offset()));
            }
            // NOTE(review): the batch cap is only checked for messages that
            // produced tuples; un-decodable messages below don't count.
            if(count>=config.batchSendCount) {
                break;
            }
        } else {
            // Message produced no tuples — ack immediately so the offset advances.
            ack(toEmitMsg.offset());
        }
    }

    if (emittingMessages.isEmpty()) {
        return EmitState.EMIT_END;
    } else {
        return EmitState.EMIT_MORE;
    }
}