backtype.storm.spout.SpoutOutputCollector Java Examples
The following examples show how to use backtype.storm.spout.SpoutOutputCollector.
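Before the individual examples, here is a minimal, self-contained sketch of the common pattern they all follow: the SpoutOutputCollector passed to open() is stored in a field and later used from nextTuple() to emit tuples. The class, field, and stream names below are illustrative only and do not come from any of the projects listed.

import java.util.Map;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

public class NumberSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;
    private long counter;

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        // Keep a reference to the collector; it is the only handle for emitting tuples later.
        this.collector = collector;
        this.counter = 0;
    }

    @Override
    public void nextTuple() {
        // Emit one tuple per call, using the counter value as the message id so it can be acked or failed.
        collector.emit(new Values(counter), counter);
        counter++;
        Utils.sleep(100); // avoid busy-spinning when there is nothing else to emit
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("number"));
    }
}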
Example #1
Source File: CorrelationSpout.java From eagle with Apache License 2.0
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("open method invoked");
    }
    this.conf = conf;
    this.context = context;
    this.collector = collector;
    this.taskIndex = context.getThisTaskIndex();

    // initialize an empty SpoutSpec
    cachedSpoutSpec = new SpoutSpec(topologyId, new HashMap<>(), new HashMap<>(), new HashMap<>());

    changeNotifyService.registerListener(this);
    changeNotifyService.init(config, MetadataType.SPOUT);

    // register KafkaSpout metric
    kafkaSpoutMetric = new KafkaSpoutMetric();
    context.registerMetric("kafkaSpout", kafkaSpoutMetric, 60);

    this.serializer = Serializers.newPartitionedEventSerializer(this);
}
Example #2
Source File: EMQSpout.java From galaxy-sdk-java with Apache License 2.0
@Override
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
    collector = spoutOutputCollector;
    EMQClientFactory clientFactory = new EMQClientFactory(emqConfig.credential);
    messageClient = clientFactory.newMessageClient(emqConfig.endpoint);
    checkTopologyTimeout(clientFactory.newQueueClient(emqConfig.endpoint), map);

    deleteMessageThread = new DeleteMessageThread();
    fetchMessageThread = new FetchMessageThread();
    ackedMessagesQueue = new LinkedBlockingQueue<String>();
    int capacity = emqConfig.emqCoordinator.newReceiveMessageRequest().getMaxReceiveMessageNumber();
    fetchedMessageQueue = new LinkedBlockingQueue<ReceiveMessageResponse>(capacity);
    fetchMessageThread.start();
    deleteMessageThread.start();
    LOG.info("Open EMQSpout");
}
Example #3
Source File: GenericInternalTestSpout.java From opensoc-streaming with Apache License 2.0
@SuppressWarnings("rawtypes")
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _collector = collector;
    try {
        Reader = new FileReader();
        jsons = Reader.readFromFile(_filename);
    } catch (IOException e) {
        System.out.println("Could not read sample JSONs");
        e.printStackTrace();
    }
}
Example #4
Source File: StormSpout.java From PoseidonX with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    LOG.info("Start to open storm spout.");
    Map<String, IEmitter> emitters = createEmitters(collector);
    try {
        input.initialize(emitters);
    } catch (StreamingException e) {
        LOG.error("Failed to initialize input stream.");
        throw new RuntimeException("Failed to initialize output stream", e);
    }
}
Example #5
Source File: ScheduleTransactionSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    super.open(conf, context, collector);
    spoutExecutor.open(conf, context, new SpoutOutputCollector(outputCollector));

    int threadPoolNum = JStormUtils.parseInt(conf.get("transaction.schedule.thread.pool"), 1);
    int delay = JStormUtils.parseInt(conf.get("transaction.schedule.batch.delay.ms"), 1000);
    int initDelay = delay >= 30000 ? 30000 : delay;
    if (scheduledService == null) {
        scheduledService = Executors.newScheduledThreadPool(threadPoolNum);
    }
    scheduledService.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            process(Operation.commit, null);
        }
    }, initDelay, delay, TimeUnit.MILLISECONDS);
}
Example #6
Source File: DatasetSpout.java From senti-storm with Apache License 2.0
public void open(Map config, TopologyContext context, SpoutOutputCollector collector) {
    this.m_collector = collector;
    this.m_dataset = Configuration.getDataSetSemEval2013();
    this.m_tweets = m_dataset.getTestTweets();

    // Optional sleep between tuples emitting
    if (config.get(CONF_TUPLE_SLEEP_MS) != null) {
        m_tupleSleepMs = (Long) config.get(CONF_TUPLE_SLEEP_MS);
    } else {
        m_tupleSleepMs = 0;
    }
    if (config.get(CONF_TUPLE_SLEEP_NS) != null) {
        m_tupleSleepNs = (Long) config.get(CONF_TUPLE_SLEEP_NS);
    } else {
        m_tupleSleepNs = 0;
    }

    // Optional startup sleep to finish bolt preparation
    // before spout starts emitting
    if (config.get(CONF_STARTUP_SLEEP_MS) != null) {
        long startupSleepMillis = (Long) config.get(CONF_STARTUP_SLEEP_MS);
        TimeUtils.sleepMillis(startupSleepMillis);
    }
}
Example #7
Source File: TransactionalSpoutCoordinator.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _rand = new Random(Utils.secureRandomLong());
    _state = TransactionalState.newCoordinatorState(conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID),
            _spout.getComponentConfiguration());
    _coordinatorState = new RotatingTransactionalState(_state, META_DIR, true);
    _collector = collector;
    _coordinator = _spout.getCoordinator(conf, context);
    _currTransaction = getStoredCurrTransaction(_state);
    Object active = conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
    if (active == null) {
        _maxTransactionActive = 1;
    } else {
        _maxTransactionActive = Utils.getInt(active);
    }
    _initializer = new StateInitializer();
}
Example #8
Source File: DeploySpoult.java From jstorm with Apache License 2.0
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _rand = new Random();
    this.context = context;
    _collector = collector;
    final int differentMessages = 100;
    _messages = new String[differentMessages];
    for (int i = 0; i < differentMessages; i++) {
        StringBuilder sb = new StringBuilder(_sizeInBytes);
        // Even though java encodes strings in UCS2, the serialized version sent by the tuples
        // is UTF8, so it should be a single byte
        for (int j = 0; j < _sizeInBytes; j++) {
            sb.append(_rand.nextInt(9));
        }
        _messages[i] = sb.toString();
    }
    _sourceTasks = context.getThisSourceComponentTasks();
    _targetTasks = context.getThisTargetComponentTasks();
    LOG.info("this component's sourceTasks is {}, and the target tasks is {}", _sourceTasks, _targetTasks);
}
Example #9
Source File: StormKafkaSpoutTest.java From cognition with Apache License 2.0
@Test
public void testOpen(
        final @Injectable SpoutConfig spoutConfig,
        final @Injectable Map conf,
        final @Injectable TopologyContext context,
        final @Injectable SpoutOutputCollector collector,
        final @Injectable KafkaSpout kafkaSpout) throws Exception {
    spout.rateLimiter = null;
    spout.kafkaSpout = kafkaSpout;
    new Expectations(spout) {{
        spout.setupKafkaSpout();
    }};

    spout.permitsPerSecond = spout.DEFAULT_PERMITS_PER_SECOND;
    spout.open(conf, context, collector);
    Assert.assertNull(spout.rateLimiter);

    spout.permitsPerSecond = 1D;
    spout.open(conf, context, collector);
    Assert.assertNotNull(spout.rateLimiter);
}
Example #10
Source File: BlackholeBlockingQueueSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector _collector) {
    collector = _collector;
    _spoutMetric = new CountMetric();
    context.registerMetric(CatMetricUtil.getSpoutMetricName(topic, group),
            _spoutMetric, Constants.EMIT_FREQUENCY_IN_SECONDS);
    ConsumerConfig config = new ConsumerConfig();
    try {
        consumer = new Consumer(topic, group, config);
    } catch (LionException e) {
        throw new RuntimeException(e);
    }
    consumer.start();
    stream = consumer.getStream();
    fetchThread = new MessageFetcher(stream);
    new Thread(fetchThread).start();
}
Example #11
Source File: CVParticleSpout.java From StormCV with Apache License 2.0
/**
 * Configures the spout by fetching optional parameters from the provided configuration. If faultTolerant is true the open
 * function will also construct the cache to hold the emitted tuples.
 * Configuration options are:
 * <ul>
 * <li>stormcv.faulttolerant --> boolean: indicates if the spout must operate in fault tolerant mode (i.e. replay tuples after failure)</li>
 * <li>stormcv.tuplecache.timeout --> long: timeout (seconds) for tuples in the cache</li>
 * <li>stormcv.tuplecache.maxsize --> int: maximum number of tuples in the cache (used to avoid memory overload)</li>
 * </ul>
 */
@SuppressWarnings("rawtypes")
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    if (conf.containsKey(StormCVConfig.STORMCV_SPOUT_FAULTTOLERANT)) {
        faultTolerant = (Boolean) conf.get(StormCVConfig.STORMCV_SPOUT_FAULTTOLERANT);
    }
    if (faultTolerant) {
        long timeout = conf.get(StormCVConfig.STORMCV_CACHES_TIMEOUT_SEC) == null ? 30
                : (Long) conf.get(StormCVConfig.STORMCV_CACHES_TIMEOUT_SEC);
        int maxSize = conf.get(StormCVConfig.STORMCV_CACHES_MAX_SIZE) == null ? 500
                : ((Long) conf.get(StormCVConfig.STORMCV_CACHES_MAX_SIZE)).intValue();
        tupleCache = CacheBuilder.newBuilder()
                .maximumSize(maxSize)
                .expireAfterAccess(timeout, TimeUnit.SECONDS)
                .build();
    }
    // pass configuration to subclasses
    try {
        fetcher.prepare(conf, context);
    } catch (Exception e) {
        logger.warn("Unable to configure spout due to ", e);
    }
}
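For reference, the options read in the open() method above are plain entries in the topology configuration. Below is a minimal, hypothetical sketch of setting them; it assumes the literal key strings from the Javadoc match the StormCVConfig constants used in the code, which may not be exactly the case.

import backtype.storm.Config;

public class CVParticleSpoutConfigSketch {
    // Builds a topology Config carrying the cache options that CVParticleSpout.open() reads.
    // Key strings follow the Javadoc above; the actual StormCVConfig constant values may differ.
    public static Config buildConf() {
        Config conf = new Config();
        conf.put("stormcv.faulttolerant", Boolean.TRUE); // operate in fault tolerant mode (replay tuples)
        conf.put("stormcv.tuplecache.timeout", 30L);     // tuple cache timeout in seconds, read as a Long
        conf.put("stormcv.tuplecache.maxsize", 500L);    // maximum number of cached tuples, read as a Long
        return conf;
    }
}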
Example #12
Source File: StormEntranceProcessingItem.java From samoa with Apache License 2.0
@Override
public void open(@SuppressWarnings("rawtypes") Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    // this.tupleInfoQueue = new LinkedBlockingQueue<StormTupleInfo>();

    // Processor and this class share the same instance of stream
    // for (StormSpoutStream stream : streams) {
    //     stream.setSpout(this);
    // }
    // outputStream.setSpout(this);

    this.entranceProcessor.onCreate(context.getThisTaskId());

    // this.spoutStarter = new SpoutStarter(this.starter);
    // this.spoutExecutors = Executors.newSingleThreadExecutor();
    // this.spoutExecutors.execute(spoutStarter);
}
Example #13
Source File: MultiStageAckingTopology.java From incubator-heron with Apache License 2.0
@SuppressWarnings("rawtypes")
public void open(Map conf, TopologyContext context, SpoutOutputCollector acollector) {
    collector = acollector;
    words = new String[]{"nathan", "mike", "jackson", "golda", "bertels"};
    rand = new Random();
}
Example #14
Source File: PerformanceTestTopology.java From jstorm with Apache License 2.0
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.count = 0L;
    this.isAckRequired = Utils.getBoolean(conf.get("is.ack.required"), false);
    this.sendNumEachTime = Utils.getInt(conf.get("send.num.each.time"), 1);
    this.startTimeStamp = System.currentTimeMillis();
}
Example #15
Source File: DRPCSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _collector = collector;
    _clients = new ArrayList<>();
    if (_local_drpc_id == null) {
        _backround = new ExtendedThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>());
        _futures = new LinkedList<>();

        int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
        int index = context.getThisTaskIndex();

        int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
        List<String> servers = NetWorkUtils.host2Ip((List<String>) conf.get(Config.DRPC_SERVERS));
        if (servers == null || servers.isEmpty()) {
            throw new RuntimeException("No DRPC servers configured for topology");
        }

        if (numTasks < servers.size()) {
            for (String s : servers) {
                _futures.add(_backround.submit(new Adder(s, port, conf)));
            }
        } else {
            int i = index % servers.size();
            _futures.add(_backround.submit(new Adder(servers.get(i), port, conf)));
        }
    }
}
Example #16
Source File: SlidingTupleTestRandomSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.random = new Random();
    this.metricClient = new MetricClient(context);
    this.asmCounter = metricClient.registerCounter("SlidingTupleTsTopologyTest.SpoutSum");
}
Example #17
Source File: ThroughputHostsTracking.java From flink-perf with Apache License 2.0
@Override
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
    this.spoutOutputCollector = spoutOutputCollector;
    try {
        this.host = InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
        e.printStackTrace();
    }
}
Example #18
Source File: StormKafkaSpout.java From cognition with Apache License 2.0
@Override
public void open(Map conf, final TopologyContext context, final SpoutOutputCollector collector) {
    setupKafkaSpout();
    kafkaSpout.open(conf, context, collector);

    if (isRateLimited()) {
        rateLimiter = RateLimiter.create(permitsPerSecond);
    }
}
Example #19
Source File: SequenceTestSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.metricClient = new MetricClient(context);
    this.emitCounter = metricClient.registerCounter(SequenceTestMetricsDef.METRIC_SPOUT_EMIT);
    this.successCounter = metricClient.registerCounter(SequenceTestMetricsDef.METRIC_SPOUT_SUCCESS);
    this.failCounter = metricClient.registerCounter(SequenceTestMetricsDef.METRIC_SPOUT_FAIL);
    this.tradeSumCounter = metricClient.registerCounter(SequenceTestMetricsDef.METRIC_SPOUT_TRADE_SUM);
    this.customerSumCounter = metricClient.registerCounter(SequenceTestMetricsDef.METRIC_SPOUT_CUSTOMER_SUM);
    this.isFinished = false;
    this.idGenerator = new Random();
    SPOUT_MAX_SEND_NUM = (long) conf.get("spout.max.sending.num");
    JStormUtils.sleepMs(10 * 1000);
}
Example #20
Source File: HadoopQueueRunningExtractor.java From eagle with Apache License 2.0
public HadoopQueueRunningExtractor(HadoopQueueRunningAppConfig eagleConf, SpoutOutputCollector collector) {
    site = eagleConf.eagleProps.site;
    urlBases = eagleConf.dataSourceConfig.rMEndPoints;
    if (urlBases == null) {
        throw new IllegalArgumentException(site + ".baseUrl is null");
    }
    String[] urls = urlBases.split(",");
    urlSelector = new HAURLSelectorImpl(urls, Constants.CompressionType.NONE);
    executorService = Executors.newFixedThreadPool(MAX_NUM_THREADS);
    this.collector = collector;
}
Example #21
Source File: SequenceSpout.java From jstorm with Apache License 2.0
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;

    if (conf.get("spout.max.sending.num") == null) {
        isLimited = false;
    } else {
        isLimited = true;
        SPOUT_MAX_SEND_NUM = JStormUtils.parseLong(conf.get("spout.max.sending.num"));
    }

    Boolean btrue = JStormUtils.parseBoolean(conf.get("spout.send.contrl.message"));
    if (btrue != null && btrue) {
        isSendCtrlMsg = true;
    } else {
        isSendCtrlMsg = false;
    }

    isFinished = false;

    tpsCounter = new TpsCounter(context.getThisComponentId() + ":" + context.getThisTaskId());
    MetricClient metricClient = new MetricClient(context);
    this.tpCounter = metricClient.registerTopologyCounter("TpCounter");

    MAX_PENDING_COUNTER = getMaxPending(conf);

    bufferLen = JStormUtils.parseInt(conf.get("byte.buffer.len"), 0);

    random = new Random();
    random.setSeed(System.currentTimeMillis());
    idGenerate = new Random(Utils.secureRandomLong());

    JStormUtils.sleepMs(20 * 1000);
    LOG.info("Finish open, buffer Len:" + bufferLen);
}
Example #22
Source File: JStormMetricsReporter.java From jstorm with Apache License 2.0
public void setOutputCollector(Object outputCollector) {
    if (outputCollector instanceof OutputCollector) {
        this.boltOutput = (OutputCollector) outputCollector;
    } else if (outputCollector instanceof SpoutOutputCollector) {
        this.spoutOutput = (SpoutOutputCollector) outputCollector;
    }
}
Example #23
Source File: PartitionConsumer.java From jstorm with Apache License 2.0
public EmitState emit(SpoutOutputCollector collector) {
    if (emittingMessages.isEmpty()) {
        fillMessages();
    }

    int count = 0;
    while (true) {
        MessageAndOffset toEmitMsg = emittingMessages.pollFirst();
        if (toEmitMsg == null) {
            return EmitState.EMIT_END;
        }
        count++;
        Iterable<List<Object>> tups = generateTuples(toEmitMsg.message());
        if (tups != null) {
            for (List<Object> tuple : tups) {
                LOG.debug("emit message {}", new String(Utils.toByteArray(toEmitMsg.message().payload())));
                collector.emit(tuple, new KafkaMessageId(partition, toEmitMsg.offset()));
            }
            if (count >= config.batchSendCount) {
                break;
            }
        } else {
            ack(toEmitMsg.offset());
        }
    }

    if (emittingMessages.isEmpty()) {
        return EmitState.EMIT_END;
    } else {
        return EmitState.EMIT_MORE;
    }
}
Example #24
Source File: RichSpoutBatchTriggerer.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _delegate.open(conf, context, new SpoutOutputCollector(new StreamOverrideCollector(collector)));
    _outputTasks = new ArrayList<>();
    for (String component : Utils.get(context.getThisTargets(), _coordStream, new HashMap<String, Grouping>()).keySet()) {
        _outputTasks.addAll(context.getComponentTasks(component));
    }
    _rand = new Random(Utils.secureRandomLong());
}
Example #25
Source File: PythonShellMetricsSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    super.open(conf, context, collector);

    CountShellMetric cMetric = new CountShellMetric();
    context.registerMetric("my-custom-shellspout-metric", cMetric, 5);
}
Example #26
Source File: KafkaSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    if (this.config == null) {
        config = new KafkaSpoutConfig();
    }
    config.configure(conf);
    zkState = new ZkState(conf, config);
    coordinator = new PartitionCoordinator(conf, config, context, zkState);
    lastUpdateMs = System.currentTimeMillis();
}
Example #27
Source File: RichSpoutBatchExecutor.java From jstorm with Apache License 2.0
@Override
public void emitBatch(TransactionAttempt tx, Object coordinatorMeta, TridentCollector collector) {
    long txid = tx.getTransactionId();

    long now = System.currentTimeMillis();
    if (now - lastRotate > rotateTime) {
        Map<Long, List<Object>> failed = idsMap.rotate();
        for (Long id : failed.keySet()) {
            // TODO: this isn't right... it's not in the map anymore
            fail(id);
        }
        lastRotate = now;
    }

    if (idsMap.containsKey(txid)) {
        fail(txid);
    }

    _collector.reset(collector);
    if (!prepared) {
        _spout.open(_conf, _context, new SpoutOutputCollector(_collector));
        prepared = true;
    }
    for (int i = 0; i < _maxBatchSize; i++) {
        _spout.nextTuple();
        if (_collector.numEmitted < i) {
            break;
        }
    }
    idsMap.put(txid, _collector.ids);
    _collector.pendingCount = idsMap.size();
}
Example #28
Source File: StormSpout.java From PoseidonX with Apache License 2.0
private Map<String, IEmitter> createEmitters(SpoutOutputCollector collector) {
    Map<String, IEmitter> emitters = Maps.newHashMap();
    SpoutEmitter emitter = new SpoutEmitter(collector, input.getOutputStream());
    emitters.put(input.getOutputStream(), emitter);
    return emitters;
}
Example #29
Source File: InOrderTestSpout.java From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.collector = collector;
    this.task = context.getThisTaskIndex();
    this.metricClient = new MetricClient(context);
    this.emitCounter = metricClient.registerCounter(InOrderTestMetricsDef.METRIC_SPOUT_EMIT);
    this.emitCounter.setOp(AsmMetric.MetricOp.LOG & AsmMetric.MetricOp.REPORT);
    LOG.info("open. task = " + task);
}