Java Code Examples for com.datatorrent.api.Context.OperatorContext
The following examples show how to use
com.datatorrent.api.Context.OperatorContext. These examples are extracted from open source projects.
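Before the individual examples, here is a minimal orientation sketch (the class and field names are hypothetical, not taken from any of the projects below) of the two things OperatorContext is most often used for in these examples: reading the physical operator id via getId() and reading engine attributes via getValue(). It assumes the BaseOperator convenience class from apex-common.

import com.datatorrent.api.Context.OperatorContext;
import com.datatorrent.common.util.BaseOperator;

public class ContextAwareOperator extends BaseOperator
{
  private transient int operatorId;
  private transient int spinMillis;

  @Override
  public void setup(OperatorContext context)
  {
    // The engine supplies the context once per physical operator, before the first window.
    operatorId = context.getId();                               // unique id of this physical instance
    spinMillis = context.getValue(OperatorContext.SPIN_MILLIS); // engine attribute, also read in Examples 6 and 17
  }
}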
Example 1
Source Project: attic-apex-malhar Source File: AbstractFlumeInputOperator.java License: Apache License 2.0
@Override
@SuppressWarnings({"unchecked"})
public void activate(OperatorContext ctx)
{
  if (connectionSpecs.length == 0) {
    logger.info("Discovered zero FlumeSink");
  } else if (connectionSpecs.length == 1) {
    for (String connectAddresse : connectionSpecs) {
      logger.debug("Connection spec is {}", connectAddresse);
      String[] parts = connectAddresse.split(":");
      eventloop.connect(new InetSocketAddress(parts[1], Integer.parseInt(parts[2])), client = new Client(parts[0]));
    }
  } else {
    throw new IllegalArgumentException(
        String.format("A physical %s operator cannot connect to more than 1 addresses!",
            this.getClass().getSimpleName()));
  }
  context = ctx;
}
Example 2
Source Project: attic-apex-malhar Source File: FSWindowDataManagerTest.java License: Apache License 2.0
@Test
public void testSave() throws IOException
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair = createManagerAndContextFor(1);
  pair.second.setup(pair.first);

  Map<Integer, String> data = Maps.newHashMap();
  data.put(1, "one");
  data.put(2, "two");
  data.put(3, "three");

  pair.second.save(data, 1);
  pair.second.setup(pair.first);

  @SuppressWarnings("unchecked")
  Map<Integer, String> artifact = (Map<Integer, String>)pair.second.retrieve(1);
  Assert.assertEquals("dataOf1", data, artifact);
  pair.second.teardown();
}
Example 3
Source Project: attic-apex-malhar Source File: AbstractKafkaInputOperator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  logger.debug("consumer {} topic {} cacheSize {}", consumer, consumer.getTopic(), consumer.getCacheSize());
  consumer.create();
  // reset the offsets to checkpointed one
  if (consumer instanceof SimpleKafkaConsumer && !offsetStats.isEmpty()) {
    Map<KafkaPartition, Long> currentOffsets = new HashMap<>();
    // Increment the offsets and set it to consumer
    for (Map.Entry<KafkaPartition, Long> e : offsetStats.entrySet()) {
      currentOffsets.put(e.getKey(), e.getValue() + 1);
    }
    ((SimpleKafkaConsumer)consumer).resetOffset(currentOffsets);
  }
  this.context = context;
  operatorId = context.getId();

  if (consumer instanceof HighlevelKafkaConsumer && !(windowDataManager instanceof WindowDataManager.NoopWindowDataManager)) {
    throw new RuntimeException("Idempotency is not supported for High Level Kafka Consumer");
  }
  windowDataManager.setup(context);
}
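A note on the guard at the end of this setup(): exactly-once (idempotent) recovery requires the consumer to replay from recorded offsets, which the simple Kafka consumer supports but the high-level consumer does not; hence the combination of HighlevelKafkaConsumer with a non-noop WindowDataManager is rejected outright.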
Example 4
Source Project: attic-apex-malhar Source File: ManagedStateBenchmarkApp.java License: Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TestStatsListener sl = new TestStatsListener();
  sl.adjustRate = conf.getBoolean("dt.ManagedStateBenchmark.adjustRate", false);

  TestGenerator gen = dag.addOperator("Generator", new TestGenerator());
  gen.setRange(timeRange);
  dag.setAttribute(gen, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)sl));

  storeOperator = new StoreOperator();
  storeOperator.setStore(createStore(conf));
  storeOperator.setTimeRange(timeRange);
  storeOperator = dag.addOperator("Store", storeOperator);
  dag.setAttribute(storeOperator, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)sl));

  dag.addStream("Events", gen.data, storeOperator.input).setLocality(Locality.CONTAINER_LOCAL);
}
Example 5
Source Project: attic-apex-malhar Source File: SourceModule.java License: Apache License 2.0
@Override
public void activate(OperatorContext ctx)
{
  for (int i = 0; i < testNum; i++) {
    HashMap<String, Integer> dataMapa = new HashMap<String, Integer>();
    dataMapa.put("a", 2);
    holdingBuffer.add(dataMapa.toString().getBytes());

    HashMap<String, Integer> dataMapb = new HashMap<String, Integer>();
    dataMapb.put("b", 20);
    holdingBuffer.add(dataMapb.toString().getBytes());

    HashMap<String, Integer> dataMapc = new HashMap<String, Integer>();
    dataMapc.put("c", 1000);
    holdingBuffer.add(dataMapc.toString().getBytes());
  }
}
Example 6
Source Project: attic-apex-core Source File: Slider.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  OutputPort<?> unifierOutputPort = getOutputPort();
  unifierOutputPort.setSink(
      new Sink<Object>()
      {
        @Override
        public void put(Object tuple)
        {
          outputPort.emit(tuple);
        }

        @Override
        public int getCount(boolean reset)
        {
          return 0;
        }
      }
  );
  unifier.setup(context);
  spinMillis = context.getValue(OperatorContext.SPIN_MILLIS);
}
Example 7
Source Project: attic-apex-malhar Source File: JavaScriptOperator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  for (Map.Entry<String, Object> entry : serializableBindings.entrySet()) {
    scriptBindings.put(entry.getKey(), entry.getValue());
  }
  this.scriptContext.setBindings(scriptBindings, ScriptContext.ENGINE_SCOPE);
  engine.setContext(this.scriptContext);
  try {
    for (String s : setupScripts) {
      engine.eval(s, this.scriptContext);
    }
  } catch (ScriptException ex) {
    throw new RuntimeException(ex);
  }
}
Example 8
Source Project: attic-apex-core Source File: StreamingContainerTest.java License: Apache License 2.0
@Test
public void testCommitted() throws IOException, ClassNotFoundException
{
  LogicalPlan lp = new LogicalPlan();
  String workingDir = new File("target/testCommitted").getAbsolutePath();
  lp.setAttribute(Context.OperatorContext.STORAGE_AGENT, new AsyncFSStorageAgent(workingDir, null));
  lp.setAttribute(DAGContext.CHECKPOINT_WINDOW_COUNT, 1);
  String opName = "CommitAwareOperatorTestCommit";
  lp.addOperator(opName, new CommitAwareOperator());
  StramLocalCluster lc = new StramLocalCluster(lp);
  lc.run(5000);

  /* this is not foolproof but some insurance is better than nothing */
  Assert.assertTrue("No Committed Windows", committedWindowIds.contains(opName));
}
Example 9
Source Project: attic-apex-malhar Source File: AbstractJMSInputOperator.java License: Apache License 2.0
/**
 * Implement ActivationListener Interface.
 * @param ctx
 */
@Override
public void activate(OperatorContext ctx)
{
  try {
    super.createConnection();
    replyProducer = getSession().createProducer(null);

    consumer = (isDurable() && isTopic())
        ? getSession().createDurableSubscriber((Topic)getDestination(), consumerName)
        : getSession().createConsumer(getDestination());
    consumer.setMessageListener(this);
  } catch (JMSException ex) {
    throw new RuntimeException(ex);
  }
}
Example 10
Source Project: attic-apex-core Source File: ProcessingModeTests.java License: Apache License 2.0
public void testLinearInlineOperatorsRecovery() throws Exception
{
  RecoverableInputOperator.initGenTuples();
  CollectorOperator.collection.clear();
  CollectorOperator.duplicates.clear();

  dag.getAttributes().put(LogicalPlan.CHECKPOINT_WINDOW_COUNT, 2);
  dag.getAttributes().put(LogicalPlan.STREAMING_WINDOW_SIZE_MILLIS, 300);
  dag.getAttributes().put(LogicalPlan.CONTAINERS_MAX_COUNT, 1);

  RecoverableInputOperator rip = dag.addOperator("LongGenerator", RecoverableInputOperator.class);
  rip.setMaximumTuples(maxTuples);
  rip.setSimulateFailure(true);

  CollectorOperator cm = dag.addOperator("LongCollector", CollectorOperator.class);
  cm.setSimulateFailure(true);
  dag.getMeta(cm).getAttributes().put(OperatorContext.PROCESSING_MODE, processingMode);

  dag.addStream("connection", rip.output, cm.input).setLocality(Locality.CONTAINER_LOCAL);

  StramLocalCluster lc = new StramLocalCluster(dag);
  lc.run();
}
Example 11
Source Project: attic-apex-malhar Source File: KafkaExactlyOnceOutputOperatorTest.java License: Apache License 2.0
@Override public void activate(OperatorContext ctx) { dataGeneratorThread = new Thread("String Generator") { @Override public void run() { try { int i = 0; while (dataGeneratorThread != null && i < maxTuple) { stringBuffer.put((++i) + "###testString " + i); } stringBuffer.put((maxTuple + 1) + "###" + KafkaOperatorTestBase.END_TUPLE); } catch (InterruptedException ie) { // } } }; dataGeneratorThread.start(); }
Example 12
Source Project: attic-apex-core Source File: InputOperatorTest.java License: Apache License 2.0
@Override public void activate(OperatorContext ctx) { dataGeneratorThread = new Thread("Integer Emitter") { @Override @SuppressWarnings("SleepWhileInLoop") public void run() { try { int i = 0; while (dataGeneratorThread != null) { (i % 2 == 0 ? evenBuffer : oddBuffer).put(i++); Thread.sleep(20); } } catch (InterruptedException ie) { // break out } } }; dataGeneratorThread.start(); }
Example 13
Source Project: attic-apex-malhar Source File: S3TupleOutputModule.java License: Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  FSRecordCompactionOperator<INPUT> s3compaction = dag.addOperator("S3Compaction", new FSRecordCompactionOperator<INPUT>());
  s3compaction.setConverter(getConverter());
  s3compaction.setMaxIdleWindows(maxIdleWindows);
  s3compaction.setMaxLength(maxLength);

  StatelessThroughputBasedPartitioner<FSRecordCompactionOperator<INPUT>> partitioner = new StatelessThroughputBasedPartitioner<FSRecordCompactionOperator<INPUT>>();
  partitioner.setMaximumEvents(maxTuplesPerSecPerPartition);
  partitioner.setMinimumEvents(minTuplesPerSecPerPartition);
  partitioner.setCooldownMillis(coolDownMillis);
  dag.setAttribute(s3compaction, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] {partitioner}));
  dag.setAttribute(s3compaction, OperatorContext.PARTITIONER, partitioner);

  S3Reconciler s3Reconciler = dag.addOperator("S3Reconciler", new S3Reconciler());
  s3Reconciler.setAccessKey(accessKey);
  s3Reconciler.setSecretKey(secretAccessKey);
  s3Reconciler.setBucketName(bucketName);
  if (region != null) {
    s3Reconciler.setRegion(region);
  }
  s3Reconciler.setDirectoryName(outputDirectoryPath);

  S3ReconcilerQueuePartitioner<S3Reconciler> reconcilerPartitioner = new S3ReconcilerQueuePartitioner<S3Reconciler>();
  reconcilerPartitioner.setCooldownMillis(coolDownMillis);
  reconcilerPartitioner.setMinPartitions(minS3UploadPartitions);
  reconcilerPartitioner.setMaxPartitions(maxS3UploadPartitions);
  reconcilerPartitioner.setMaxQueueSizePerPartition(maxQueueSizeS3Upload);
  dag.setAttribute(s3Reconciler, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] {reconcilerPartitioner}));
  dag.setAttribute(s3Reconciler, OperatorContext.PARTITIONER, reconcilerPartitioner);

  dag.addStream("write-to-s3", s3compaction.output, s3Reconciler.input);
  input.set(s3compaction.input);
  output.set(s3Reconciler.outputPort);
}
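Note how each partitioner is registered twice on its operator, once under OperatorContext.STATS_LISTENERS and once under OperatorContext.PARTITIONER: as a stats listener it observes throughput at runtime, and as the partitioner it can respond by scaling the partition count, throttled by the configured cooldown.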
Example 14
Source Project: attic-apex-malhar Source File: SpillableSetMultimapImplTest.java License: Apache License 2.0
@Test
public void testLoad()
{
  Random random = new Random();
  final int keySize = 1000000;
  final int valueSize = 100000000;
  final int numOfEntry = 100000;

  SpillableStateStore store = testMeta.store;

  SpillableSetMultimapImpl<String, String> multimap = new SpillableSetMultimapImpl<>(
      testMeta.store, ID1, 0L, createStringSerde(), createStringSerde());

  Attribute.AttributeMap.DefaultAttributeMap attributes = new Attribute.AttributeMap.DefaultAttributeMap();
  attributes.put(DAG.APPLICATION_PATH, testMeta.applicationPath);
  OperatorContext context = mockOperatorContext(testMeta.operatorContext.getId(), attributes);
  store.setup(context);
  multimap.setup(context);

  store.beginWindow(1);
  multimap.beginWindow(1);
  for (int i = 0; i < numOfEntry; ++i) {
    multimap.put(String.valueOf(random.nextInt(keySize)), String.valueOf(random.nextInt(valueSize)));
  }
  multimap.endWindow();
  store.endWindow();
}
Example 15
Source Project: examples Source File: FileWordCount.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  if (null == wordMapFile) {
    wordMapFile = new HashMap<>();
  }
  if (null == wordMapGlobal) {
    wordMapGlobal = new HashMap<>();
  }
  resultPerFile = new ArrayList(1);
  resultGlobal = new ArrayList(1);
  // singleton map {<fileName> => fileFinalList}; cannot populate it yet since we need fileName
  resultFileFinal = new HashMap<>(1);
  fileFinalList = new ArrayList<>();
}
Example 16
Source Project: attic-apex-malhar Source File: AbstractStoreInputOperator.java License: Apache License 2.0
@Override
public void setup(OperatorContext t1)
{
  try {
    store.connect();
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
}
Example 17
Source Project: examples Source File: SequenceGenerator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  super.setup(context);
  id = context.getId();
  sleepTime = context.getValue(OperatorContext.SPIN_MILLIS);
  LOG.debug("Leaving setup, id = {}, sleepTime = {}, divisor = {}", id, sleepTime, divisor);
}
Example 18
Source Project: attic-apex-malhar Source File: HBasePOJOPutOperatorTest.java License: Apache License 2.0
protected void setupOperator(HBasePOJOPutOperator operator)
{
  configure(operator);

  AttributeMap.DefaultAttributeMap attributeMap = new AttributeMap.DefaultAttributeMap();
  attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
  attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, -1L);
  attributeMap.put(DAG.APPLICATION_ID, APP_ID);

  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);
  operator.setup(context);
}
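The pattern here (fill a DefaultAttributeMap with only the attributes the operator actually reads, then wrap it with the mockOperatorContext(...) test helper) recurs in Examples 14 and 29; it lets setup() be exercised in a plain unit test without a running streaming container.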
Example 19
Source Project: attic-apex-malhar Source File: KafkaInputBenchmark.java License: Apache License 2.0
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  dag.setAttribute(DAG.APPLICATION_NAME, "KafkaInputOperatorPartitionDemo");
  BenchmarkKafkaInputOperator bpkio = new BenchmarkKafkaInputOperator();

  String type = conf.get("kafka.consumertype", "simple");
  KafkaConsumer consumer = null;
  if (type.equals("highlevel")) {
    // Create template high-level consumer
    Properties props = new Properties();
    props.put("group.id", "main_group");
    props.put("auto.offset.reset", "smallest");
    consumer = new HighlevelKafkaConsumer(props);
  } else {
    // topic is set via property file
    consumer = new SimpleKafkaConsumer(null, 10000, 100000, "test_kafka_autop_client", null);
  }

  bpkio.setZookeeper(conf.get("dt.kafka.zookeeper"));
  bpkio.setInitialPartitionCount(1);
  //bpkio.setTuplesBlast(1024 * 1024);
  bpkio.setConsumer(consumer);
  bpkio = dag.addOperator("KafkaBenchmarkConsumer", bpkio);

  CollectorModule cm = dag.addOperator("DataBlackhole", CollectorModule.class);
  dag.addStream("end", bpkio.oport, cm.inputPort).setLocality(Locality.CONTAINER_LOCAL);
  dag.setInputPortAttribute(cm.inputPort, PortContext.PARTITION_PARALLEL, true);
  dag.setAttribute(bpkio, OperatorContext.COUNTERS_AGGREGATOR, new KafkaConsumer.KafkaMeterStatsAggregator());
  // dag.setAttribute(bpkio, OperatorContext.STATS_LISTENER, KafkaMeterStatsListener.class);
}
Example 20
Source Project: attic-apex-malhar Source File: AbstractJdbcInputOperator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  super.setup(context);
  try {
    queryStatement = store.getConnection().createStatement();
  } catch (SQLException e) {
    throw new RuntimeException("creating query", e);
  }
}
Example 21
Source Project: attic-apex-malhar Source File: AbstractExactlyOnceKafkaOutputOperator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  super.setup(context);
  try {
    String className = (String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_PARTITIONER);
    if (className != null) {
      partitioner = (Partitioner)Class.forName(className).newInstance();
    }
  } catch (Exception e) {
    throw new RuntimeException("Failed to initialize partitioner", e);
  }
  // read last message from kafka
  initializeLastProcessingOffset();
}
Example 22
Source Project: attic-apex-malhar Source File: ApacheLogInputGenerator.java License: Apache License 2.0
@Override
public void activate(OperatorContext context)
{
  thread = new Thread(new Runnable()
  {
    @Override
    public void run()
    {
      int i = 0;
      while (true) {
        if (i++ > numberOfTuples) {
          return;
        }
        StringBuilder builder = new StringBuilder();
        builder.append(ipAddress.get(random.nextInt(ipAddressCount))); // ip address
        builder.append(" - - ");
        builder.append("[").append(sdf.format(new Date())).append("] "); // timestamp
        int urlIndex = random.nextInt(urlCount);
        builder.append(url.get(urlIndex)).append(" "); // url
        builder.append(status.get(urlIndex)).append(" "); // status
        builder.append(bytes.get(urlIndex)).append(" "); // bytes
        builder.append(referers.get(random.nextInt(refererCount))).append(" "); // referer
        builder.append(agents.get(random.nextInt(agentsCount))).append(" "); // agent
        //LOG.debug("Adding {}", builder.toString());
        holdingBuffer.add(builder.toString());
        if (maxDelay > 0) {
          try {
            Thread.sleep(random.nextInt(maxDelay));
          } catch (InterruptedException e) {
            return;
          }
        }
      }
    }
  });
  thread.start();
}
Example 23
Source Project: examples Source File: InputItemGenerator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  AggregatorRegistry.DEFAULT_AGGREGATOR_REGISTRY.setup();
  schema = new DimensionalConfigurationSchema(eventSchemaJSON, AggregatorRegistry.DEFAULT_AGGREGATOR_REGISTRY);

  publisherID = schema.getKeysToEnumValuesList().get(PUBLISHER).size();
  if (advertiserName == null) {
    advertiserID = schema.getKeysToEnumValuesList().get(ADVERTISER).size();
  } else {
    advertiserID = advertiserName.size();
  }
  locationID = schema.getKeysToEnumValuesList().get(LOCATION).size();

  publisherName = schema.getKeysToEnumValuesList().get(PUBLISHER);
  if (advertiserName == null) {
    advertiserName = schema.getKeysToEnumValuesList().get(ADVERTISER);
  }
  locationName = schema.getKeysToEnumValuesList().get(LOCATION);

  publisherScaleArray = new double[publisherID];
  initializeScaleArray(publisherScaleArray);
  advertiserScaleArray = new double[advertiserID];
  initializeScaleArray(advertiserScaleArray);
  locationScaleArray = new double[locationID];
  initializeScaleArray(locationScaleArray);

  publisherOffsetArray = new double[publisherID];
  advertiserOffsetArray = new double[advertiserID];
  locationOffsetArray = new double[locationID];
}
Example 24
Source Project: attic-apex-malhar Source File: TailFsInputOperator.java License: Apache License 2.0
@Override
public void activate(OperatorContext ctx)
{
  try {
    file = new File(filePath);
    reader = new RandomAccessFile(file, "r");
    position = end ? file.length() : position;
    reader.seek(position);
    accessTime = System.currentTimeMillis();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example 25
Source Project: attic-apex-malhar Source File: WebSocketOutputOperator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  try {
    openConnection();
  } catch (Exception ex) {
    LOG.warn("Cannot establish connection:", ex);
  }
}
Example 26
Source Project: attic-apex-malhar Source File: RegexMatchMapOperator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  super.setup(context);
  if (this.regex != null) {
    pattern = Pattern.compile(this.regex);
  }
}
Example 27
Source Project: attic-apex-malhar Source File: SeedEventClassifier.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  if (s_start > s_end) {
    int temp = s_end;
    s_end = s_start;
    s_start = temp;
  }
  seed = s_start;
}
Example 28
Source Project: attic-apex-malhar Source File: HBaseCsvMappingPutOperator.java License: Apache License 2.0
@Override
public void setup(OperatorContext context)
{
  super.setup(context);
  parseMapping();
  lineListReader = new CsvListReader(lineSr, CsvPreference.STANDARD_PREFERENCE);
}
Example 29
Source Project: attic-apex-malhar Source File: JMSObjectInputOperatorTest.java License: Apache License 2.0
@Override
protected void starting(Description description)
{
  testBase = new JMSTestBase();
  try {
    testBase.beforTest();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  String methodName = description.getMethodName();
  String className = description.getClassName();
  baseDir = "target/" + className + "/" + methodName;

  Attribute.AttributeMap attributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
  attributeMap.put(Context.OperatorContext.SPIN_MILLIS, 500);
  attributeMap.put(Context.DAGContext.APPLICATION_PATH, baseDir);
  context = mockOperatorContext(1, attributeMap);

  operator = new JMSObjectInputOperator();
  operator.setSubject("TEST.FOO");
  operator.getConnectionFactoryProperties().put(JMSTestBase.AMQ_BROKER_URL, "vm://localhost");

  sink = new CollectorTestSink<Object>();
  operator.output.setSink(sink);
  operator.setup(context);
  operator.activate(context);
}
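Note the order of the final two calls: setup(context) runs before activate(context), matching the lifecycle the Apex engine follows for operators that implement ActivationListener.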
Example 30
Source Project: attic-apex-malhar Source File: AbstractFlumeInputOperator.java License: Apache License 2.0
@Override
public void connected()
{
  super.connected();

  byte[] address;
  synchronized (recoveryAddresses) {
    if (recoveryAddresses.size() > 0) {
      address = recoveryAddresses.get(recoveryAddresses.size() - 1).address;
    } else {
      address = new byte[8];
    }
  }

  int len = 1 /* for the message type SEEK */
      + 8 /* for the address */
      + 8 /* for storing the current time stamp */;

  byte[] array = new byte[len];
  array[0] = Server.Command.SEEK.getOrdinal();
  System.arraycopy(address, 0, array, 1, 8);
  Server.writeLong(array, 9, System.currentTimeMillis());
  write(array);

  connected = true;
  ConnectionStatus connectionStatus = new ConnectionStatus();
  connectionStatus.connected = true;
  connectionStatus.spec = connectionSpecs[0];
  OperatorContext ctx = context;
  synchronized (ctx) {
    logger.debug("{} Submitting ConnectionStatus = {}", AbstractFlumeInputOperator.this, connectionStatus);
    context.setCounters(connectionStatus);
  }
}