org.apache.storm.utils.Utils Java Examples

The following examples show how to use org.apache.storm.utils.Utils. Each example is drawn from an open source project; the source file and originating project are listed above each snippet so you can trace the code back to its source.
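
Most of the examples below lean on a small set of Utils helpers: Utils.sleep(...) to back off inside a spout's nextTuple(), Utils.DEFAULT_STREAM_ID when constructing or asserting on tuples, and the configuration readers Utils.readDefaultConfig() / Utils.readStormConfig(). As an orientation, here is a minimal sketch written for this page (it assumes a Storm 1.x/2.x classpath and is not taken from any of the projects below):

import java.util.Map;
import org.apache.storm.utils.Utils;

public class UtilsQuickTour {
    public static void main(String[] args) {
        // pause the calling thread; spouts use this to avoid busy-spinning nextTuple()
        Utils.sleep(100);

        // the stream id used by emit(...) when none is given explicitly
        String stream = Utils.DEFAULT_STREAM_ID; // the literal string "default"

        // defaults.yaml merged with storm.yaml and any command-line overrides
        Map<String, Object> conf = Utils.readStormConfig();
        System.out.println(stream + " / " + conf.get("storm.zookeeper.port"));
    }
}
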
Example #1
Source File: NiFiSpout.java    From localization_nifi with Apache License 2.0
@Override
public void nextTuple() {
    NiFiDataPacket data = queue.poll();
    if (data == null) {
        Utils.sleep(50);
    } else {
        // always start with the data packet
        Values values = new Values(data);

        // add additional values based on the specified attribute names
        for (String attributeName : attributeNames) {
            if (data.getAttributes().containsKey(attributeName)) {
                values.add(data.getAttributes().get(attributeName));
            }
        }

        spoutOutputCollector.emit(values);
    }
}
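
Returning from nextTuple() without emitting would make Storm call it again immediately, so the Utils.sleep(50) above is a simple idle backoff. Behaviorally, Utils.sleep amounts to the following sketch (an approximation rather than the exact Storm source, which routes through Storm's Time class so that simulated time works in tests):

public static void sleep(long millis) {
    try {
        Thread.sleep(millis); // Storm delegates to Time.sleep(millis) internally
    } catch (InterruptedException e) {
        throw new RuntimeException(e); // interruption surfaces as an unchecked exception
    }
}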
 
Example #2
Source File: StormAtlasHook.java    From incubator-atlas with Apache License 2.0
private void addTopologyInputs(Referenceable topologyReferenceable,
                               Map<String, SpoutSpec> spouts,
                               Map stormConf,
                               String topologyOwner, List<Referenceable> dependentEntities) throws IllegalAccessException {
    final ArrayList<Referenceable> inputDataSets = new ArrayList<>();
    for (Map.Entry<String, SpoutSpec> entry : spouts.entrySet()) {
        Serializable instance = Utils.javaDeserialize(
                entry.getValue().get_spout_object().get_serialized_java(), Serializable.class);

        String simpleName = instance.getClass().getSimpleName();
        final Referenceable datasetRef = createDataSet(simpleName, topologyOwner, instance, stormConf, dependentEntities);
        if (datasetRef != null) {
            inputDataSets.add(datasetRef);
        }
    }

    topologyReferenceable.set("inputs", inputDataSets);
}
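
Utils.javaDeserialize performs plain JDK deserialization plus a cast check against the requested class; its counterpart Utils.javaSerialize produces the byte[] form that topologies carry in their Thrift structures (the get_serialized_java() bytes above). A minimal round trip, shown as a sketch rather than project code:

byte[] bytes = Utils.javaSerialize("hello");              // JDK-serialize any Serializable
String back = Utils.javaDeserialize(bytes, String.class); // deserialize with a type check
assert "hello".equals(back);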
 
Example #3
Source File: QuerySpout.java    From bullet-storm with Apache License 2.0
@Override
public void nextTuple() {
    PubSubMessage message = null;
    try {
        message = subscriber.receive();
    } catch (Exception e) {
        log.error(e.getMessage());
    }
    if (message == null) {
        Utils.sleep(1);
        return;
    }
    String content = message.getContent();
    // If no content, it's a metadata only message. Send it on the METADATA_STREAM.
    if (content == null) {
        collector.emit(METADATA_STREAM, new Values(message.getId(), message.getMetadata()), message.getId());
    } else {
        collector.emit(QUERY_STREAM, new Values(message.getId(), message.getContent(), message.getMetadata()), message.getId());
    }
}
 
Example #4
Source File: EventCorrelatingOutputCollectorTest.java    From streamline with Apache License 2.0
@Test
public void testFail() throws Exception {
    setupExpectationsForTuple();
    setupExpectationsForTopologyContextNoEmit();

    EventCorrelatingOutputCollector sut = getSystemUnderTest();

    Tuple anchor = new TupleImpl(mockedTopologyContext, new Values(PARENT_STREAMLINE_EVENT), TASK_0,
            Utils.DEFAULT_STREAM_ID);

    sut.fail(anchor);

    new Verifications() {{
        mockedOutputCollector.fail(anchor); times = 1;
    }};
}
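
Utils.DEFAULT_STREAM_ID names the implicit stream a component emits to when no stream id is passed; its value is the literal string "default". Assuming a collector field like the ones in these tests, the following two calls are therefore equivalent:

collector.emit(new Values("x"));                          // implicitly the default stream
collector.emit(Utils.DEFAULT_STREAM_ID, new Values("x")); // the same destination, spelled out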
 
Example #5
Source File: WARCHdfsBolt.java    From storm-crawler with Apache License 2.0
@Override
protected AbstractHDFSWriter makeNewWriter(Path path, Tuple tuple)
        throws IOException {
    AbstractHDFSWriter writer = super.makeNewWriter(path, tuple);

    Instant now = Instant.now();

    // overrides the filename and creation date in the headers
    header_fields.put("WARC-Date", WARCRecordFormat.WARC_DF.format(now));
    header_fields.put("WARC-Filename", path.getName());

    byte[] header = WARCRecordFormat.generateWARCInfo(header_fields);

    // write the header at the beginning of the file
    if (header != null && header.length > 0) {
        super.out.write(Utils.gzip(header));
    }

    return writer;
}
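
Utils.gzip returns a GZIP-compressed copy of its byte[] argument, keeping the WARC info header compact on disk. Storm's Utils also provides a matching decompressor; a round-trip sketch, assuming the Utils.gunzip counterpart:

byte[] raw = "WARC/1.0".getBytes(java.nio.charset.StandardCharsets.UTF_8);
byte[] packed = Utils.gzip(raw);        // GZIP-compressed copy
byte[] restored = Utils.gunzip(packed); // the inverse operation
assert java.util.Arrays.equals(raw, restored);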
 
Example #6
Source File: JSONScheme.java    From nightwatch with GNU Lesser General Public License v3.0
@Override
public List<Object> deserialize(ByteBuffer ser) {
    String jsonStr = null;
    if (ser.hasArray()) {
        int base = ser.arrayOffset();
        jsonStr = new String(ser.array(), base + ser.position(), ser.remaining());
    } else {
        jsonStr = new String(Utils.toByteArray(ser), UTF8_CHARSET);
    }
    JSONObject jsonObject = JSONObject.fromObject(jsonStr);
    Values values = new Values();
    for (String outputField : outputFields) {
        if("jsonBody".equals(outputField)) {
            values.add(jsonStr);
        } else {
            if (!jsonObject.containsKey(outputField)) {
                JSONObject rcMsgpara = JSONObject.fromObject(jsonObject.get("rc_msg_para"));
                values.add(rcMsgpara.get(outputField));
            } else {
                values.add(jsonObject.get(outputField));
            }
        }
    }
    return values;
}
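
The branch above exists because Utils.toByteArray(ByteBuffer) allocates a fresh array and copies the buffer's remaining bytes into it, a copy that the array-backed path avoids. A two-line sketch of the call:

java.nio.ByteBuffer buf = java.nio.ByteBuffer.wrap("{\"k\":1}".getBytes(java.nio.charset.StandardCharsets.UTF_8));
byte[] copy = Utils.toByteArray(buf); // copies buf.remaining() bytes, advancing the buffer's position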
 
Example #7
Source File: MultiSpoutExclamationTopology.java    From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word0", new TestWordSpout(), 2);
  builder.setSpout("word1", new TestWordSpout(), 2);
  builder.setSpout("word2", new TestWordSpout(), 2);
  builder.setBolt("exclaim1", new ExclamationBolt(), 2)
      .shuffleGrouping("word0")
      .shuffleGrouping("word1")
      .shuffleGrouping("word2");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example #8
Source File: PerformanceTestTopology.java    From jstorm with Apache License 2.0
public static void SetRemoteTopology()
        throws Exception {
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\.");
        streamName = className[className.length - 1];
    }
    
    TopologyBuilder builder = new TopologyBuilder();
    
    int spout_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1);
    int bolt_Parallelism_hint = Utils.getInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2);
    builder.setSpout("spout", new TestSpout(), spout_Parallelism_hint);
    
    BoltDeclarer boltDeclarer = builder.setBolt("bolt", new TestBolt(), bolt_Parallelism_hint);
    // localFirstGrouping is only for jstorm
    // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME);
    boltDeclarer.shuffleGrouping("spout");
    // .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60);
    
    StormSubmitter.submitTopology(streamName, conf, builder.createTopology());
    
}
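
Utils.getInt(Object, Integer), present in both JStorm's and Apache Storm's Utils, coerces a configuration value (a Number or numeric String) to an int and falls back to the supplied default when the value is null, which is why the parallelism hints above work whether or not the keys are set. A hedged illustration (the map and key names here are hypothetical):

Map<String, Object> conf = new java.util.HashMap<>();
conf.put("spout.parallel", "4");                            // numeric String gets coerced
int spoutPar = Utils.getInt(conf.get("spout.parallel"), 1); // -> 4
int boltPar = Utils.getInt(conf.get("missing.key"), 2);     // -> 2, the default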
 
Example #9
Source File: EventCorrelatingOutputCollectorTest.java    From streamline with Apache License 2.0
@Test
public void testAck() throws Exception {
    setupExpectationsForTuple();
    setupExpectationsForTopologyContextNoEmit();

    EventCorrelatingOutputCollector sut = getSystemUnderTest();

    Tuple anchor = new TupleImpl(mockedTopologyContext, new Values(PARENT_STREAMLINE_EVENT), TASK_0,
            Utils.DEFAULT_STREAM_ID);

    sut.ack(anchor);

    new Verifications() {{
        mockedOutputCollector.ack(anchor); times = 1;
    }};
}
 
Example #10
Source File: NiFiSpout.java    From nifi with Apache License 2.0
@Override
public void nextTuple() {
    NiFiDataPacket data = queue.poll();
    if (data == null) {
        Utils.sleep(50);
    } else {
        // always start with the data packet
        Values values = new Values(data);

        // add additional values based on the specified attribute names
        for (String attributeName : attributeNames) {
            if (data.getAttributes().containsKey(attributeName)) {
                values.add(data.getAttributes().get(attributeName));
            }
        }

        spoutOutputCollector.emit(values);
    }
}
 
Example #11
Source File: RandomSentenceSpout.java    From hadoop-mini-clusters with Apache License 2.0
@Override
public void nextTuple() {
    Utils.sleep(100);
    String[] sentences = new String[]{ "the cow jumped over the moon", "an apple a day keeps the doctor away",
            "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
    String sentence = sentences[_rand.nextInt(sentences.length)];
    _collector.emit(new Values(sentence));
}
 
Example #12
Source File: RandomSentenceSpout.java    From elasticsearch-hadoop with Apache License 2.0
@Override
public void nextTuple() {
    Utils.sleep(100);
    String[] sentences = new String[] { "the cow jumped over the moon", "an apple a day keeps the doctor away",
            "four score and seven years ago", "snow white and the seven dwarfs", "i am at two with nature" };
    String sentence = sentences[_rand.nextInt(sentences.length)];
    _collector.emit(new Values(sentence));
}
 
Example #13
Source File: StormAtlasHook.java    From atlas with Apache License 2.0
private AtlasEntity createBoltInstance(String boltName, Bolt stormBolt) {
    AtlasEntity         bolt          = new AtlasEntity(StormDataTypes.STORM_BOLT.getName());
    Serializable        instance      = Utils.javaDeserialize(stormBolt.get_bolt_object().get_serialized_java(), Serializable.class);
    Map<String, String> flatConfigMap = StormTopologyUtil.getFieldValues(instance, true, null);

    bolt.setAttribute(AtlasClient.NAME, boltName);
    bolt.setAttribute("driverClass", instance.getClass().getName());
    bolt.setAttribute("conf", flatConfigMap);

    return bolt;
}
 
Example #14
Source File: GroupByKeyAndWindowExample.java    From storm-net-adapter with Apache License 2.0
@Override
public void nextTuple() {
    Utils.sleep(5000);
    for (Values v : values.get(index)) {
        collector.emit(v);
    }
    index = (index + 1) % values.size();
}
 
Example #15
Source File: CheckpointSpout.java    From twister2 with Apache License 2.0
@Override
public void nextTuple() {
  if (shouldRecover()) {
    handleRecovery();
    startProgress();
  } else if (shouldCheckpoint()) {
    doCheckpoint();
    startProgress();
  } else {
    Utils.sleep(sleepInterval);
  }
}
 
Example #16
Source File: NiFiStormTopology.java    From localization_nifi with Apache License 2.0
public static void main( String[] args ) {
    // Build a Site-To-Site client config for pulling data
    final SiteToSiteClientConfig inputConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data for Storm")
            .buildConfig();

    // Build a Site-To-Site client config for pushing data
    final SiteToSiteClientConfig outputConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data from Storm")
            .buildConfig();

    final int tickFrequencySeconds = 5;
    final NiFiDataPacketBuilder niFiDataPacketBuilder = new SimpleNiFiDataPacketBuilder();
    final NiFiBolt niFiBolt = new NiFiBolt(outputConfig, niFiDataPacketBuilder, tickFrequencySeconds)
            //.withBatchSize(1)
            ;

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("nifiInput", new NiFiSpout(inputConfig));
    builder.setBolt("nifiOutput", niFiBolt).shuffleGrouping("nifiInput");

    // Submit the topology running in local mode
    Config conf = new Config();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());

    Utils.sleep(90000);
    cluster.shutdown();
}
 
Example #17
Source File: BatchTimeoutHelper.java    From metron with Apache License 2.0
private Map readStormConfigWithoutCLI() {
  Map ret = Utils.readDefaultConfig();
  String confFile = System.getProperty("storm.conf.file");
  Map storm;
  if (confFile == null || confFile.equals("")) {
    storm = Utils.findAndReadConfigFile("storm.yaml", false);
  } else {
    storm = Utils.findAndReadConfigFile(confFile, true);
  }
  ret.putAll(storm);
  return ret;
}
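
This mirrors Utils.readStormConfig() minus its final step: the stock reader additionally merges Utils.readCommandLineOpts(), i.e. overrides passed through the storm.options system property (Example #24 below uses that pair directly). As a sketch of the difference:

Map withoutCli = readStormConfigWithoutCLI();          // defaults.yaml + storm.yaml only
Map<String, Object> withCli = Utils.readStormConfig(); // the same merge, plus storm.options overrides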
 
Example #18
Source File: SidelineSpoutTest.java    From storm-dynamic-spout with BSD 3-Clause "New" or "Revised" License
/**
 * Provides various StreamIds to test emitting out of.
 */
public static Object[][] provideStreamIds() {
    return new Object[][]{
        // No explicitly defined streamId should use the default streamId.
        { null, Utils.DEFAULT_STREAM_ID },

        // Explicitly defined streamId should get used as is.
        { "SpecialStreamId", "SpecialStreamId" }
    };
}
 
Example #19
Source File: RandomNumberSpout.java    From tutorials with MIT License
@Override
public void nextTuple() {
    Utils.sleep(1000);
    // selects a random int in the range [0, 100] inclusive
    int operation = random.nextInt(101);
    long timestamp = System.currentTimeMillis();

    Values values = new Values(operation, timestamp);
    collector.emit(values);
}
 
Example #20
Source File: KafkaConsumerSpoutTest.java    From storm-dynamic-spout with BSD 3-Clause "New" or "Revised" License
/**
 * Provides various StreamIds to test emitting out of.
 */
public static Object[][] provideStreamIds() {
    return new Object[][]{
        // No explicitly defined streamId should use the default streamId.
        { null, Utils.DEFAULT_STREAM_ID },

        // Explicitly defined streamId should get used as is.
        { "SpecialStreamId", "SpecialStreamId" }
    };
}
 
Example #21
Source File: KafkaProducerSpout.java    From storm_spring_boot_demo with MIT License
@Override
public void nextTuple() {
    // sleep for a while before producing the next record
    Utils.sleep(100);
    // pick a sentence at random
    int index = random.nextInt(sentences.length);
    int count = arr.decrementAndGet(index);
    if (count >= 0) {
        String sentence = sentences[index];
        // write the sentence to Kafka
        kafkaTemplate.send(topic, sentence);
    }
}
 
Example #22
Source File: AbstractQueryingSpout.java    From storm-crawler with Apache License 2.0
@Override
public void nextTuple() {
    if (!active)
        return;

    // force the refresh of the buffer even if the buffer is not empty
    if (!isInQuery.get() && triggerQueries()) {
        populateBuffer();
        timeLastQuerySent = System.currentTimeMillis();
    }

    if (buffer.hasNext()) {
        // track how long the buffer had been empty for
        if (timestampEmptyBuffer != -1) {
            eventCounter.scope("empty.buffer").incrBy(
                    System.currentTimeMillis() - timestampEmptyBuffer);
            timestampEmptyBuffer = -1;
        }
        List<Object> fields = buffer.next();
        String url = fields.get(0).toString();
        this._collector.emit(fields, url);
        beingProcessed.put(url, null);
        eventCounter.scope("emitted").incrBy(1);
        return;
    } else if (timestampEmptyBuffer == -1) {
        timestampEmptyBuffer = System.currentTimeMillis();
    }

    if (isInQuery.get() || throttleQueries() > 0) {
        // sleep for a bit but not too much in order to give ack/fail a
        // chance
        Utils.sleep(10);
        return;
    }

    // re-populate the buffer
    populateBuffer();

    timeLastQuerySent = System.currentTimeMillis();
}
 
Example #23
Source File: PerformanceTestTopology.java    From jstorm with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length == 0) {
        System.err.println("Please input configuration file");
        System.exit(-1);
    }
    
    conf = com.alibaba.jstorm.utils.Utils.LoadConf(args[0]);
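    // note: LoadConf above is jstorm's own helper (com.alibaba.jstorm.utils.Utils), not org.apache.storm.utils.Utils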
    SetRemoteTopology();
}
 
Example #24
Source File: InOrderDeliveryTest.java    From storm-net-adapter with Apache License 2.0
public static void main(String[] args) throws Exception {

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new InOrderSpout(), 8);
    builder.setBolt("count", new Check(), 8).fieldsGrouping("spout", new Fields("c1"));

    Config conf = new Config();
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);

    String name = "in-order-test";
    if (args != null && args.length > 0) {
        name = args[0];
    }

    conf.setNumWorkers(1);
    StormSubmitter.submitTopologyWithProgressBar(name, conf, builder.createTopology());

    Map<String, Object> clusterConf = Utils.readStormConfig();
    clusterConf.putAll(Utils.readCommandLineOpts());
    Nimbus.Iface client = NimbusClient.getConfiguredClient(clusterConf).getClient();

    // poll metrics every 30 seconds for 25 minutes (50 iterations)
    for (int i = 0; i < 50; i++) {
        Thread.sleep(30 * 1000);
        printMetrics(client, name);
    }
    kill(client, name);
}
 
Example #25
Source File: PulsarSpout.java    From pulsar with Apache License 2.0
private boolean emitFailedMessage() {
    Message<byte[]> msg;

    while ((msg = failedMessages.peek()) != null) {
        MessageRetries messageRetries = pendingMessageRetries.get(msg.getMessageId());
        if (messageRetries != null) {
            // if the retry still needs to back off, sleep for the backoff interval and return
            // without emitting; otherwise remove the message from the queue and re-emit it
            if (Backoff.shouldBackoff(messageRetries.getTimeStamp(), TimeUnit.NANOSECONDS,
                    messageRetries.getNumRetries(), clientConf.getInitialBackoffIntervalNanos(),
                    clientConf.getMaxBackoffIntervalNanos())) {
                Utils.sleep(TimeUnit.NANOSECONDS.toMillis(clientConf.getInitialBackoffIntervalNanos()));
            } else {
                // the message no longer needs to back off: remove it from the queue and emit it to the topology
                LOG.info("[{}] Retrying failed message {}", spoutId, msg.getMessageId());
                failedMessages.remove();
                mapToValueAndEmit(msg);
            }
            return true;
        }

        // messageRetries is null: the message was already acked and removed from pendingMessageRetries,
        // so drop it from the failed-message queue as well
        if (LOG.isDebugEnabled()) {
            LOG.debug("[{}]-{} removing {} from failedMessage because it's already acked",
                    pulsarSpoutConf.getTopic(), spoutId, msg.getMessageId());
        }
        failedMessages.remove();
        // move on to the next failed message
        continue;
    }
    return false;
}
 
Example #26
Source File: FetcherBolt.java    From storm-crawler with Apache License 2.0
public FetchItemQueues(Config conf) {
    this.conf = conf;
    this.defaultMaxThread = ConfUtils.getInt(conf,
            "fetcher.threads.per.queue", 1);
    queueMode = ConfUtils.getString(conf, "fetcher.queue.mode",
            QUEUE_MODE_HOST);
    // check that the mode is known
    if (!queueMode.equals(QUEUE_MODE_IP)
            && !queueMode.equals(QUEUE_MODE_DOMAIN)
            && !queueMode.equals(QUEUE_MODE_HOST)) {
        LOG.error("Unknown partition mode : {} - forcing to byHost",
                queueMode);
        queueMode = QUEUE_MODE_HOST;
    }
    LOG.info("Using queue mode : {}", queueMode);

    this.crawlDelay = (long) (ConfUtils.getFloat(conf,
            "fetcher.server.delay", 1.0f) * 1000);
    this.minCrawlDelay = (long) (ConfUtils.getFloat(conf,
            "fetcher.server.min.delay", 0.0f) * 1000);
    this.maxQueueSize = ConfUtils.getInt(conf,
            "fetcher.max.queue.size", -1);
    if (this.maxQueueSize == -1) {
        this.maxQueueSize = Integer.MAX_VALUE;
    }

    // order is not guaranteed
    for (Entry<String, Object> e : conf.entrySet()) {
        String key = e.getKey();
        if (!key.startsWith("fetcher.maxThreads."))
            continue;
        Pattern patt = Pattern.compile(key
                .substring("fetcher.maxThreads.".length()));
        customMaxThreads.put(patt, Utils.getInt(e.getValue()));
    }

}
 
Example #27
Source File: ExclamationTopology.java    From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  int parallelism = 2;

  int spouts = parallelism;
  builder.setSpout("word", new TestWordSpout(Duration.ofMillis(50)), spouts);
  int bolts = 2 * parallelism;
  builder.setBolt("exclaim1", new ExclamationBolt(), bolts)
      .shuffleGrouping("word");

  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.setMessageTimeoutSecs(600);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  if (args != null && args.length > 0) {
    conf.setNumWorkers(parallelism);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    System.out.println("Topology name not provided as an argument, running in simulator mode.");
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example #28
Source File: ParserTopologyCLI.java    From metron with Apache License 2.0
public static void main(String[] args) {
  try {
    Options options = new Options();
    final CommandLine cmd = parse(options, args);
    if (cmd.hasOption("h")) {
      final HelpFormatter usageFormatter = new HelpFormatter();
      usageFormatter.printHelp("ParserTopologyCLI", null, options, null, true);
      System.exit(0);
    }
    ParserTopologyCLI cli = new ParserTopologyCLI();
    ParserTopologyBuilder.ParserTopology topology = cli.createParserTopology(cmd);
    String sensorTypes = ParserOptions.SENSOR_TYPES.get(cmd);
    String topologyName = sensorTypes.replaceAll(TOPOLOGY_OPTION_SEPARATOR, STORM_JOB_SEPARATOR);
    if (ParserOptions.TEST.has(cmd)) {
      topology.getTopologyConfig().put(Config.TOPOLOGY_DEBUG, true);
      LocalCluster cluster = new LocalCluster();
      cluster.submitTopology(topologyName, topology.getTopologyConfig(), topology.getBuilder().createTopology());
      Utils.sleep(300000);
      cluster.shutdown();
    } else {
      StormSubmitter.submitTopology(topologyName, topology.getTopologyConfig(), topology.getBuilder().createTopology());
    }
  } catch (Exception e) {
    e.printStackTrace();
    System.exit(-1);
  }
}
 
Example #29
Source File: TestMetadataSerialization.java    From storm-crawler with Apache License 2.0
@Test
public void testSerialization() throws IOException {
    Map conf = Utils.readDefaultConfig();
    Config.registerSerialization(conf, Metadata.class);

    KryoValuesSerializer kvs = new KryoValuesSerializer(conf);
    Metadata md = new Metadata();
    md.addValue("this_key", "has a value");
    // defensive lock
    md.lock();

    boolean exception = false;
    try {
        md.addValue("this_should", "fail");
    } catch (Exception e) {
        exception = true;
    }

    assertTrue(exception);

    byte[] content = kvs.serializeObject(md);

    KryoValuesDeserializer kvd = new KryoValuesDeserializer(conf);
    Metadata md2 = (Metadata) kvd.deserializeObject(content);

    // compare md1 and md2
    assertEquals(md.toString(), md2.toString());

}
 
Example #30
Source File: TestNameSpout.java    From incubator-heron with Apache License 2.0
public void nextTuple() {
  Utils.sleep(100);
  final String[] words = new String[] {"marge", "homer", "bart", "simpson", "lisa"};
  final Random rand = new Random();
  final String word = words[rand.nextInt(words.length)];
  collector.emit(new Values(word));
}