Java Code Examples for backtype.storm.task.TopologyContext.getThisTaskIndex()

The following are Java code examples showing how to use the getThisTaskIndex() method of the backtype.storm.task.TopologyContext class. You can vote up the examples you like; your votes help surface more good examples in our system.
Example 1
Project: storm-scheduler   File: UuidSpout.java   Source Code and License Vote up 5 votes
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Initializes this spout: records its task index and the total number of
    // sibling spout tasks, then generates a UUID string whose hash maps back to
    // this task's index, so downstream hash-based grouping routes the message here.
    MessageDigest md;
    int counter;

    this.thisTaskIndex = context.getThisTaskIndex();
    this.numSpouts = context.getComponentTasks(context.getThisComponentId()).size();
    counter = 0;

    try {
        md = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
        // MD5 is required by every JDK, so this is effectively unreachable;
        // preserve the cause if it ever happens.
        throw new RuntimeException("Couldn't find MD5 algorithm.", e);
    }

    // We want to create a message that hashes to exactly one of the following spouts. As there
    // are the same number of bolts on each level as there are spouts, we just keep looking until
    // we find a uuid whose hash code would be assigned to the id of this spout (if it were a bolt).
    do {
        if (++counter > 1000 * 1000) {
            // Bail out rather than loop forever if no matching UUID turns up.
            throw new RuntimeException("Unable to generate required UUID in 1 mio tries.");
        }
        // BUGFIX: use a fixed 1:1 charset (ISO-8859-1) for both directions instead of the
        // platform default. Decoding arbitrary digest bytes with the platform charset is lossy
        // (unmappable bytes become replacement characters) and platform-dependent, which made
        // the generated uuid -- and therefore its hashCode() -- vary across JVMs/locales.
        // The UUID text itself is ASCII, so encoding it with ISO-8859-1 is byte-identical
        // to the previous default-charset behavior.
        byte[] bytes = md.digest(UUID.randomUUID().toString()
                .getBytes(java.nio.charset.StandardCharsets.ISO_8859_1));
        this.uuid = new String(bytes, java.nio.charset.StandardCharsets.ISO_8859_1);
        // NOTE(review): String.hashCode() may be negative, and a negative value modulo
        // numSpouts is <= 0, so it can only ever match task index 0; non-zero indices
        // simply skip those candidates. Left as-is on the assumption that the downstream
        // grouping uses the same (non-floorMod) arithmetic -- confirm before changing.
    } while (this.uuid.hashCode() % this.numSpouts != this.thisTaskIndex);

    this.collector = collector;

    if (!this.disableAniello) {
        // this will create/configure the worker monitor once per worker
        WorkerMonitor.getInstance().setContextInfo(context);

        // this object is used in the emit/execute method to compute the number of inter-node messages
        this.taskMonitor = new TaskMonitor(context.getThisTaskId());
    }
}
 
Example 2
Project: es-hadoop-v2.2.0   File: EsSpout.java   Source Code and License Vote up 5 votes
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Prepares this Elasticsearch spout: merges configuration, sets up reliable-mode
    // bookkeeping when acking is enabled, and assigns this task its share of the
    // Elasticsearch partitions to read from.
    this.collector = collector;

    // Merge the topology-level configuration with this spout's own settings;
    // spout-specific entries take precedence over the topology ones.
    LinkedHashMap merged = new LinkedHashMap(conf);
    merged.putAll(spoutConfig);
    StormSettings settings = new StormSettings(merged);

    // Fall back to the JDK value reader unless one was configured explicitly.
    InitializationUtils.setValueReaderIfNotSet(settings, JdkValueReader.class, log);

    ackReads = settings.getStormSpoutReliable();
    if (ackReads) {
        // Reliable mode: keep in-flight tuples around so failures can be replayed,
        // bounded by the configured queue size and per-tuple retry policy.
        inTransitQueue = new LinkedHashMap<Object, Object>();
        replayQueue = new LinkedList<Object[]>();
        retries = new HashMap<Object, Integer>();
        queueSize = settings.getStormSpoutReliableQueueSize();
        tupleRetries = settings.getStormSpoutReliableRetriesPerTuple();
        tupleFailure = settings.getStormSpoutReliableTupleFailureHandling();
    }

    // Split the available partitions across all task instances of this component,
    // keyed by this task's index within the component.
    int taskCount = context.getComponentTasks(context.getThisComponentId()).size();
    int taskIndex = context.getThisTaskIndex();

    List<PartitionDefinition> allPartitions = RestService.findPartitions(settings, log);
    List<PartitionDefinition> myPartitions = RestService.assignPartitions(allPartitions, taskIndex, taskCount);
    iterator = RestService.multiReader(settings, myPartitions, log);
}