Java Code Examples for backtype.storm.utils.Utils#get()

The following examples show how to use backtype.storm.utils.Utils#get(). These examples are extracted from open source projects; follow the Source Project and File references above each example to see the full context.
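Before the examples, a quick orientation: Utils.get reads a value from an untyped Map and falls back to a supplied default when the key is absent, which is why every snippet below casts its result. The sketch below is a minimal, hypothetical usage (the map keys, values, and defaults are invented for illustration), assuming the Object get(Map m, Object key, Object def) signature exercised throughout these examples:

import java.util.HashMap;
import java.util.Map;

import backtype.storm.utils.Utils;

public class UtilsGetDemo {
  public static void main(String[] args) {
    Map conf = new HashMap();
    conf.put("metrics.poll", 5000);          // hypothetical key/value for illustration

    // Key present: the stored value is returned; the caller casts it.
    int poll = ((Number) Utils.get(conf, "metrics.poll", 60000)).intValue();

    // Key absent: the supplied default is returned instead.
    String path = (String) Utils.get(conf, "metrics.path", "/tmp/metrics");

    System.out.println(poll + " " + path);   // prints: 5000 /tmp/metrics
  }
}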
Example 1
Source Project: storm-benchmark   File: Grep.java   License: Apache License 2.0
@Override
public StormTopology getTopology(Config config) {

  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int matBoltNum = BenchmarkUtils.getInt(config, FM_NUM, DEFAULT_MAT_BOLT_NUM);
  final int cntBoltNum = BenchmarkUtils.getInt(config, CM_NUM, DEFAULT_CNT_BOLT_NUM);
  final String ptnString = (String) Utils.get(config, PATTERN_STRING, DEFAULT_PATTERN_STR);

  spout = new KafkaSpout(KafkaUtils.getSpoutConfig(config, new SchemeAsMultiScheme(new StringScheme())));

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(FM_ID, new FindMatchingSentence(ptnString), matBoltNum)
          .localOrShuffleGrouping(SPOUT_ID);
  builder.setBolt(CM_ID, new CountMatchingSentence(), cntBoltNum)
          .fieldsGrouping(FM_ID, new Fields(FindMatchingSentence.FIELDS));

  return builder.createTopology();
}
 
Example 2
public MetricsCollectorConfig(Config stormConfig) {
  this.stormConfig  = stormConfig;
  name = (String) Utils.get(
          stormConfig, Config.TOPOLOGY_NAME, StormBenchmark.DEFAULT_TOPOLOGY_NAME);
  pollInterval = BenchmarkUtils.getInt(
          stormConfig, METRICS_POLL_INTERVAL, DEFAULT_POLL_INTERVAL);
  totalTime = BenchmarkUtils.getInt(
          stormConfig, METRICS_TOTAL_TIME, DEFAULT_TOTAL_TIME);
  path = (String) Utils.get(stormConfig, METRICS_PATH, DEFAULT_PATH);
}
 
Example 3
Source Project: storm-benchmark   File: KafkaProducer.java   License: Apache License 2.0
private Map getKafkaConfig(Map options) {
  Map kafkaConfig = new HashMap();
  Map brokerConfig = new HashMap();
  String brokers = (String) Utils.get(options, BROKER_LIST, "localhost:9092");
  String topic = (String) Utils.get(options, TOPIC, KafkaUtils.DEFAULT_TOPIC);
  brokerConfig.put("metadata.broker.list", brokers);
  brokerConfig.put("serializer.class", "kafka.serializer.StringEncoder");
  brokerConfig.put("key.serializer.class", "kafka.serializer.StringEncoder");
  brokerConfig.put("request.required.acks", "1");
  kafkaConfig.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, brokerConfig);
  kafkaConfig.put(KafkaBolt.TOPIC, topic);
  return kafkaConfig;
}
 
Example 4
Source Project: storm-benchmark   File: KafkaUtils.java   License: Apache License 2.0
public static SpoutConfig getSpoutConfig(Map options, MultiScheme scheme) throws IllegalArgumentException {
  String zkServers = (String) Utils.get(options, ZOOKEEPER_SERVERS, "localhost:2181");
  String kafkaRoot = (String) Utils.get(options, KAFKA_ROOT_PATH, "/kafka");
  String connectString = zkServers + kafkaRoot;

  BrokerHosts hosts = new ZkHosts(connectString);
  String topic = (String) Utils.get(options, TOPIC, DEFAULT_TOPIC);
  String zkRoot = kafkaRoot + "/" + "storm-consumer-states";
  String appId = (String) Utils.get(options, CLIENT_ID, "storm-app");

  SpoutConfig config = new SpoutConfig(hosts, topic, zkRoot, appId);
  config.zkServers = new ArrayList<String>();

  String [] servers = zkServers.split(",");

  for (int i = 0; i < servers.length; i++) {
    String[] serverAndPort = servers[i].split(":");
    config.zkServers.add(serverAndPort[0]);
    int port = Integer.parseInt(serverAndPort[1]);
    if (i == 0) {
      config.zkPort = port;
    }

    if (config.zkPort != port) {
      throw new IllegalArgumentException("The ZooKeeper port must be the same on all servers");
    }
  }
  config.scheme = scheme;
  return config;
}
 
Example 5
Source Project: storm-benchmark   File: KafkaUtils.java   License: Apache License 2.0
public static TridentKafkaConfig getTridentKafkaConfig(Map options, MultiScheme scheme) {
  String zkServers = (String) Utils.get(options, ZOOKEEPER_SERVERS, "localhost:2181");
  String kafkaRoot = (String) Utils.get(options, KAFKA_ROOT_PATH, "/kafka");
  String connectString = zkServers + kafkaRoot;

  BrokerHosts hosts = new ZkHosts(connectString);
  String topic = (String) Utils.get(options, TOPIC, DEFAULT_TOPIC);
  String appId = (String) Utils.get(options, CLIENT_ID, "storm-app");

  TridentKafkaConfig config = new TridentKafkaConfig(hosts, topic, appId);
  config.scheme = scheme;
  return config;
}
 
Example 6
Source Project: jstorm   File: TridentBoltExecutor.java   License: Apache License 2.0
private void updateTaskCounts(List<Integer> tasks) {
    synchronized (_currBatch) {
        if (_currBatch != null) {
            Map<Integer, Integer> taskEmittedTuples = _currBatch.taskEmittedTuples;
            for (Integer task : tasks) {
                int newCount = Utils.get(taskEmittedTuples, task, 0) + 1;
                taskEmittedTuples.put(task, newCount);
            }
        }
    }
}
 
Example 7
@Override
public MemoryTransactionalSpoutMeta emitPartitionBatch(TransactionAttempt tx, BatchOutputCollector collector, int partition,
        MemoryTransactionalSpoutMeta lastPartitionMeta) {
    if (!Boolean.FALSE.equals(getDisabledStatuses().get(partition))) {
        int index;
        if (lastPartitionMeta == null) {
            index = 0;
        } else {
            index = lastPartitionMeta.index + lastPartitionMeta.amt;
        }
        List<List<Object>> queue = getQueues().get(partition);
        int total = queue.size();
        int left = total - index;
        int toTake = Math.min(left, _takeAmt);

        MemoryTransactionalSpoutMeta ret = new MemoryTransactionalSpoutMeta(index, toTake);
        for (int i = ret.index; i < ret.index + ret.amt; i++) {
            List<Object> toEmit = new ArrayList<Object>(queue.get(i));
            toEmit.add(0, tx);
            collector.emit(toEmit);
        }
        if (toTake == 0) {
            // this is a pretty hacky way to determine when all the partitions have been committed
            // wait until we've emitted max-spout-pending empty partitions for the partition
            int curr = Utils.get(_emptyPartitions, partition, 0) + 1;
            _emptyPartitions.put(partition, curr);
            if (curr > _maxSpoutPending) {
                getFinishedStatuses().put(partition, true);
            }
        }
        return ret;
    } else {
        return null;
    }
}
 
Example 8
Source Project: jstorm   File: MemoryTransactionalSpout.java   License: Apache License 2.0
@Override
public MemoryTransactionalSpoutMeta emitPartitionBatchNew(TransactionAttempt tx, BatchOutputCollector collector, int partition,
        MemoryTransactionalSpoutMeta lastPartitionMeta) {
    int index;
    if (lastPartitionMeta == null) {
        index = 0;
    } else {
        index = lastPartitionMeta.index + lastPartitionMeta.amt;
    }
    List<List<Object>> queue = getQueues().get(partition);
    int total = queue.size();
    int left = total - index;
    int toTake = Math.min(left, _takeAmt);

    MemoryTransactionalSpoutMeta ret = new MemoryTransactionalSpoutMeta(index, toTake);
    emitPartitionBatch(tx, collector, partition, ret);
    if (toTake == 0) {
        // this is a pretty hacky way to determine when all the partitions have been committed
        // wait until we've emitted max-spout-pending empty partitions for the partition
        int curr = Utils.get(_emptyPartitions, partition, 0) + 1;
        _emptyPartitions.put(partition, curr);
        if (curr > _maxSpoutPending) {
            Map<Integer, Boolean> finishedStatuses = getFinishedStatuses();
            // will be null in remote mode
            if (finishedStatuses != null) {
                finishedStatuses.put(partition, true);
            }
        }
    }
    return ret;
}
 
Example 9
Source Project: jstorm   File: KeyedCountingBatchBolt.java   License: Apache License 2.0
@Override
public void execute(Tuple tuple) {
    Object key = tuple.getValue(1);
    int curr = Utils.get(_counts, key, 0);
    _counts.put(key, curr + 1);
}
 
Example 10
Source Project: jstorm   File: KeyedSummingBatchBolt.java   License: Apache License 2.0
@Override
public void execute(Tuple tuple) {
    Object key = tuple.getValue(1);
    Number curr = Utils.get(_sums, key, 0);
    _sums.put(key, Numbers.add(curr, tuple.getValue(2)));
}