Java Code Examples for backtype.storm.Config

The following examples show how to use backtype.storm.Config. These examples are extracted from open source projects; the source project, file, and license are listed above each example.
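Most of the examples below follow the same basic pattern, sketched here in a minimal, hypothetical snippet (not taken from any of the listed projects): Config extends HashMap<String, Object>, so topology options can be set either through its typed helper setters or with plain put calls against the Config.* key constants, and the populated map is then handed to StormSubmitter or LocalCluster.

import backtype.storm.Config;

public class ConfigUsageSketch {
    public static void main(String[] args) {
        Config conf = new Config();
        conf.setNumWorkers(2);                              // same as conf.put(Config.TOPOLOGY_WORKERS, 2)
        conf.setMaxSpoutPending(1000);                      // Config.TOPOLOGY_MAX_SPOUT_PENDING
        conf.setDebug(false);                               // Config.TOPOLOGY_DEBUG
        conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 30); // any key can also be set directly
        System.out.println(conf);                           // Config is just a HashMap under the hood
        // The populated conf is then passed to StormSubmitter.submitTopology(name, conf, topology)
        // or LocalCluster.submitTopology(name, conf, topology), as the examples below show.
    }
}
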
Example 1
Source Project: jstorm   Source File: FastWordCountTest.java    License: Apache License 2.0
@Test
public void testFastWordCount()
{
    int spout_Parallelism_hint = 1;
    int split_Parallelism_hint = 1;
    int count_Parallelism_hint = 2;

    TopologyBuilder builder = new TopologyBuilder();

    boolean isLocalShuffle = false;

    builder.setSpout("spout", new FastWordCountTopology.FastRandomSentenceSpout(), spout_Parallelism_hint);
    if (isLocalShuffle)
        builder.setBolt("split", new FastWordCountTopology.SplitSentence(), split_Parallelism_hint).localFirstGrouping("spout");
    else
        builder.setBolt("split", new FastWordCountTopology.SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");
    builder.setBolt("count", new FastWordCountTopology.WordCount(), count_Parallelism_hint).fieldsGrouping("split", new Fields("word"));

    Map config = new HashMap();
    config.put(Config.TOPOLOGY_NAME, "FastWordCountTest");

    JStormUnitTestRunner.submitTopology(builder.createTopology(), config, 60, null);
}
 
Example 2
Source Project: jstorm   Source File: JStormDebugger.java    License: Apache License 2.0
public static void update(Map conf) {
    boolean _isDebug = JStormUtils.parseBoolean(conf.get(Config.TOPOLOGY_DEBUG), isDebug);
    if (_isDebug != isDebug) {
        isDebug = _isDebug;
        LOG.info("switch topology.debug to {}", _isDebug);
    }
    boolean _isDebugRecv = ConfigExtension.isTopologyDebugRecvTuple(conf);
    if (_isDebugRecv != isDebugRecv) {
        isDebugRecv = _isDebugRecv;
        LOG.info("switch topology.debug.recv.tuple to {}", _isDebug);
    }
    double _sampleRate = ConfigExtension.getTopologyDebugSampleRate(conf);
    if (Double.compare(_sampleRate, sampleRate) != 0) {
        sampleRate = _sampleRate;
        LOG.info("switch topology.debug.sample.rate to {}", _sampleRate);
    }
}
 
Example 3
Source Project: storm-benchmark   Source File: RollingCount.java    License: Apache License 2.0
@Override
public StormTopology getTopology(Config config) {

  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int spBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
  final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
  final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
          RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
  final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
          RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);

  spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config));

  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(SPLIT_ID, new WordCount.SplitSentence(), spBoltNum)
          .localOrShuffleGrouping(SPOUT_ID);
  builder.setBolt(COUNTER_ID, new RollingCountBolt(windowLength, emitFreq), rcBoltNum)
          .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
  return builder.createTopology();
}
 
Example 4
Source Project: jstorm   Source File: TestTridentTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception{
    if (args.length == 0) {
        System.err.println("Please input configuration file");
        System.exit(-1);
    }

    Map conf = LoadConfig.LoadConf(args[0]);

    if (conf == null) {
        LOG.error("Failed to load config");
    } else {
        Config config = new Config();
        config.putAll(conf);
        config.setMaxSpoutPending(10);
        config.put(LoadConfig.TOPOLOGY_TYPE, "Trident");
        StormSubmitter.submitTopology("WordCount", config, buildTopology());
    }
}
 
Example 5
Source Project: jstorm   Source File: TransactionSpout.java    License: Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.conf = conf;
    this.topologyContext = context;
    this.topologyId = topologyContext.getTopologyId();
    this.taskId = topologyContext.getThisTaskId();
    this.topologyMasterId = topologyContext.getTopologyMasterId();
    this.componentId = topologyContext.getThisComponentId();
    this.taskStats = new TaskBaseMetric(topologyId, componentId, taskId);
    this.downstreamTasks = TransactionCommon.getDownstreamTasks(componentId, topologyContext);
    LOG.info("downstreamTasks: {}", downstreamTasks);

    this.outputCollector = new TransactionSpoutOutputCollector(collector, this);

    this.spoutStatus = State.INIT;
    this.committingBatches = new TreeMap<>();
    this.isMaxPending = false;
    this.MAX_PENDING_BATCH_NUM = ConfigExtension.getTransactionMaxPendingBatch(conf);

    int taskLaunchTimeout = JStormUtils.parseInt(conf.get(Config.NIMBUS_TASK_LAUNCH_SECS));
    int spoutInitRetryDelaySec = JStormUtils.parseInt(conf.get("transaction.spout.init.retry.secs"), taskLaunchTimeout);
    this.initRetryCheck = new IntervalCheck();
    initRetryCheck.setInterval(spoutInitRetryDelaySec);

    this.lock = new ReentrantLock(true);
}
 
Example 6
Source Project: jstorm   Source File: NimbusClient.java    License: Apache License 2.0
public static NimbusClient getConfiguredClientAs(Map conf, Integer timeout, String asUser) {
    try {
        if (conf.containsKey(Config.STORM_DO_AS_USER)) {
            if (asUser != null && !asUser.isEmpty()) {
                LOG.warn("You have specified a doAsUser as param {} and a doAsParam as config, " +
                        "config will take precedence.", asUser, conf.get(Config.STORM_DO_AS_USER));
            }
            asUser = (String) conf.get(Config.STORM_DO_AS_USER);
        }

        NimbusClient client = new NimbusClient(conf, null, null, timeout, asUser);
        checkVersion(client);
        return client;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
 
Example 7
Source Project: galaxy-sdk-java   Source File: SubmitTopologyHelper.java    License: Apache License 2.0
public static void submitTopology(StormTopology stormTopology, Map topologyConfig) throws Exception {
    // setup StormTopology

    Config submitConfig = new Config();

    // set the configuration for topology
    submitConfig.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 5000);
    submitConfig.put(Config.TOPOLOGY_ACKER_EXECUTORS, 100);
    submitConfig.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 20);

    // set the worker process number
    submitConfig.setNumWorkers(ConfigHelper.getInt(topologyConfig, ConfigKeys.STORM_WORKER_NUMBER));

    // get topologyName and clusterMode
    String topologyName = ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_TOPOLOGY_NAME);
    String clusterMode = ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_CLUSTER_MODE);

    if (clusterMode.equals("local")) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("storm-emq", submitConfig, stormTopology);
    } else {
        submitConfig.put(Config.NIMBUS_HOST, ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_NIMBUS_HOSTNAME));
        StormSubmitter.submitTopology(topologyName, submitConfig, stormTopology);
    }

}
 
Example 8
Source Project: storm-hbase   Source File: WordCountTrident.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");
        cluster.shutdown();
        System.exit(0);
    }
    else if(args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else{
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
 
Example 9
Source Project: jstorm   Source File: PartitionConsumer.java    License: Apache License 2.0
public void commitState() {
    try {
        long lastOffset = 0;
        if (pendingOffsets.isEmpty()) {
            lastOffset = emittingOffset;
        } else {
            lastOffset = pendingOffsets.first();
        }
        if (lastOffset != lastCommittedOffset) {
            Map<Object, Object> data = new HashMap<Object, Object>();
            data.put("topology", stormConf.get(Config.TOPOLOGY_NAME));
            data.put("offset", lastOffset);
            data.put("partition", partition);
            data.put("broker", ImmutableMap.of("host", consumer.getLeaderBroker().host(), "port", consumer.getLeaderBroker().port()));
            data.put("topic", config.topic);
            zkState.writeJSON(zkPath(), data);
            lastCommittedOffset = lastOffset;
        }
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }

}
 
Example 10
Source Project: jstorm   Source File: Drpc.java    License: Apache License 2.0
private THsHaServer initHandlerServer(Map conf, final Drpc service) throws Exception {
    int port = JStormUtils.parseInt(conf.get(Config.DRPC_PORT));
    int workerThreadNum = JStormUtils.parseInt(conf.get(Config.DRPC_WORKER_THREADS));
    int queueSize = JStormUtils.parseInt(conf.get(Config.DRPC_QUEUE_SIZE));

    LOG.info("Begin to init DRPC handler server at port: " + port);

    TNonblockingServerSocket socket = new TNonblockingServerSocket(port);
    THsHaServer.Args targs = new THsHaServer.Args(socket);
    targs.workerThreads(64);
    targs.protocolFactory(new TBinaryProtocol.Factory());
    targs.processor(new DistributedRPC.Processor<DistributedRPC.Iface>(service));

    ThreadPoolExecutor executor = new ThreadPoolExecutor(
            workerThreadNum, workerThreadNum, 60, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(queueSize));
    targs.executorService(executor);

    THsHaServer handlerServer = new THsHaServer(targs);
    LOG.info("Successfully inited DRPC handler server at port: " + port);

    return handlerServer;
}
 
Example 11
Source Project: jstorm   Source File: TridentTumblingCountWindowTest.java    License: Apache License 2.0
@Test
public void testTridentTumblingCountWindow()
{
    WindowsStoreFactory windowsStoreFactory = new InMemoryWindowsStoreFactory();
    FixedLimitBatchSpout spout = new FixedLimitBatchSpout(SPOUT_LIMIT, new Fields("sentence"), SPOUT_BATCH_SIZE,
                new Values("the cow jumped over the moon"),
                new Values("the man went to the store and bought some candy"),
                new Values("four score and seven years ago"), new Values("how many apples can you eat"),
                new Values("to be or not to be the person"));

    TridentTopology tridentTopology = new TridentTopology();

    Stream stream = tridentTopology.newStream("spout1", spout).parallelismHint(16)
                .each(new Fields("sentence"), new Split(), new Fields("word"))
                .window(windowConfig, windowsStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count"))
                .peek(new ValidateConsumer());

    Map config = new HashMap();
    config.put(Config.TOPOLOGY_NAME, "TridentTumblingCountWindowTest");

    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
 
Example 12
Source Project: flowmix   Source File: SwitchBoltIT.java    License: Apache License 2.0
@Test
public void test_timeDiffActivated_countEviction() throws InterruptedException {
    Flow flow = new FlowBuilder()
            .id("flow")
            .flowDefs()
            .stream("stream1")
            .stopGate().open(Policy.TIME_DELTA_LT, 1000).close(Policy.TIME, 5).evict(Policy.COUNT, 5).end()
            .endStream()   // send ALL results to stream2 and not to standard output
            .endDefs()
            .createFlow();

    StormTopology topology = buildTopology(flow, 50);
    Config conf = new Config();
    conf.setNumWorkers(20);
    conf.registerSerialization(BaseEvent.class, EventSerializer.class);
    conf.setSkipMissingKryoRegistrations(false);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, topology);

    Thread.sleep(5000);

    assertEquals(5, MockSinkBolt.getEvents().size());
}
 
Example 13
Source Project: jstorm   Source File: TridentTumblingDurationWindowTest.java    License: Apache License 2.0
@Test
public void testTridentTumblingDurationWindow()
{
    WindowsStoreFactory windowsStoreFactory = new InMemoryWindowsStoreFactory();
    FixedLimitBatchSpout spout = new FixedLimitBatchSpout(SPOUT_LIMIT, new Fields("sentence"), SPOUT_BATCH_SIZE,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"), new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));

    TridentTopology tridentTopology = new TridentTopology();

    Stream stream = tridentTopology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowsStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new ValidateConsumer());

    Map config = new HashMap();
    config.put(Config.TOPOLOGY_NAME, "TridentTumblingDurationWindowTest");

    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
 
Example 14
Source Project: jstorm   Source File: RollingTopWordsTest.java    License: Apache License 2.0
@Test
public void testRollingTopWords()
{
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("windowTestWordSpout", new WindowTestWordSpout(), 5);
    topologyBuilder.setBolt("windowTestRollingCountBolt", new WindowTestRollingCountBolt(9, 3), 4)
            .fieldsGrouping("windowTestWordSpout", new Fields("word")).addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
    topologyBuilder.setBolt("windowTestIntermediateRankingBolt", new WindowTestIntermediateRankingBolt(DEFAULT_COUNT), 4)
            .fieldsGrouping("windowTestRollingCountBolt", new Fields("obj"));
    topologyBuilder.setBolt("windowTestTotalRankingsBolt", new WindowTestTotalRankingsBolt(DEFAULT_COUNT))
            .globalGrouping("windowTestIntermediateRankingBolt");

    Map config = new HashMap();
    config.put(Config.TOPOLOGY_NAME, "RollingTopWordsTest");

    //It's hard to validate the result because the tick time is not precise,
    //which makes the output emitted per window unpredictable.
    //For now the test simply always passes.
    //TODO: figure out how to validate the result properly.
    JStormUnitTestRunner.submitTopology(topologyBuilder.createTopology(), config, 90, null);
}
 
Example 15
Source Project: jstorm   Source File: NimbusServer.java    License: Apache License 2.0
@SuppressWarnings("rawtypes")
private void initThrift(Map conf) throws TTransportException {
    Integer thrift_port = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_PORT));
    TNonblockingServerSocket socket = new TNonblockingServerSocket(thrift_port);

    Integer maxReadBufSize = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE));

    THsHaServer.Args args = new THsHaServer.Args(socket);
    args.workerThreads(ServiceHandler.THREAD_NUM);
    args.protocolFactory(new TBinaryProtocol.Factory(false, true, maxReadBufSize, -1));

    args.processor(new Nimbus.Processor<Iface>(serviceHandler));
    args.maxReadBufferBytes = maxReadBufSize;

    thriftServer = new THsHaServer(args);

    LOG.info("Successfully started nimbus: started Thrift server...");
    thriftServer.serve();
}
 
Example 16
Source Project: jstorm   Source File: SandBoxMaker.java    License: Apache License 2.0
public SandBoxMaker(Map conf) {
    this.conf = conf;
    isEnable = ConfigExtension.isJavaSandBoxEnable(conf);
    LOG.info("Java Sandbox Policy :" + String.valueOf(isEnable));

    String jstormHome = System.getProperty("jstorm.home");
    if (jstormHome == null) {
        jstormHome = "./";
    }

    replaceBaseMap.put(JSTORM_HOME_KEY, jstormHome);
    replaceBaseMap.put(LOCAL_DIR_KEY, (String) conf.get(Config.STORM_LOCAL_DIR));
    LOG.info("JSTORM_HOME is " + jstormHome);
}
 
Example 17
Source Project: jstorm   Source File: TestWordSpout.java    License: Apache License 2.0
@Override
public Map<String, Object> getComponentConfiguration() {
    if (!_isDistributed) {
        Map<String, Object> ret = new HashMap<String, Object>();
        ret.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);
        return ret;
    } else {
        return null;
    }
}
 
Example 18
Source Project: jstorm   Source File: ServiceHandler.java    License: Apache License 2.0
private void waitForDesiredCodeReplication(Map conf, String topologyId) {
    int minReplicationCount = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_MIN_REPLICATION_COUNT), 1);
    int maxWaitTime = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_MAX_REPLICATION_WAIT_TIME_SEC), 0);

    try {
        List<String> blobKeys = BlobStoreUtils.getKeyListFromId(data, topologyId);
        Map<String, Integer> blobKeysToReplicationCount = new HashMap<>();
        for (String key : blobKeys) {
            blobKeysToReplicationCount.put(key, 0);
        }
        refreshBlobReplicationCount(blobKeysToReplicationCount, minReplicationCount);
        int totalWaitTime = 0;
        while (isNeedWait(minReplicationCount, maxWaitTime, blobKeysToReplicationCount, totalWaitTime)) {
            Thread.sleep(1);
            LOG.info("waiting for desired replication to be achieved. min-replication-count = {}, " +
                            "max-replication-wait-time = {}, total-wait-time = {}, current key to replication count = {}",
                    minReplicationCount, maxWaitTime, blobKeysToReplicationCount);
            refreshBlobReplicationCount(blobKeysToReplicationCount, minReplicationCount);
            totalWaitTime++;
        }
        boolean isAllAchieved = true;
        for (Integer count : blobKeysToReplicationCount.values()) {
            if (count < minReplicationCount) {
                isAllAchieved = false;
                break;
            }
        }
        if (isAllAchieved) {
            LOG.info("desired replication count {} achieved, current key to replication count = {}",
                    minReplicationCount, blobKeysToReplicationCount);
        } else {
            LOG.info("desired replication count of {} not achieved but we have hit the max wait time {}, " +
                    "so moving on with key to replication count {}", minReplicationCount, maxWaitTime, blobKeysToReplicationCount);
        }
    } catch (Exception e) {
        LOG.error("wait for desired code replication error", e);
    }
}
 
Example 19
Source Project: jstorm   Source File: JStormHelper.java    License: Apache License 2.0
public static boolean localMode(Map conf) {
    String mode = (String) conf.get(Config.STORM_CLUSTER_MODE);
    return "local".equals(mode);
}
 
Example 20
Source Project: storm-example   Source File: ClickThruAnalyticsTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    final Config conf = new Config();
    final LocalCluster cluster = new LocalCluster();
    LOG.info("Submitting topology.");
    cluster.submitTopology("financial", conf, buildTopology());
    LOG.info("Topology submitted.");
    Thread.sleep(600000);
}
 
Example 21
public static void main(String[] args) throws Exception {
    Config conf = new Config();
//    conf.put(Config.TOPOLOGY_DEBUG, true);
    LocalCluster cluster = new LocalCluster();

    // This time we use a "FeederBatchSpout", a spout designed for testing.
    FeederBatchSpout testSpout = new FeederBatchSpout(ImmutableList.of("name", "city", "age"));
    cluster.submitTopology("advanced_primitives", conf, advancedPrimitives(testSpout));

    // You can "hand feed" values to the topology by using this spout
    testSpout.feed(ImmutableList.of(new Values("rose", "Shanghai", 32), new Values("mary", "Shanghai", 51), new Values("pere", "Jakarta", 65), new Values("Tom", "Jakarta", 10)));
}
 
Example 22
Source Project: incubator-heron   Source File: MultiStageAckingTopology.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Please specify the name of the topology");
  }
  TopologyBuilder builder = new TopologyBuilder();

  int parallelism = 2;
  builder.setSpout("word", new AckingTestWordSpout(), parallelism);
  builder.setBolt("exclaim1", new ExclamationBolt(true), parallelism)
      .shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(false), parallelism)
      .shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  // Put an arbitrary large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);

  // To enable acking, set the number of ackers to a non-zero value
  conf.setNumAckers(1);

  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

  conf.setNumWorkers(parallelism);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
 
Example 23
Source Project: storm-solr   Source File: SpringBolt.java    License: Apache License 2.0
@Override
public Map<String, Object> getComponentConfiguration() {
  Map<String, Object> conf = new HashMap<String, Object>();
  if (tickRate > 0)
    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, tickRate);
  return conf;
}
 
Example 24
Source Project: jstorm   Source File: WindowedBoltExecutor.java    License: Apache License 2.0
private int getMaxSpoutPending(Map stormConf) {
    int maxPending = Integer.MAX_VALUE;
    if (stormConf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING) != null) {
        maxPending = ((Number) stormConf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING)).intValue();
    }
    return maxPending;
}
 
Example 25
Source Project: jstorm   Source File: BlobStoreTest.java    License: Apache License 2.0
private LocalFsBlobStore initLocalFs() {
    LocalFsBlobStore store = new LocalFsBlobStore();
    // Spy object that tries to mock the real object store
    LocalFsBlobStore spy = spy(store);
    Mockito.doNothing().when(spy).checkForBlobUpdate("test");
    Mockito.doNothing().when(spy).checkForBlobUpdate("other");
    Mockito.doNothing().when(spy).checkForBlobUpdate("test-empty-subject-WE");
    Mockito.doNothing().when(spy).checkForBlobUpdate("test-empty-subject-DEF");
    Mockito.doNothing().when(spy).checkForBlobUpdate("test-empty-acls");
    Map conf = Utils.readStormConfig();
    conf.put(Config.STORM_LOCAL_DIR, baseFile.getAbsolutePath());
    conf.put(Config.STORM_PRINCIPAL_TO_LOCAL_PLUGIN,"org.apache.storm.security.auth.DefaultPrincipalToLocal");
    spy.prepare(conf, null, null);
    return spy;
}
 
Example 26
Source Project: jstorm   Source File: NimbusInfo.java    License: Apache License 2.0
public static NimbusInfo fromConf(Map conf) {
    String host;
    if (!ConfigExtension.isNimbusUseIp(conf)) {
        host = NetWorkUtils.hostname();
    } else {
        host = NetWorkUtils.ip();
    }

    int port = Integer.parseInt(conf.get(Config.NIMBUS_THRIFT_PORT).toString());
    return new NimbusInfo(host, port, false);
}
 
Example 27
Source Project: storm-benchmark   Source File: GrepTest.java    License: Apache License 2.0
@Test
public void componentParallelismCouldBeSetThroughConfig() {
  StormBenchmark benchmark = new Grep();
  Config config = new Config();
  config.put(Grep.SPOUT_NUM, 3);
  config.put(Grep.FM_NUM, 4);
  config.put(Grep.CM_NUM, 5);
  StormTopology topology = benchmark.getTopology(config);
  assertThat(topology).isNotNull();
  TestUtils.verifyParallelism(Utils.getComponentCommon(topology, Grep.SPOUT_ID), 3);
  TestUtils.verifyParallelism(Utils.getComponentCommon(topology, Grep.FM_ID), 4);
  TestUtils.verifyParallelism(Utils.getComponentCommon(topology, Grep.CM_ID), 5);
}
 
Example 28
Source Project: jstorm   Source File: FluxBuilder.java    License: Apache License 2.0
/**
 * Given a topology definition, return a populated `org.apache.storm.Config` instance.
 *
 * @param topologyDef the parsed topology definition
 * @return a Config populated from the topology definition's config section
 */
public static Config buildConfig(TopologyDef topologyDef) {
    // merge contents of `config` into topology config
    Config conf = new Config();
    conf.putAll(topologyDef.getConfig());
    return conf;
}
 
Example 29
Source Project: jstorm   Source File: Utils.java    License: Apache License 2.0
public static Map readCommandLineOpts() {
    Map ret = new HashMap();
    String commandOptions = System.getProperty("storm.options");
    if (commandOptions != null) {
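        // split on top-level commas only; the lookahead avoids splitting inside JSON arrays or objects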
        String[] configs = commandOptions.split(",(?![^\\[\\]{}]*(]|}))");
        for (String config : configs) {
            config = URLDecoder.decode(config);
            String[] options = config.split("=", 2);
            if (options.length == 2) {
                Object val = JSONValue.parse(options[1]);
                if (val == null) {
                    val = options[1];
                }
                ret.put(options[0], val);
            }
        }
    }

    String excludeJars = System.getProperty("exclude.jars");
    if (excludeJars != null) {
        ret.put("exclude.jars", excludeJars);
    }

    /*
     * Trident and the old transaction implementation do not work in batch mode,
     * so allow the batch option to be overridden for such topologies.
     */
    String batchOptions = System.getProperty(ConfigExtension.TASK_BATCH_TUPLE);
    if (!StringUtils.isBlank(batchOptions)) {
        boolean isBatched = JStormUtils.parseBoolean(batchOptions, true);
        ConfigExtension.setTaskBatchTuple(ret, isBatched);
        System.out.println(ConfigExtension.TASK_BATCH_TUPLE + " is " + batchOptions);
    }
    String ackerOptions = System.getProperty(Config.TOPOLOGY_ACKER_EXECUTORS);
    if (!StringUtils.isBlank(ackerOptions)) {
        Integer ackerNum = JStormUtils.parseInt(ackerOptions, 0);
        ret.put(Config.TOPOLOGY_ACKER_EXECUTORS, ackerNum);
        System.out.println(Config.TOPOLOGY_ACKER_EXECUTORS + " is " + ackerNum);
    }
    return ret;
}
 
Example 30
public Emitter(Map conf, TopologyContext context) {
    _emitter = _spout.getEmitter(conf, context);
    _index = context.getThisTaskIndex();
    _numTasks = context.getComponentTasks(context.getThisComponentId()).size();
    _state = TransactionalState.newUserState(
            conf, (String) conf.get(Config.TOPOLOGY_TRANSACTIONAL_ID), getComponentConfiguration());
    List<String> existingPartitions = _state.list("");
    for (String p : existingPartitions) {
        int partition = Integer.parseInt(p);
        if ((partition - _index) % _numTasks == 0) {
            _partitionStates.put(partition, new RotatingTransactionalState(_state, p));
        }
    }
}