backtype.storm.Config Java Examples

The following examples show how to use backtype.storm.Config. They are drawn from open-source projects; the source file, project, and license are noted above each example.
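Before the project examples, here is a minimal illustrative sketch of typical Config usage (not taken from any of the projects below; the topology name, the runLocally flag, and the builder variable are placeholders):

// A minimal sketch: build a Config, set a few common options,
// then submit the topology either to an in-process LocalCluster or to a real cluster.
Config conf = new Config();
conf.setNumWorkers(2);                               // number of worker processes
conf.setMaxSpoutPending(100);                        // cap on unacked tuples per spout task
conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 30);  // tuple timeout in seconds

if (runLocally) {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("example-topology", conf, builder.createTopology());
} else {
    StormSubmitter.submitTopology("example-topology", conf, builder.createTopology());
}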
Example #1
Source File: NimbusServer.java    From jstorm with Apache License 2.0
@SuppressWarnings("rawtypes")
private void initThrift(Map conf) throws TTransportException {
    Integer thrift_port = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_PORT));
    TNonblockingServerSocket socket = new TNonblockingServerSocket(thrift_port);

    Integer maxReadBufSize = JStormUtils.parseInt(conf.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE));

    THsHaServer.Args args = new THsHaServer.Args(socket);
    args.workerThreads(ServiceHandler.THREAD_NUM);
    args.protocolFactory(new TBinaryProtocol.Factory(false, true, maxReadBufSize, -1));

    args.processor(new Nimbus.Processor<Iface>(serviceHandler));
    args.maxReadBufferBytes = maxReadBufSize;

    thriftServer = new THsHaServer(args);

    LOG.info("Successfully started nimbus: started Thrift server...");
    thriftServer.serve();
}
 
Example #2
Source File: TransactionSpout.java    From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    this.conf = conf;
    this.topologyContext = context;
    this.topologyId = topologyContext.getTopologyId();
    this.taskId = topologyContext.getThisTaskId();
    this.topologyMasterId = topologyContext.getTopologyMasterId();
    this.componentId = topologyContext.getThisComponentId();
    this.taskStats = new TaskBaseMetric(topologyId, componentId, taskId);
    this.downstreamTasks = TransactionCommon.getDownstreamTasks(componentId, topologyContext);
    LOG.info("downstreamTasks: {}", downstreamTasks);

    this.outputCollector = new TransactionSpoutOutputCollector(collector, this);

    this.spoutStatus = State.INIT;
    this.committingBatches = new TreeMap<>();
    this.isMaxPending = false;
    this.MAX_PENDING_BATCH_NUM = ConfigExtension.getTransactionMaxPendingBatch(conf);

    int taskLaunchTimeout = JStormUtils.parseInt(conf.get(Config.NIMBUS_TASK_LAUNCH_SECS));
    int spoutInitRetryDelaySec = JStormUtils.parseInt(conf.get("transaction.spout.init.retry.secs"), taskLaunchTimeout);
    this.initRetryCheck = new IntervalCheck();
    initRetryCheck.setInterval(spoutInitRetryDelaySec);

    this.lock = new ReentrantLock(true);
}
 
Example #3
Source File: WordCountTrident.java    From storm-hbase with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");
        cluster.shutdown();
        System.exit(0);
    }
    else if(args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else{
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
 
Example #4
Source File: PartitionConsumer.java    From jstorm with Apache License 2.0
public void commitState() {
    try {
        long lastOffset = 0;
        if (pendingOffsets.isEmpty()) {
            lastOffset = emittingOffset;
        } else {
            lastOffset = pendingOffsets.first();
        }
        if (lastOffset != lastCommittedOffset) {
            Map<Object, Object> data = new HashMap<Object, Object>();
            data.put("topology", stormConf.get(Config.TOPOLOGY_NAME));
            data.put("offset", lastOffset);
            data.put("partition", partition);
            data.put("broker", ImmutableMap.of("host", consumer.getLeaderBroker().host(), "port", consumer.getLeaderBroker().port()));
            data.put("topic", config.topic);
            zkState.writeJSON(zkPath(), data);
            lastCommittedOffset = lastOffset;
        }
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }

}
 
Example #5
Source File: Drpc.java    From jstorm with Apache License 2.0
private THsHaServer initHandlerServer(Map conf, final Drpc service) throws Exception {
    int port = JStormUtils.parseInt(conf.get(Config.DRPC_PORT));
    int workerThreadNum = JStormUtils.parseInt(conf.get(Config.DRPC_WORKER_THREADS));
    int queueSize = JStormUtils.parseInt(conf.get(Config.DRPC_QUEUE_SIZE));

    LOG.info("Begin to init DRPC handler server at port: " + port);

    TNonblockingServerSocket socket = new TNonblockingServerSocket(port);
    THsHaServer.Args targs = new THsHaServer.Args(socket);
    targs.workerThreads(64);
    targs.protocolFactory(new TBinaryProtocol.Factory());
    targs.processor(new DistributedRPC.Processor<DistributedRPC.Iface>(service));

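    // bounded handler pool: workerThreadNum threads with a request queue of queueSize entries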
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
            workerThreadNum, workerThreadNum, 60, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(queueSize));
    targs.executorService(executor);

    THsHaServer handlerServer = new THsHaServer(targs);
    LOG.info("Successfully inited DRPC handler server at port: " + port);

    return handlerServer;
}
 
Example #6
Source File: TestTridentTopology.java    From jstorm with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length == 0) {
        System.err.println("Please input configuration file");
        System.exit(-1);
    }

    Map conf = LoadConfig.LoadConf(args[0]);

    if (conf == null) {
        LOG.error("Failed to load config");
    } else {
        Config config = new Config();
        config.putAll(conf);
        config.setMaxSpoutPending(10);
        config.put(LoadConfig.TOPOLOGY_TYPE, "Trident");
        StormSubmitter.submitTopology("WordCount", config, buildTopology());
    }
}
 
Example #7
Source File: RollingCount.java    From storm-benchmark with Apache License 2.0
@Override
public StormTopology getTopology(Config config) {

  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int spBoltNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SP_BOLT_NUM);
  final int rcBoltNum = BenchmarkUtils.getInt(config, COUNTER_NUM, DEFAULT_RC_BOLT_NUM);
  final int windowLength = BenchmarkUtils.getInt(config, WINDOW_LENGTH,
          RollingBolt.DEFAULT_SLIDING_WINDOW_IN_SECONDS);
  final int emitFreq = BenchmarkUtils.getInt(config, EMIT_FREQ,
          RollingBolt.DEFAULT_EMIT_FREQUENCY_IN_SECONDS);

  spout = new FileReadSpout(BenchmarkUtils.ifAckEnabled(config));

  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(SPLIT_ID, new WordCount.SplitSentence(), spBoltNum)
          .localOrShuffleGrouping(SPOUT_ID);
  builder.setBolt(COUNTER_ID, new RollingCountBolt(windowLength, emitFreq), rcBoltNum)
          .fieldsGrouping(SPLIT_ID, new Fields(WordCount.SplitSentence.FIELDS));
  return builder.createTopology();
}
 
Example #8
Source File: TridentTumblingCountWindowTest.java    From jstorm with Apache License 2.0
@Test
public void testTridentTumblingCountWindow()
{
    WindowsStoreFactory windowsStoreFactory = new InMemoryWindowsStoreFactory();
    FixedLimitBatchSpout spout = new FixedLimitBatchSpout(SPOUT_LIMIT, new Fields("sentence"), SPOUT_BATCH_SIZE,
                new Values("the cow jumped over the moon"),
                new Values("the man went to the store and bought some candy"),
                new Values("four score and seven years ago"), new Values("how many apples can you eat"),
                new Values("to be or not to be the person"));

    TridentTopology tridentTopology = new TridentTopology();

    Stream stream = tridentTopology.newStream("spout1", spout).parallelismHint(16)
                .each(new Fields("sentence"), new Split(), new Fields("word"))
                .window(windowConfig, windowsStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count"))
                .peek(new ValidateConsumer());

    Map config = new HashMap();
    config.put(Config.TOPOLOGY_NAME, "TridentTumblingCountWindowTest");

    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
 
Example #9
Source File: JStormDebugger.java    From jstorm with Apache License 2.0
public static void update(Map conf) {
    boolean _isDebug = JStormUtils.parseBoolean(conf.get(Config.TOPOLOGY_DEBUG), isDebug);
    if (_isDebug != isDebug) {
        isDebug = _isDebug;
        LOG.info("switch topology.debug to {}", _isDebug);
    }
    boolean _isDebugRecv = ConfigExtension.isTopologyDebugRecvTuple(conf);
    if (_isDebugRecv != isDebugRecv) {
        isDebugRecv = _isDebugRecv;
        LOG.info("switch topology.debug.recv.tuple to {}", _isDebug);
    }
    double _sampleRate = ConfigExtension.getTopologyDebugSampleRate(conf);
    if (Double.compare(_sampleRate, sampleRate) != 0) {
        sampleRate = _sampleRate;
        LOG.info("switch topology.debug.sample.rate to {}", _sampleRate);
    }
}
 
Example #10
Source File: NimbusClient.java    From jstorm with Apache License 2.0
public static NimbusClient getConfiguredClientAs(Map conf, Integer timeout, String asUser) {
    try {
        if (conf.containsKey(Config.STORM_DO_AS_USER)) {
            if (asUser != null && !asUser.isEmpty()) {
                LOG.warn("You have specified a doAsUser as param {} and a doAsParam as config, " +
                        "config will take precedence.", asUser, conf.get(Config.STORM_DO_AS_USER));
            }
            asUser = (String) conf.get(Config.STORM_DO_AS_USER);
        }

        NimbusClient client = new NimbusClient(conf, null, null, timeout, asUser);
        checkVersion(client);
        return client;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
 
Example #11
Source File: SwitchBoltIT.java    From flowmix with Apache License 2.0
@Test
public void test_timeDiffActivated_countEviction() throws InterruptedException {
    Flow flow = new FlowBuilder()
            .id("flow")
            .flowDefs()
            .stream("stream1")
            .stopGate().open(Policy.TIME_DELTA_LT, 1000).close(Policy.TIME, 5).evict(Policy.COUNT, 5).end()
            .endStream()   // send ALL results to stream2 and not to standard output
            .endDefs()
            .createFlow();

    StormTopology topology = buildTopology(flow, 50);
    Config conf = new Config();
    conf.setNumWorkers(20);
    conf.registerSerialization(BaseEvent.class, EventSerializer.class);
    conf.setSkipMissingKryoRegistrations(false);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, topology);

    Thread.sleep(5000);

    assertEquals(5, MockSinkBolt.getEvents().size());
}
 
Example #12
Source File: FastWordCountTest.java    From jstorm with Apache License 2.0
@Test
public void testFastWordCount()
{
    int spout_Parallelism_hint = 1;
    int split_Parallelism_hint = 1;
    int count_Parallelism_hint = 2;

    TopologyBuilder builder = new TopologyBuilder();

    boolean isLocalShuffle = false;

    builder.setSpout("spout", new FastWordCountTopology.FastRandomSentenceSpout(), spout_Parallelism_hint);
    if (isLocalShuffle)
        builder.setBolt("split", new FastWordCountTopology.SplitSentence(), split_Parallelism_hint).localFirstGrouping("spout");
    else
        builder.setBolt("split", new FastWordCountTopology.SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");
    builder.setBolt("count", new FastWordCountTopology.WordCount(), count_Parallelism_hint).fieldsGrouping("split", new Fields("word"));

    Map config = new HashMap();
    config.put(Config.TOPOLOGY_NAME, "FastWordCountTest");

    JStormUnitTestRunner.submitTopology(builder.createTopology(), config, 60, null);
}
 
Example #13
Source File: TridentTumblingDurationWindowTest.java    From jstorm with Apache License 2.0
@Test
public void testTridentTumblingDurationWindow()
{
    WindowsStoreFactory windowsStoreFactory = new InMemoryWindowsStoreFactory();
    FixedLimitBatchSpout spout = new FixedLimitBatchSpout(SPOUT_LIMIT, new Fields("sentence"), SPOUT_BATCH_SIZE,
            new Values("the cow jumped over the moon"),
            new Values("the man went to the store and bought some candy"),
            new Values("four score and seven years ago"), new Values("how many apples can you eat"),
            new Values("to be or not to be the person"));

    TridentTopology tridentTopology = new TridentTopology();

    Stream stream = tridentTopology.newStream("spout1", spout).parallelismHint(16)
            .each(new Fields("sentence"), new Split(), new Fields("word"))
            .window(windowConfig, windowsStoreFactory, new Fields("word"), new CountAsAggregator(), new Fields("count"))
            .peek(new ValidateConsumer());

    Map config = new HashMap();
    config.put(Config.TOPOLOGY_NAME, "TridentTumblingDurationWindowTest");

    JStormUnitTestRunner.submitTopology(tridentTopology.build(), config, 120, null);
}
 
Example #14
Source File: SubmitTopologyHelper.java    From galaxy-sdk-java with Apache License 2.0
public static void submitTopology(StormTopology stormTopology, Map topologyConfig) throws Exception {
    // setup StormTopology

    Config submitConfig = new Config();

    // set the configuration for topology
    submitConfig.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 5000);
    submitConfig.put(Config.TOPOLOGY_ACKER_EXECUTORS, 100);
    submitConfig.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 20);

    // set the worker process number
    submitConfig.setNumWorkers(ConfigHelper.getInt(topologyConfig, ConfigKeys.STORM_WORKER_NUMBER));

    // get topologyName and clusterMode
    String topologyName = ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_TOPOLOGY_NAME);
    String clusterMode = ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_CLUSTER_MODE);

    if (clusterMode.equals("local")) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("storm-emq", submitConfig, stormTopology);
    } else {
        submitConfig.put(Config.NIMBUS_HOST, ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_NIMBUS_HOSTNAME));
        StormSubmitter.submitTopology(topologyName, submitConfig, stormTopology);
    }

}
 
Example #15
Source File: RollingTopWordsTest.java    From jstorm with Apache License 2.0
@Test
public void testRollingTopWords()
{
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("windowTestWordSpout", new WindowTestWordSpout(), 5);
    topologyBuilder.setBolt("windowTestRollingCountBolt", new WindowTestRollingCountBolt(9, 3), 4)
            .fieldsGrouping("windowTestWordSpout", new Fields("word")).addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 3);
    topologyBuilder.setBolt("windowTestIntermediateRankingBolt", new WindowTestIntermediateRankingBolt(DEFAULT_COUNT), 4)
            .fieldsGrouping("windowTestRollingCountBolt", new Fields("obj"));
    topologyBuilder.setBolt("windowTestTotalRankingsBolt", new WindowTestTotalRankingsBolt(DEFAULT_COUNT))
            .globalGrouping("windowTestIntermediateRankingBolt");

    Map config = new HashMap();
    config.put(Config.TOPOLOGY_NAME, "RollingTopWordsTest");

    // It's hard to validate whether the result is right, since the tick
    // time is not precise and the output after each window is therefore
    // unpredictable. For now the test always passes.
    // TODO: FIX ME: how to validate whether the result is right?
    JStormUnitTestRunner.submitTopology(topologyBuilder.createTopology(), config, 90, null);
}
 
Example #16
Source File: TCKTest.java    From flux with Apache License 2.0
@Test
public void testTridentTopologySource() throws Exception {
    TopologyDef topologyDef = FluxParser.parseResource("/configs/existing-topology-trident.yaml", false, true, null, false);
    assertTrue(topologyDef.validate());
    Config conf = FluxBuilder.buildConfig(topologyDef);
    ExecutionContext context = new ExecutionContext(topologyDef, conf);
    StormTopology topology = FluxBuilder.buildTopology(context);
    assertNotNull(topology);
    topology.validate();
}
 
Example #17
Source File: TransactionalState.java    From jstorm with Apache License 2.0
protected TransactionalState(Map conf, String id, String subroot) {
    try {
        String className = null;
        if (conf.get(Config.STORM_TRANSATION_STATE_STORE_FACTORY) != null) {
            className = (String) conf.get(Config.STORM_TRANSATION_STATE_STORE_FACTORY);
        } else {
            className = "storm.trident.topology.state.TransactionalStateStorageZkFactory";
        }
        Class clazz = Class.forName(className);
        ITransactionalStateStorageFactory storageFactory = (ITransactionalStateStorageFactory) clazz.newInstance();
        transactionalStateStorage = storageFactory.mkTransactionalState(conf, id, subroot);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
 
Example #18
Source File: TridentFileTopology.java    From storm-hdfs with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(120 * 1000);
    }
    else if(args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else{
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
 
Example #19
Source File: EMQSpout.java    From galaxy-sdk-java with Apache License 2.0
private void checkTopologyTimeout(QueueService.Iface queueClient, Map map) {
    int topologyTimeout = Utils.getInt(map.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS));
    GetQueueInfoRequest getQueueInfoRequest = new GetQueueInfoRequest(emqConfig.queueName);
    GetQueueInfoResponse response = null;
    try {
        response = queueClient.getQueueInfo(getQueueInfoRequest);
    } catch (TException e) {
        throw new RuntimeException("Get EMQ queue info failed: " + e);
    }

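    // the check below requires the queue's invisibility window to be at least the topology message timeout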
    int emqInvisibleTime = response.getQueueAttribute().getInvisibilitySeconds();
    if (emqInvisibleTime < topologyTimeout)
        throw new RuntimeException("TOPOLOGY_MESSAGE_TIMEOUT_SECS(" + topologyTimeout +
                "s) must small than EMQ queue invisibilitySeconds(" + emqInvisibleTime + "s)");
}
 
Example #20
Source File: HdfsBlobStore.java    From jstorm with Apache License 2.0
@Override
public void setBlobMeta(String key, SettableBlobMeta meta)
        throws KeyNotFoundException {
    if (meta.get_replication_factor() <= 0) {
        meta.set_replication_factor((int) conf.get(Config.STORM_BLOBSTORE_REPLICATION_FACTOR));
    }
    // who = checkAndGetSubject(who);
    validateKey(key);
    // _aclHandler.normalizeSettableBlobMeta(key, meta, who, ADMIN);
    // BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
    getStoredBlobMeta(key);
    // _aclHandler.hasPermissions(orig.get_acl(), ADMIN, who, key);
    // BlobStoreFileOutputStream mOut = null;
    writeMetadata(key, meta);
}
 
Example #21
Source File: BasicMetricsCollector.java    From storm-benchmark with Apache License 2.0
public BasicMetricsCollector(Config stormConfig, StormTopology topology, Set<MetricsItem> items) {
  this.config = new MetricsCollectorConfig(stormConfig);
  this.topology = topology;
  collectSupervisorStats = collectSupervisorStats(items);
  collectTopologyStats = collectTopologyStats(items);
  collectExecutorStats = collectExecutorStats(items);
  collectThroughput = collectThroughput(items);
  collectThroughputMB = collectThroughputMB(items);
  collectSpoutThroughput = collectSpoutThroughput(items);
  collectSpoutLatency = collectSpoutLatency(items);
  msgSize = collectThroughputMB ?
          BenchmarkUtils.getInt(stormConfig, RandomMessageSpout.MESSAGE_SIZE,
                  RandomMessageSpout.DEFAULT_MESSAGE_SIZE) : 0;
}
 
Example #22
Source File: TCKTest.java    From flux with Apache License 2.0
@Test
public void testShellComponents() throws Exception {
    TopologyDef topologyDef = FluxParser.parseResource("/configs/shell_test.yaml", false, true, null, false);
    Config conf = FluxBuilder.buildConfig(topologyDef);
    ExecutionContext context = new ExecutionContext(topologyDef, conf);
    StormTopology topology = FluxBuilder.buildTopology(context);
    assertNotNull(topology);
    topology.validate();
}
 
Example #23
Source File: ConfigurableIngestTopologyTest.java    From cognition with Apache License 2.0
@Test
public void testConfigureTickFrequencyEmpty(
    @Injectable Configuration conf,
    @Injectable BoltDeclarer boltDeclarer) throws Exception {
  new Expectations() {{
    conf.getString(anyString);
    result = "";
    boltDeclarer.addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, any);
    times = 0;
  }};
  topology.configureTickFrequency(conf, boltDeclarer);
}
 
Example #24
Source File: LocalStormDoTask.java    From incubator-samoa with Apache License 2.0
/**
 * The main method.
 * 
 * @param args
 *          the arguments
 */
public static void main(String[] args) {

  List<String> tmpArgs = new ArrayList<String>(Arrays.asList(args));

  int numWorker = StormSamoaUtils.numWorkers(tmpArgs);

  args = tmpArgs.toArray(new String[0]);

  // convert the arguments into Storm topology
  StormTopology stormTopo = StormSamoaUtils.argsToTopology(args);
  String topologyName = stormTopo.getTopologyName();

  Config conf = new Config();
  // conf.putAll(Utils.readStormConfig());
  conf.setDebug(false);

  // local mode
  conf.setMaxTaskParallelism(numWorker);

  backtype.storm.LocalCluster cluster = new backtype.storm.LocalCluster();
  cluster.submitTopology(topologyName, conf, stormTopo.getStormBuilder().createTopology());

  // Read local mode execution duration from property file
  Configuration stormConfig = StormSamoaUtils.getPropertyConfig(LocalStormDoTask.SAMOA_STORM_PROPERTY_FILE_LOC);
  long executionDuration = stormConfig.getLong(LocalStormDoTask.EXECUTION_DURATION_KEY);
  backtype.storm.utils.Utils.sleep(executionDuration * 1000);

  cluster.killTopology(topologyName);
  cluster.shutdown();

}
 
Example #25
Source File: SimpleTopologyWithConfigParam.java    From flux with Apache License 2.0
public StormTopology getTopology(Config config) {
    TopologyBuilder builder = new TopologyBuilder();

    // spouts
    FluxShellSpout spout = new FluxShellSpout(
            new String[]{"node", "randomsentence.js"},
            new String[]{"word"});
    builder.setSpout("sentence-spout", spout, 1);

    // bolts
    builder.setBolt("log-bolt", new LogInfoBolt(), 1)
            .shuffleGrouping("sentence-spout");

    return builder.createTopology();
}
 
Example #26
Source File: TridentSequenceTopology.java    From storm-hdfs with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(120 * 1000);
    }
    else if(args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else{
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
 
Example #27
Source File: WindowedBoltExecutor.java    From jstorm with Apache License 2.0
private void ensureCountLessThanMaxPending(long count, long maxPending) {
    if (count > maxPending) {
        throw new IllegalArgumentException("Window count (length + sliding interval) value " + count +
                " is more than " + Config.TOPOLOGY_MAX_SPOUT_PENDING +
                " value " + maxPending);
    }
}
 
Example #28
Source File: SandBoxMaker.java    From jstorm with Apache License 2.0
public SandBoxMaker(Map conf) {
    this.conf = conf;
    isEnable = ConfigExtension.isJavaSandBoxEnable(conf);
    LOG.info("Java Sandbox Policy :" + String.valueOf(isEnable));

    String jstormHome = System.getProperty("jstorm.home");
    if (jstormHome == null) {
        jstormHome = "./";
    }

    replaceBaseMap.put(JSTORM_HOME_KEY, jstormHome);
    replaceBaseMap.put(LOCAL_DIR_KEY, (String) conf.get(Config.STORM_LOCAL_DIR));
    LOG.info("JSTORM_HOME is " + jstormHome);
}
 
Example #29
Source File: TransactionTopologyBuilder.java    From jstorm with Apache License 2.0
@Override
public StormTopology createTopology() {
    TopologyBuilder.putStormConf(ConfigExtension.TASK_BATCH_TUPLE, "true");
    TopologyBuilder.putStormConf(Config.TOPOLOGY_ACKER_EXECUTORS, "0");
    TopologyBuilder.putStormConf(ConfigExtension.TRANSACTION_TOPOLOGY, true);
    return super.createTopology();
}
 
Example #30
Source File: BatchSpoutTrigger.java    From jstorm with Apache License 2.0
public void initMsgId() throws Exception {
    Long zkMsgId = null;
    byte[] data = zkClient.get_data(ZK_NODE_PATH, false);
    if (data != null) {
        String value = new String(data);
        try {
            zkMsgId = Long.valueOf(value);
            LOG.info("ZK msgId:" + zkMsgId);
        } catch (Exception e) {
            LOG.warn("Failed to get msgId ", e);
        }
    }

    if (zkMsgId != null) {
        BatchId.updateId(zkMsgId);
    }

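    // JStormUtils.parseInt(value, 1) falls back to one pending batch when topology.max.spout.pending is not set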
    int max_spout_pending = JStormUtils.parseInt(conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING), 1);
    for (int i = 0; i < max_spout_pending; i++) {
        BatchSpoutMsgId msgId = BatchSpoutMsgId.mkInstance();
        if (currentBatchId == null) {
            currentBatchId = msgId.getBatchId();
        }
        batchQueue.offer(msgId);
        LOG.info("Push into queue," + msgId);
    }

}