Java Code Examples for backtype.storm.Config#setNumWorkers()
The following examples show how to use backtype.storm.Config#setNumWorkers(), which sets the number of worker processes (JVMs) a topology requests when it is submitted to a cluster. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
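Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them follow: build a small topology, then either submit it to a real cluster, where setNumWorkers() determines how many worker processes the topology runs in, or run it in a LocalCluster, where everything executes in a single JVM and the worker count has no practical effect. The NumWorkersSketch and NoOpBolt names are illustrative placeholders rather than code from any of the projects below; TestWordSpout is the test spout bundled with Storm.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Tuple;

public class NumWorkersSketch {

    // Trivial bolt that consumes tuples and emits nothing, declared here
    // so the sketch compiles on its own.
    public static class NoOpBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple tuple, BasicOutputCollector collector) { }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) { }
    }

    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("words", new TestWordSpout(), 2);
        builder.setBolt("noop", new NoOpBolt(), 4).shuffleGrouping("words");

        Config conf = new Config();
        if (args.length > 0) {
            // Cluster mode: ask for three worker processes; the executors
            // created by the parallelism hints above are spread across them.
            conf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            // Local mode: a LocalCluster runs in one JVM, so cap task
            // parallelism instead of setting a worker count.
            conf.setMaxTaskParallelism(3);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("num-workers-demo", conf, builder.createTopology());
            Thread.sleep(10000);
            cluster.shutdown();
        }
    }
}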
Example 1
Source File: SwitchBoltIT.java From flowmix with Apache License 2.0
@Test
public void test_timeDiffActivated_timeEviction() throws InterruptedException {
    Flow flow = new FlowBuilder()
        .id("flow")
        .flowDefs()
            .stream("stream1")
                .stopGate().open(Policy.TIME_DELTA_LT, 5).close(Policy.TIME, 1).evict(Policy.TIME, 1).end()
            .endStream()   // send ALL results to stream2 and not to standard output
        .endDefs()
        .createFlow();

    StormTopology topology = buildTopology(flow, 50);
    Config conf = new Config();
    conf.setNumWorkers(20);
    conf.registerSerialization(BaseEvent.class, EventSerializer.class);
    conf.setSkipMissingKryoRegistrations(false);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, topology);

    Thread.sleep(7000);

    int size = MockSinkBolt.getEvents().size();
    System.out.println("SIZE: " + size);
    assertTrue(size >= 50 && size <= 65);
}
Example 2
Source File: SubmitTopologyHelper.java From galaxy-sdk-java with Apache License 2.0
public static void submitTopology(StormTopology stormTopology, Map topologyConfig) throws Exception {
    // setup StormTopology Config
    Config submitConfig = new Config();

    // set the configuration for topology
    submitConfig.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 5000);
    submitConfig.put(Config.TOPOLOGY_ACKER_EXECUTORS, 100);
    submitConfig.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 20);

    // set the worker process number
    submitConfig.setNumWorkers(ConfigHelper.getInt(topologyConfig, ConfigKeys.STORM_WORKER_NUMBER));

    // get topologyName and clusterMode
    String topologyName = ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_TOPOLOGY_NAME);
    String clusterMode = ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_CLUSTER_MODE);

    if (clusterMode.equals("local")) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("storm-emq", submitConfig, stormTopology);
    } else {
        submitConfig.put(Config.NIMBUS_HOST,
            ConfigHelper.getString(topologyConfig, ConfigKeys.STORM_NIMBUS_HOSTNAME));
        StormSubmitter.submitTopology(topologyName, submitConfig, stormTopology);
    }
}
Example 3
Source File: ExampleRunner.java From flowmix with Apache License 2.0
public void run() {
    StormTopology topology = new FlowmixBuilder()
        .setFlowLoader(new SimpleFlowLoaderSpout(provider.getFlows(), 60000))
        .setEventsLoader(new MockEventGeneratorSpout(getMockEvents(), 10))
        .setOutputBolt(new PrinterBolt())
        .setParallelismHint(6)
        .create()
        .createTopology();

    Config conf = new Config();
    conf.setNumWorkers(20);
    conf.setMaxSpoutPending(5000);
    conf.setDebug(false);
    conf.registerSerialization(BaseEvent.class, EventSerializer.class);
    conf.setSkipMissingKryoRegistrations(false);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("example-topology", conf, topology);
}
Example 4
Source File: TridentThroughput.java From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
    ParameterTool pt = ParameterTool.fromArgs(args);
    int par = pt.getInt("para");

    TridentTopology topology = new TridentTopology();
    Stream sourceStream = topology.newStream("source", new Generator(pt))
        .parallelismHint(pt.getInt("sourceParallelism"));
    Stream repart = sourceStream.partitionBy(new Fields("id"));
    for (int i = 0; i < pt.getInt("repartitions", 1) - 1; i++) {
        repart = repart.each(new Fields("id"), new IdentityEach(), new Fields("id" + i))
            .partitionBy(new Fields("id" + i));
    }
    repart.each(new Fields("id", "host", "time", "payload"), new Sink(pt), new Fields("dontcare"))
        .parallelismHint(pt.getInt("sinkParallelism"));

    Config conf = new Config();
    conf.setDebug(false);

    if (!pt.has("local")) {
        conf.setNumWorkers(par);
        StormSubmitter.submitTopologyWithProgressBar("throughput-" + pt.get("name", "no_name"), conf, topology.build());
    } else {
        conf.setMaxTaskParallelism(par);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("throughput", conf, topology.build());
        Thread.sleep(30000);
        cluster.shutdown();
    }
}
Example 5
Source File: SelectorBoltIT.java From flowmix with Apache License 2.0
@Test
public void testSelection_basic() {
    Flow flow = new FlowBuilder()
        .id("myflow")
        .flowDefs()
            .stream("stream1")
                .select().fields("key1", "key2").end()
            .endStream()
        .endDefs()
        .createFlow();

    StormTopology topology = buildTopology(flow, 10);
    Config conf = new Config();
    conf.registerSerialization(Event.class, EventSerializer.class);
    conf.setSkipMissingKryoRegistrations(false);
    conf.setNumWorkers(20);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, topology);

    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    cluster.shutdown();
    System.out.println(MockSinkBolt.getEvents());

    assertTrue(MockSinkBolt.getEvents().size() > 0);
    for (Event event : MockSinkBolt.getEvents()) {
        assertNotNull(event.get("key1"));
        assertNotNull(event.get("key2"));
        assertNull(event.get("key3"));
        assertNull(event.get("key4"));
        assertNull(event.get("key5"));
    }
}
Example 6
Source File: MultiStageAckingTopology.java From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new RuntimeException("Please specify the name of the topology");
    }

    TopologyBuilder builder = new TopologyBuilder();
    int parallelism = 2;
    builder.setSpout("word", new AckingTestWordSpout(), parallelism);
    builder.setBolt("exclaim1", new ExclamationBolt(true), parallelism)
        .shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationBolt(false), parallelism)
        .shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);
    // Put an arbitrarily large number here if you don't want to slow the topology down
    conf.setMaxSpoutPending(1000 * 1000 * 1000);
    // To enable acking, set the number of ackers to at least one
    conf.setNumAckers(1);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
    conf.setNumWorkers(parallelism);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
Example 7
Source File: TaskHookTopology.java From incubator-heron with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new RuntimeException("Specify topology name");
    }

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word", new AckingTestWordSpout(), 2);
    builder.setBolt("count", new CountBolt(), 2)
        .shuffleGrouping("word");

    Config conf = new Config();
    conf.setDebug(true);
    // Put an arbitrarily large number here if you don't want to slow the topology down
    conf.setMaxSpoutPending(1000 * 1000 * 1000);
    // To enable acking, set the number of ackers to at least one
    conf.setNumAckers(1);
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");

    // Set the task hook
    List<String> taskHooks = new LinkedList<>();
    taskHooks.add("org.apache.heron.examples.TaskHookTopology$TestTaskHook");
    org.apache.heron.api.Config.setAutoTaskHooks(conf, taskHooks);

    // component resource configuration
    org.apache.heron.api.Config.setComponentRam(conf, "word", ByteAmount.fromMegabytes(512));
    org.apache.heron.api.Config.setComponentRam(conf, "count", ByteAmount.fromMegabytes(512));

    // container resource configuration
    org.apache.heron.api.Config.setContainerDiskRequested(conf, ByteAmount.fromGigabytes(2));
    org.apache.heron.api.Config.setContainerRamRequested(conf, ByteAmount.fromGigabytes(2));
    org.apache.heron.api.Config.setContainerCpuRequested(conf, 2);

    conf.setNumWorkers(2);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
Example 8
Source File: WordCountTopology.java From storm-example with Apache License 2.0
public static void main(String[] args) throws Exception {
    SentenceSpout spout = new SentenceSpout();
    SplitSentenceBolt splitBolt = new SplitSentenceBolt();
    WordCountBolt countBolt = new WordCountBolt();
    ReportBolt reportBolt = new ReportBolt();

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SENTENCE_SPOUT_ID, spout, 2);
    // SentenceSpout --> SplitSentenceBolt
    builder.setBolt(SPLIT_BOLT_ID, splitBolt, 2)
        .setNumTasks(4)
        .shuffleGrouping(SENTENCE_SPOUT_ID);
    // SplitSentenceBolt --> WordCountBolt
    builder.setBolt(COUNT_BOLT_ID, countBolt, 4)
        .fieldsGrouping(SPLIT_BOLT_ID, new Fields("word"));
    // WordCountBolt --> ReportBolt
    builder.setBolt(REPORT_BOLT_ID, reportBolt)
        .globalGrouping(COUNT_BOLT_ID);

    Config config = new Config();
    config.setNumWorkers(2);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
    waitForSeconds(10);
    cluster.killTopology(TOPOLOGY_NAME);
    cluster.shutdown();
}
Example 9
Source File: TridentSequenceTopology.java From storm-hdfs with Apache License 2.0
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(120 * 1000);
    } else if (args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else {
        System.out.println("Usage: TridentSequenceTopology <hdfs url> [topology name]");
    }
}
Example 10
Source File: StormAbstractCloudLiveTest.java From brooklyn-library with Apache License 2.0
public boolean submitTopology(StormTopology stormTopology, String topologyName, int numOfWorkers, boolean debug, long timeoutMs) {
    if (log.isDebugEnabled()) log.debug("Connecting to NimbusClient: {}", nimbus.getConfig(Storm.NIMBUS_HOSTNAME));

    Config conf = new Config();
    conf.setDebug(debug);
    conf.setNumWorkers(numOfWorkers);

    // TODO - confirm this creates the JAR correctly
    String jar = createJar(
        new File(Os.mergePaths(ResourceUtils.create(this).getClassLoaderDir(), "org/apache/brooklyn/entity/messaging/storm/topologies")),
        "org/apache/brooklyn/entity/messaging/storm/");
    System.setProperty("storm.jar", jar);

    long startMs = System.currentTimeMillis();
    long endMs = (timeoutMs == -1) ? Long.MAX_VALUE : (startMs + timeoutMs);
    long currentTime = startMs;
    Throwable lastError = null;
    int attempt = 0;
    while (currentTime <= endMs) {
        currentTime = System.currentTimeMillis();
        if (attempt != 0) Time.sleep(Duration.ONE_SECOND);
        if (log.isTraceEnabled()) log.trace("trying connection to {} at time {}", nimbus.getConfig(Storm.NIMBUS_HOSTNAME), currentTime);

        try {
            StormSubmitter.submitTopology(topologyName, conf, stormTopology);
            return true;
        } catch (Exception e) {
            if (shouldRetryOn(e)) {
                if (log.isDebugEnabled()) log.debug("Attempt {} failed connecting to {} ({})",
                    new Object[] {attempt + 1, nimbus.getConfig(Storm.NIMBUS_HOSTNAME), e.getMessage()});
                lastError = e;
            } else {
                throw Throwables.propagate(e);
            }
        }
        attempt++;
    }
    log.warn("unable to connect to Nimbus client: ", lastError);
    Assert.fail();
    return false;
}
Example 11
Source File: StormDoTask.java From incubator-samoa with Apache License 2.0
/**
 * The main method.
 *
 * @param args the arguments
 */
public static void main(String[] args) {
    List<String> tmpArgs = new ArrayList<String>(Arrays.asList(args));

    boolean isLocal = isLocal(tmpArgs);
    int numWorker = StormSamoaUtils.numWorkers(tmpArgs);

    args = tmpArgs.toArray(new String[0]);

    // convert the arguments into Storm topology
    StormTopology stormTopo = StormSamoaUtils.argsToTopology(args);
    String topologyName = stormTopo.getTopologyName();

    Config conf = new Config();
    conf.putAll(Utils.readStormConfig());
    conf.setDebug(false);

    if (isLocal) {
        // local mode
        conf.setMaxTaskParallelism(numWorker);

        backtype.storm.LocalCluster cluster = new backtype.storm.LocalCluster();
        cluster.submitTopology(topologyName, conf, stormTopo.getStormBuilder().createTopology());

        backtype.storm.utils.Utils.sleep(600 * 1000);

        cluster.killTopology(topologyName);
        cluster.shutdown();
    } else {
        // cluster mode
        conf.setNumWorkers(numWorker);
        try {
            backtype.storm.StormSubmitter.submitTopology(topologyName, conf,
                stormTopo.getStormBuilder().createTopology());
        } catch (backtype.storm.generated.AlreadyAliveException ale) {
            ale.printStackTrace();
        } catch (backtype.storm.generated.InvalidTopologyException ite) {
            ite.printStackTrace();
        }
    }
}
Example 12
Source File: SampleTopology.java From aws-big-data-blog with Apache License 2.0
public static void main(String[] args) throws IllegalArgumentException, KeeperException,
        InterruptedException, AlreadyAliveException, InvalidTopologyException, IOException {
    String propertiesFile = null;
    String mode = null;

    if (args.length != 2) {
        printUsageAndExit();
    } else {
        propertiesFile = args[0];
        mode = args[1];
    }

    configure(propertiesFile);

    final KinesisSpoutConfig config = new KinesisSpoutConfig(streamName, zookeeperEndpoint)
        .withZookeeperPrefix(zookeeperPrefix)
        .withInitialPositionInStream(initialPositionInStream)
        .withRegion(Regions.fromName(regionName));

    final KinesisSpout spout = new KinesisSpout(config, new CustomCredentialsProviderChain(), new ClientConfiguration());
    TopologyBuilder builder = new TopologyBuilder();
    LOG.info("Using Kinesis stream: " + config.getStreamName());

    // Using number of shards as the parallelism hint for the spout.
    builder.setSpout("Kinesis", spout, 2);
    builder.setBolt("Parse", new ParseReferrerBolt(), 6).shuffleGrouping("Kinesis");
    builder.setBolt("Count", new RollingCountBolt(5, 2, elasticCacheRedisEndpoint), 6)
        .fieldsGrouping("Parse", new Fields("referrer"));
    //builder.setBolt("Count", new CountReferrerBolt(), 12).fieldsGrouping("Parse", new Fields("referrer"));

    Config topoConf = new Config();
    topoConf.setFallBackOnJavaSerialization(true);
    topoConf.setDebug(false);

    if (mode.equals("LocalMode")) {
        LOG.info("Starting sample storm topology in LocalMode ...");
        new LocalCluster().submitTopology("test_spout", topoConf, builder.createTopology());
    } else if (mode.equals("RemoteMode")) {
        topoConf.setNumWorkers(1);
        topoConf.setMaxSpoutPending(5000);
        LOG.info("Submitting sample topology " + topologyName + " to remote cluster.");
        StormSubmitter.submitTopology(topologyName, topoConf, builder.createTopology());
    } else {
        printUsageAndExit();
    }
}
Example 13
Source File: AuditActiveLoginsTopology.java From Kafka-Storm-ElasticSearch with Apache License 2.0
private static void loadTopologyPropertiesAndSubmit(Properties properties, Config config) throws Exception {
    String stormExecutionMode = properties.getProperty("storm.execution.mode", "local");
    int stormWorkersNumber = Integer.parseInt(properties.getProperty("storm.workers.number", "2"));
    int maxTaskParallelism = Integer.parseInt(properties.getProperty("storm.max.task.parallelism", "2"));
    String topologyName = properties.getProperty("storm.topology.name", "topologyName");
    String zookeeperHosts = properties.getProperty("zookeeper.hosts");
    int topologyBatchEmitMillis = Integer.parseInt(
        properties.getProperty("storm.topology.batch.interval.miliseconds", "2000"));

    // How often a batch can be emitted in a Trident topology.
    config.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, topologyBatchEmitMillis);
    config.setNumWorkers(stormWorkersNumber);
    config.setMaxTaskParallelism(maxTaskParallelism);

    AuditActiveLoginsTopology auditActiveLoginsTopology = new AuditActiveLoginsTopology(zookeeperHosts);
    StormTopology stormTopology = auditActiveLoginsTopology.buildTopology(properties);

    // Elastic Search specific properties
    config.put(StormElasticSearchConstants.ES_HOST, properties.getProperty("elasticsearch.host", "localhost"));
    config.put(StormElasticSearchConstants.ES_PORT, (Integer.parseInt(properties.getProperty("elasticsearch.port", "9300"))));
    config.put(StormElasticSearchConstants.ES_CLUSTER_NAME, properties.getProperty("elasticsearch.cluster.name"));
    config.put("elasticsearch.index", properties.getProperty("elasticsearch.index"));
    config.put("elasticsearch.type", properties.getProperty("elasticsearch.type"));

    switch (stormExecutionMode) {
        case ("cluster"):
            String nimbusHost = properties.getProperty("storm.nimbus.host", "localhost");
            String nimbusPort = properties.getProperty("storm.nimbus.port", "6627");
            config.put(Config.NIMBUS_HOST, nimbusHost);
            config.put(Config.NIMBUS_THRIFT_PORT, Integer.parseInt(nimbusPort));
            config.put(Config.STORM_ZOOKEEPER_PORT, parseZkPort(zookeeperHosts));
            config.put(Config.STORM_ZOOKEEPER_SERVERS, parseZkHosts(zookeeperHosts));
            StormSubmitter.submitTopology(topologyName, config, stormTopology);
            break;
        case ("local"):
        default:
            int localTimeExecution = Integer.parseInt(properties.getProperty("storm.local.execution.time", "20000"));
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(topologyName, config, stormTopology);
            Thread.sleep(localTimeExecution);
            cluster.killTopology(topologyName);
            cluster.shutdown();
            System.exit(0);
    }
}
Example 14
Source File: Throughput.java From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
    ParameterTool pt = ParameterTool.fromArgs(args);
    int par = pt.getInt("para");

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));

    int i = 0;
    for (; i < pt.getInt("repartitions", 1) - 1; i++) {
        System.out.println("adding source" + i + " --> source" + (i + 1));
        builder.setBolt("source" + (i + 1), new RepartPassThroughBolt(pt), pt.getInt("sinkParallelism"))
            .fieldsGrouping("source" + i, new Fields("id"));
    }
    System.out.println("adding final source" + i + " --> sink");
    builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism"))
        .fieldsGrouping("source" + i, new Fields("id"));

    Config conf = new Config();
    conf.setDebug(false);

    if (pt.has("ft") || pt.has("maxPending")) {
        conf.setMaxSpoutPending(pt.getInt("maxPending", 1000));
    }

    if (!pt.has("local")) {
        conf.setNumWorkers(par);
        StormSubmitter.submitTopologyWithProgressBar("throughput-" + pt.get("name", "no_name"), conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(par);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("throughput", conf, builder.createTopology());
        Thread.sleep(300000);
        cluster.shutdown();
    }
}
Example 15
Source File: HdfsTopology.java From storm-kafka-examples with Apache License 2.0
public static void main(String[] args) {
    try {
        String zkhost = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
        String topic = "order";
        String groupId = "id";
        int spoutNum = 3;
        int boltNum = 1;

        ZkHosts zkHosts = new ZkHosts(zkhost); // the ZooKeeper ensemble used by Kafka
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, topic, "/order", groupId); // create /order /id
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        // HDFS bolt
        // use "|" instead of "," for field delimiter
        RecordFormat format = new DelimitedRecordFormat()
            .withFieldDelimiter("|");

        // sync the filesystem after every 1k tuples
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);

        // rotate files when they reach 5MB
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(5.0f, FileSizeRotationPolicy.Units.MB);
        // FileRotationPolicy rotationPolicy = new TimedRotationPolicy(1.0f, TimedRotationPolicy.TimeUnit.MINUTES);

        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
            .withPath("/tmp/").withPrefix("order_").withExtension(".log");

        HdfsBolt hdfsBolt = new HdfsBolt()
            .withFsUrl("hdfs://wxb-1:8020")
            .withFileNameFormat(fileNameFormat)
            .withRecordFormat(format)
            .withRotationPolicy(rotationPolicy)
            .withSyncPolicy(syncPolicy);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", kafkaSpout, spoutNum);
        builder.setBolt("check", new CheckOrderBolt(), boltNum).shuffleGrouping("spout");
        builder.setBolt("counter", new CounterBolt(), boltNum).shuffleGrouping("check");
        builder.setBolt("hdfs", hdfsBolt, boltNum).shuffleGrouping("counter");

        Config config = new Config();
        config.setDebug(true);

        if (args != null && args.length > 0) {
            config.setNumWorkers(2);
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            config.setMaxTaskParallelism(2);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("Wordcount-Topology", config, builder.createTopology());
            Thread.sleep(500000);
            cluster.shutdown();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 16
Source File: WordCountTopologyNode.java From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSentence(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 17
Source File: WordCountTopology.java From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RandomSentenceSpout(), 5);
    builder.setBolt("split", new SplitSentence(), 8).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), 12).fieldsGrouping("split", new Fields("word"));

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(3);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("word-count", conf, builder.createTopology());
        Thread.sleep(10000);
        cluster.shutdown();
    }
}
Example 18
Source File: TestStormParallelism.java From eagle with Apache License 2.0
/**
 * When running this test, check the following through jstack and the logs:
 *
 * 1) For blue-spout, the number of executors is 2 and the number of tasks is 2.
 *    Expected:
 *    a. 2 threads uniquely named Thread-*-blue-spout-executor[*,*]
 *    b. each thread will have a single task
 *
 * 2) For green-bolt, the number of executors is 2 and the number of tasks is 4.
 *    Expected:
 *    a. 2 threads uniquely named Thread-*-green-bolt-executor[*,*]
 *    b. each thread will have 2 tasks
 *
 * 3) For yellow-bolt, the number of executors is 6 and the number of tasks is 6.
 *    Expected:
 *    a. 6 threads uniquely named Thread-*-yellow-bolt-executor[*,*]
 *    b. each thread will have 1 task
 *
 * Something to keep thinking about: for the alert engine, if we use multiple
 * tasks per component instead of one task per component, how does that affect
 * the parallelism mechanism?
 *
 * @throws Exception
 */
@Ignore
@Test
public void testParallelism() throws Exception {
    Config conf = new Config();
    conf.setNumWorkers(2); // use two worker processes

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("blue-spout", new BlueSpout(), 2); // parallelism hint
    topologyBuilder.setBolt("green-bolt", new GreenBolt(), 2)
        .setNumTasks(4)
        .shuffleGrouping("blue-spout");
    topologyBuilder.setBolt("yellow-bolt", new YellowBolt(), 6)
        .shuffleGrouping("green-bolt");

    LocalCluster cluster = new LocalCluster();
    // submit with conf so the worker setting above takes effect
    cluster.submitTopology("mytopology", conf, topologyBuilder.createTopology());

    while (true) {
        try {
            Thread.sleep(1000);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Example 19
Source File: ClusterInfoTopology.java From jstorm with Apache License 2.0
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setBolt("ClusterInfo", new ClusterInfoBolt(), 1);

    Config conf = new Config();
    conf.setNumWorkers(1);
    StormSubmitter.submitTopology("ClusterMonitor", conf, builder.createTopology());
}
Example 20
Source File: ForwardThroughput.java From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
    ParameterTool pt = ParameterTool.fromArgs(args);
    int par = pt.getInt("para");

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("source0", new Generator(pt), pt.getInt("sourceParallelism"));
    //builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).noneGrouping("source0");
    builder.setBolt("sink", new Sink(pt), pt.getInt("sinkParallelism")).localOrShuffleGrouping("source0");

    Config conf = new Config();
    conf.setDebug(false);
    conf.setMaxSpoutPending(pt.getInt("maxPending", 1000));

    if (!pt.has("local")) {
        conf.setNumWorkers(par);
        StormSubmitter.submitTopologyWithProgressBar("forward-throughput-" + pt.get("name", "no_name"), conf, builder.createTopology());
    } else {
        conf.setMaxTaskParallelism(par);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("forward-throughput", conf, builder.createTopology());
        Thread.sleep(300000);
        cluster.shutdown();
    }
}