Java Code Examples for backtype.storm.LocalCluster

The following examples show how to use backtype.storm.LocalCluster. They are extracted from open source projects.
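Nearly every example below follows the same pattern: build a topology, create a Config, then submit it either to an in-process LocalCluster (for development and testing) or to a real cluster via StormSubmitter. The sketch below distills that pattern; buildTopology() is a hypothetical placeholder for each project's own topology construction code, and the topology name and sleep duration are arbitrary choices, not values from any of the examples.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.StormTopology;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.utils.Utils;

public class LocalClusterUsageSketch {

    public static void main(String[] args) throws Exception {
        StormTopology topology = buildTopology();
        Config conf = new Config();
        conf.setDebug(true);

        if (args.length > 0) {
            // A topology name was supplied: deploy to a real Storm cluster.
            conf.setNumWorkers(3);
            StormSubmitter.submitTopology(args[0], conf, topology);
        } else {
            // No arguments: run the topology in-process with LocalCluster.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("local-test", conf, topology);
            Utils.sleep(10000);               // let the topology run for a while
            cluster.killTopology("local-test");
            cluster.shutdown();               // tear down the in-process cluster
        }
    }

    private static StormTopology buildTopology() {
        // Hypothetical placeholder: the examples below build their topologies
        // here with TopologyBuilder (or a Trident/transactional builder).
        // A real topology needs at least one spout before it can be submitted.
        TopologyBuilder builder = new TopologyBuilder();
        // builder.setSpout(...);
        // builder.setBolt(...).shuffleGrouping(...);
        return builder.createTopology();
    }
}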
Example 1
Project: LearnStorm   File: ReachTopology.java
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = construct();


  Config conf = new Config();

  if (args == null || args.length == 0) {
    conf.setMaxTaskParallelism(3);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));

    String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
    for (String url : urlsToTry) {
      System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(6);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Example 2
Project: storm-hbase-1.0.x   File: WordCountTrident.java
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");
        cluster.shutdown();
        System.exit(0);
    }
    else if(args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else{
        System.out.println("Usage: TridentFileTopology <hdfs url> [topology name]");
    }
}
 
Example 3
Project: splice-community-sample-code   File: MySqlToSpliceTopology.java
public static void main(String[] args) throws SQLException {

        // tableName is the name of the table in splice to insert records to
        // server is the server instance running splice
        String tableName = "students";
        String server = "localhost";
        TopologyBuilder builder = new TopologyBuilder();

        // set the spout for the topology
        builder.setSpout("seedDataFromMySql", new MySqlSpout());

        // dump the stream data into splice       
        builder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1).shuffleGrouping("seedDataFromMySql");

        Config conf = new Config();
        conf.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("mysql-splice-topology", conf, builder.createTopology());
        Utils.sleep(3000);
        cluster.shutdown();
    }
 
Example 4
Project: hadooparchitecturebook   File: MovingAvgLocalTopologyRunner.java
public static void main(String[] args) {

    Config config = new Config();
    config.setDebug(true);

    StormTopology topology = buildTopology();
    // Run locally:
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);

    // Un-comment to run on a Storm cluster instead:
    // try {
    //   StormSubmitter.submitTopology("cluster-moving-average", config, topology);
    // } catch (AlreadyAliveException e) {
    //   e.printStackTrace();
    // } catch (InvalidTopologyException e) {
    //   e.printStackTrace();
    // }
  }
 
Example 5
Project: jstorm-0.9.6.3-   File: TestTopology.java
private static void submitTopology(TopologyBuilder builder) {
	try {
		if (local_mode(conf)) {

			LocalCluster cluster = new LocalCluster();

			cluster.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());

			Thread.sleep(200000);

			cluster.shutdown();
		} else {
			StormSubmitter.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());
		}

	} catch (Exception e) {
		LOG.error(e.getMessage(), e.getCause());
	}
}
 
Example 6
Project: web-crawler   File: WebCrawlerTopology.java
public static void main(String[] args) throws Exception {

        if (args.length < 1) {
            System.err.println("[ERROR] Configuration File Required");
            return;
        }
        Config conf = new Config();

        // Store all the configuration in the Storm conf object
        conf.putAll(readConfigFile(args[0]));

        //Second arg should be local in order to run locally
        if(args.length  < 2 || (args.length  == 2 && !args[1].equals("local"))) {
            StormSubmitter.submitTopologyWithProgressBar("crawler_topology", conf, buildTopology(conf, null));
        }
        else {
            LocalDRPC drpc = new LocalDRPC();
            LocalCluster localcluster = new LocalCluster();
            localcluster.submitTopology("crawler_topology",conf,buildTopology(conf, drpc));

            String searchQuery = "elasticsearch";
            System.out.println("---* Result (search): " + drpc.execute("search",  searchQuery));
        }
    }
 
Example 7
Project: learn_jstorm   File: TestTopology.java
private static void submitTopology(TopologyBuilder builder) {
	try {
		if (local_mode(conf)) {

			LocalCluster cluster = new LocalCluster();

			cluster.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());

			Thread.sleep(200000);

			cluster.shutdown();
		} else {
			StormSubmitter.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());
		}

	} catch (Exception e) {
		LOG.error(e.getMessage(), e.getCause());
	}
}
 
Example 8
Project: StreamBench   File: IterativeTest.java
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();


    builder.setSpout("spout", new NumberSpout());
    builder.setBolt("minusone", new MinusOne())
            .shuffleGrouping("spout")
            .shuffleGrouping("DoNothing", "GreaterThanZero");


    builder.setBolt("DoNothing", new Filter())
            .shuffleGrouping("minusone");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
 
Example 9
Project: StreamBench   File: KMeansTest.java
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();


    builder.setSpout("points", new PointSpout());
    builder.setBolt("assign", new Assign())
            .shuffleGrouping("points")
            .allGrouping("aggregator", "centroids");

    builder.setBolt("aggregator", new Aggregator())
            .fieldsGrouping("assign", new Fields("centroid_index"));

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
 
Example 10
Project: StreamBench   File: TickTest.java
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.ignoreZkOffsets = true;

    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    builder.setBolt("counter", new CounterBolt(), 3).shuffleGrouping("split");
    builder.setBolt("aggregator", new AggregatorBolt(), 1)
            .fieldsGrouping("counter", Utils.DEFAULT_STREAM_ID, new Fields("word"))
            .allGrouping("counter", "tick");

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
 
Example 11
Project: StreamBench   File: AppTest.java
public static void main(String[] args) throws WorkloadException {
    TopologyBuilder builder = new TopologyBuilder();
    BrokerHosts hosts = new ZkHosts("localhost:2181");
    SpoutConfig spoutConfig = new SpoutConfig(hosts, "WordCount", "/" + "WordCount", UUID.randomUUID().toString());
    spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    spoutConfig.ignoreZkOffsets = true;

    builder.setSpout("spout", new KafkaSpout(spoutConfig));
    builder.setBolt("split", new SplitSentence()).shuffleGrouping("spout");
    builder.setBolt("counter", new CounterBolt(), 3).fieldsGrouping("split", new Fields("wordCountPair"));

    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("kafka-spout", conf, builder.createTopology());
}
 
Example 12
Project: LogRTA   File: KafkaTopology.java
public static void main(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new KafkaSpoutTest(""), 1);
    builder.setBolt("bolt1", new Bolt1(), 2).shuffleGrouping("spout");
    builder.setBolt("bolt2", new Bolt2(), 2).fieldsGrouping("bolt1",new Fields("word"));
 
    Map conf = new HashMap();
    conf.put(Config.TOPOLOGY_WORKERS, 1);
    conf.put(Config.TOPOLOGY_DEBUG, true);
 
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("flume-kafka-storm-integration", conf, builder.createTopology());
     
    Utils.sleep(1000*60*5);
    cluster.shutdown();
}
 
Example 13
Project: spiderz   File: WikiCrawlerTopology.java
public static void main(String args[]) throws Exception {
	
	// check validity of command line arguments
	if(args.length != 2) {
		System.out.println("Command line arguments missing\n");
		System.out.println("Pass redis IP and port\n");
		return;
	}
	
	// configure the topology
	Config conf = new Config();
	conf.setDebug(false);
	conf.setNumWorkers(numWorkers);

	LocalCluster cluster = new LocalCluster();
	StormTopology topology = buildTopology(args[0], args[1]);
	cluster.submitTopology("crawler", conf, topology);

	System.out.println("\n>>>> TOPOLOGY - STATUS OK\n");
}
 
Example 14
Project: learningJava   File: TopologyMain.java
public static void main(String[] args) throws InterruptedException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new WordReader());
    builder.setBolt("word-normalizer", new WordNormalizer()).shuffleGrouping("word-reader");
    builder.setBolt("word-counter", new WordCounter(), 2).fieldsGrouping("word-normalizer", new Fields("word"));

    Config conf = new Config();
    conf.setMaxTaskParallelism(3);
    conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 3);
    conf.setDebug(false);

    //Topology run
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("storm-wordcount", conf, builder.createTopology());
    Thread.sleep(30000);
    cluster.shutdown();
}
 
Example 15
Project: alfresco-apache-storm-demo   File: ConfigurableTopology.java
protected int submit(String name, Config conf, TopologyBuilder builder) {

        // register Metadata for serialization with FieldsSerializer
        Config.registerSerialization(conf, Metadata.class);

        if (isLocal) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(name, conf, builder.createTopology());
            if (ttl != -1) {
                Utils.sleep(ttl * 1000);
                cluster.shutdown();
            }
        }

        else {
            try {
                StormSubmitter.submitTopology(name, conf,
                        builder.createTopology());
            } catch (Exception e) {
                e.printStackTrace();
                return -1;
            }
        }
        return 0;
    }
 
Example 16
Project: LogRTA   File: SimpleJoinTopology.java
public static void main(String[] args) {
    NginxSplitBolt nginxBolt = new NginxSplitBolt();
    ServiceLogBolt serviceBolt = new ServiceLogBolt();

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("nginx", new KafkaSpoutTest("log.accesslog"), 1);
    builder.setSpout("service", new KafkaSpoutTest("log.servicelog"), 1);

    builder.setBolt("nginxlog", nginxBolt).shuffleGrouping("nginx");
    builder.setBolt("servicelog", serviceBolt).shuffleGrouping("service");

    builder.setBolt("join", new SingleJoinBolt(new Fields("method", "time", "usetime", "params")))
            .fieldsGrouping("nginxlog", new Fields("ip", "utime"))
            .fieldsGrouping("servicelog", new Fields("ip", "utime"));

    Config conf = new Config();
    conf.setDebug(true);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("log - join", conf, builder.createTopology());

    Utils.sleep(2000);
    cluster.shutdown();
}
 
Example 17
Project: big-data-system   File: TridentWordCount.java
public static void main(String[] args) throws Exception {
  Config conf = new Config();
  conf.setMaxSpoutPending(20);
  if (args.length == 0) {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    for (int i = 0; i < 100; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
      Thread.sleep(1000);
    }
  }
  else {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
  }
}
 
Example 18
Project: big-data-system   File: TransactionalWords.java
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
  builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
  builder.setBolt("bucketize", new Bucketize()).noneGrouping("count");
  builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));


  LocalCluster cluster = new LocalCluster();

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  cluster.submitTopology("top-n-topology", config, builder.buildTopology());

  Thread.sleep(3000);
  cluster.shutdown();
}
 
Example 19
Project: big-data-system   File: ManualDRPC.java
public static void main(String[] args) {
  TopologyBuilder builder = new TopologyBuilder();
  LocalDRPC drpc = new LocalDRPC();

  DRPCSpout spout = new DRPCSpout("exclamation", drpc);
  builder.setSpout("drpc", spout);
  builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
  builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");

  LocalCluster cluster = new LocalCluster();
  Config conf = new Config();
  cluster.submitTopology("exclaim", conf, builder.createTopology());

  System.out.println(drpc.execute("exclamation", "aaa"));
  System.out.println(drpc.execute("exclamation", "bbb"));

}
 
Example 20
Project: big-data-system   File: ReachTopology.java
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = construct();


  Config conf = new Config();

  if (args == null || args.length == 0) {
    conf.setMaxTaskParallelism(3);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));

    String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
    for (String url : urlsToTry) {
      System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(6);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Example 21
Project: big-data-system   File: ExclamationTopology.java
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word", new TestWordSpout(), 10);
  builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);

    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  }
  else {

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example 22
Project: big-data-system   File: CopyOfPrintSampleStream.java
public static void main(String[] args) {
    String consumerKey = args[0]; 
    String consumerSecret = args[1]; 
    String accessToken = args[2]; 
    String accessTokenSecret = args[3];
    String[] arguments = args.clone();
    String[] keyWords = Arrays.copyOfRange(arguments, 4, arguments.length);
    
    TopologyBuilder builder = new TopologyBuilder();
    
    builder.setSpout("twitter", new TwitterSampleSpout(consumerKey, consumerSecret,
                            accessToken, accessTokenSecret, keyWords));
    builder.setBolt("print", new PrinterBolt())
            .shuffleGrouping("twitter");
            
            
    Config conf = new Config();
    
    
    LocalCluster cluster = new LocalCluster();
    
    cluster.submitTopology("test", conf, builder.createTopology());
    
    Utils.sleep(10000);
    cluster.shutdown();
}
 
Example 23
Project: big-data-system   File: BasicDRPCTopology.java
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
  builder.addBolt(new ExclaimBolt(), 3);

  Config conf = new Config();

  if (args == null || args.length == 0) {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();

    cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));

    for (String word : new String[]{ "hello", "goodbye" }) {
      System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Example 24
Project: jstrom   File: TestTopology.java
private static void submitTopology(TopologyBuilder builder) {
	try {
		if (local_mode(conf)) {

			LocalCluster cluster = new LocalCluster();

			cluster.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());

			Thread.sleep(200000);

			cluster.shutdown();
		} else {
			StormSubmitter.submitTopology(
					String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());
		}

	} catch (Exception e) {
		LOG.error(e.getMessage(), e.getCause());
	}
}
 
Example 25
Project: jstrom   File: TestTopology.java
private static void submitTopology(TopologyBuilder builder) {
	try {
		if (local_mode(conf)) {

			LocalCluster cluster = new LocalCluster();

			cluster.submitTopology(String.valueOf(conf.get("topology.name")), conf, builder.createTopology());

			Thread.sleep(200000);

			cluster.shutdown();
		} else {
			StormSubmitter.submitTopology(String.valueOf(conf.get("topology.name")), conf,
					builder.createTopology());
		}

	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 26
Project: jstrom   File: TransactionalWordsTest.java
@Test
public void test_transaction_word() {
    try {
        MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
        TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
        builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
        builder.setBolt("bucketize", new Bucketize()).shuffleGrouping("count");
        builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));

        LocalCluster cluster = new LocalCluster();

        Config config = new Config();
        config.setDebug(true);
        config.setMaxSpoutPending(3);

        cluster.submitTopology("top-n-topology", config, builder.buildTopology());

        JStormUtils.sleepMs(60 * 1000);
        cluster.shutdown();
    } catch (Exception e) {
        Assert.fail("Failed to run simple transaction");
    }

}
 
Example 27
Project: Infrastructure   File: AbstractTopology.java
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @param topo the topology instance
 * @throws Exception in case of creation problems
 */
public static void main(String[] args, AbstractTopology topo) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    topo.createTopology(config, b);
    
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    
    if (args != null && args.length > 0) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("testGenPip", config, b.createTopology());
    }
}
 
Example 28
Project: Infrastructure   File: Topology.java
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    b.close(args[0], config);
    
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    
    if (args != null && args.length > 0) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(Naming.PIPELINE_NAME, config, b.createTopology());
    }
}
 
Example 29
Project: Infrastructure   File: TestTopology.java
/**
 * Creates a standalone topology.
 * 
 * @param args the topology arguments
 * @throws Exception in case of creation problems
 */
public static void main(String[] args) throws Exception {
    Config config = new Config();
    Naming.setDefaultInitializeAlgorithms(config, defaultInitAlgorithms);
    config.setMessageTimeoutSecs(100);
    PipelineOptions options = new PipelineOptions(args);
    RecordingTopologyBuilder b = new RecordingTopologyBuilder(options);
    createTopology(b);
    b.close(args[0], config);
    
    // main topology: int numWorkers = options.getNumberOfWorkers(2);
    options.toConf(config);
    
    if (args != null && args.length > 0) {
        config.setNumWorkers(2);
        StormSubmitter.submitTopology(args[0], config, b.createTopology());
    } else {
        config.setMaxTaskParallelism(2);
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(PIP_NAME, config, b.createTopology());
    }
}
 
Example 30
Project: Infrastructure   File: TestTopology.java
/**
 * Main method.
 * @param args the arguments
 */
public static void main(String[] args) {
  //create the main topology.
    options = new PipelineOptions(args);
    MainTopologyCreator topoCreator = new MainTopologyCreator();
    TopologyOutput topo = topoCreator.createMainTopology();
    //get the topology information
    config = topo.getConfig();
    TopologyBuilder builder = topo.getBuilder();
    int defNumWorkers = topo.getNumWorkers();
    options.toConf(config);
    
    
    if (args != null && args.length > 0) {
        config.setNumWorkers(defNumWorkers);
        try {
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } catch (AlreadyAliveException | InvalidTopologyException e) {
            e.printStackTrace();
        }
    } else {
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
    }
}
 
Example 31
Project: storm-lib   File: FunctionTopology.java
void run(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word_spout", new TestWordSpout(), 2);
    builder.setBolt("tuple_double", new Function(new Double(), new Fields("word1", "word2")), 2)
            .shuffleGrouping("word_spout");

    Map conf = new HashMap();
    conf.put(Config.TOPOLOGY_WORKERS, 4);
    conf.put(Config.TOPOLOGY_DEBUG, true);

    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("MockIngest", conf, builder.createTopology());
    } else {
        try {
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
 
Example 32
Project: LogRTA   File: ExclamationTopology.java
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("word", new TestWordSpout(), 10);
    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

    Config conf = new Config();
    conf.setDebug(true);

    if (args != null && args.length > 0) {
        conf.setNumWorkers(3);

        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    }
    else {

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("www_nginx_accesslog_stat", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.killTopology("www_nginx_accesslog_stat");
        cluster.shutdown();
    }
}
 
Example 33
Project: storm-lib   File: BatchMutationTopology.java
void run(String[] args) {
    TopologyBuilder builder = new TopologyBuilder();

    KVSchema names = new KVSchema("names", "word");
    names.colFam.addStatic("NAME");
    names.colQual.add("word").addStatic("\u0000").add("word");

    builder.setSpout("word_spout", new TestWordSpout(), 2);
    builder.setBolt("mutate", new BatchMutation(names, true), 2).shuffleGrouping("word_spout");

    Map conf = new HashMap();
    conf.put(Config.TOPOLOGY_WORKERS, 4);
    conf.put(Config.TOPOLOGY_DEBUG, true);

    if (args.length == 0) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("MockIngest", conf, builder.createTopology());
    } else {
        try {
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
 
Example 34
Project: cdh-storm   File: TransactionalGlobalCount.java
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  LocalCluster cluster = new LocalCluster();

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  cluster.submitTopology("global-count-topology", config, builder.buildTopology());

  Thread.sleep(3000);
  cluster.shutdown();
}
 
Example 35
Project: cdh-storm   File: TridentWordCount.java
public static void main(String[] args) throws Exception {
  Config conf = new Config();
  conf.setMaxSpoutPending(20);
  if (args.length == 0) {
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("wordCounter", conf, buildTopology(drpc));
    for (int i = 0; i < 100; i++) {
      System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
      Thread.sleep(1000);
    }
  }
  else {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, buildTopology(null));
  }
}
 
Example 36
Project: cdh-storm   File: TransactionalWords.java
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("top-n-words", "spout", spout, 2);
  builder.setBolt("count", new KeyedCountUpdater(), 5).fieldsGrouping("spout", new Fields("word"));
  builder.setBolt("bucketize", new Bucketize()).noneGrouping("count");
  builder.setBolt("buckets", new BucketCountUpdater(), 5).fieldsGrouping("bucketize", new Fields("bucket"));


  LocalCluster cluster = new LocalCluster();

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  cluster.submitTopology("top-n-topology", config, builder.buildTopology());

  Thread.sleep(3000);
  cluster.shutdown();
}
 
Example 37
Project: cdh-storm   File: ManualDRPC.java
public static void main(String[] args) {
  TopologyBuilder builder = new TopologyBuilder();
  LocalDRPC drpc = new LocalDRPC();

  DRPCSpout spout = new DRPCSpout("exclamation", drpc);
  builder.setSpout("drpc", spout);
  builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
  builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");

  LocalCluster cluster = new LocalCluster();
  Config conf = new Config();
  cluster.submitTopology("exclaim", conf, builder.createTopology());

  System.out.println(drpc.execute("exclamation", "aaa"));
  System.out.println(drpc.execute("exclamation", "bbb"));

}
 
Example 38
Project: cdh-storm   File: ReachTopology.java
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = construct();


  Config conf = new Config();

  if (args == null || args.length == 0) {
    conf.setMaxTaskParallelism(3);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));

    String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
    for (String url : urlsToTry) {
      System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(6);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Example 39
Project: cdh-storm   File: PrintSampleStream.java
public static void main(String[] args) {
    String consumerKey = args[0]; 
    String consumerSecret = args[1]; 
    String accessToken = args[2]; 
    String accessTokenSecret = args[3];
    String[] arguments = args.clone();
    String[] keyWords = Arrays.copyOfRange(arguments, 4, arguments.length);
    
    TopologyBuilder builder = new TopologyBuilder();
    
    builder.setSpout("spoutId", new TwitterSampleSpout(consumerKey, consumerSecret,
                            accessToken, accessTokenSecret, keyWords));
    builder.setBolt("print", new PrinterBolt())
            .shuffleGrouping("spout");
            
            
    Config conf = new Config();
    
    
    LocalCluster cluster = new LocalCluster();
    
    cluster.submitTopology("test", conf, builder.createTopology());
    
    Utils.sleep(10000);
    cluster.shutdown();
}
 
Example 40
Project: cdh-storm   File: ExclamationTopology.java
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word", new TestWordSpout(), 10);
  builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);

    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  }
  else {

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}