Java Code Examples for backtype.storm.generated.StormTopology

The following examples show how to use backtype.storm.generated.StormTopology. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: jstorm   Source File: ServiceHandler.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Get the StormTopology of a topology by deserializing its locally stored
 * code and conf files (read through the blob store).
 *
 * @param id String: topology id
 * @return the system topology (user topology augmented by Common.system_topology)
 * @throws TException if the topology is not alive or deserialization fails
 */
@Override
public StormTopology getTopology(String id) throws TException {
    StormTopology topology;
    try {
        // Raw user-submitted topology; null means nothing stored under this id.
        StormTopology stormtopology = StormConfig.read_nimbus_topology_code(id, data.getBlobStore());
        if (stormtopology == null) {
            throw new NotAliveException("No topology of " + id);
        }

        Map<Object, Object> topologyConf = (Map<Object, Object>) StormConfig.read_nimbus_topology_conf(id, data.getBlobStore());
        // Augment the user topology with system components (ackers, etc.).
        topology = Common.system_topology(topologyConf, stormtopology);
    } catch (Exception e) {
        LOG.error("Failed to get topology " + id + ",", e);
        // Preserve the original exception as the cause instead of discarding it.
        throw new TException("Failed to get system_topology", e);
    }
    return topology;
}
 
Example 2
Source Project: storm-solr   Source File: EventsimTopology.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Assembles the eventsim topology: a Spring-managed spout feeding a Solr
 * indexing bolt through a custom hash-range grouping.
 */
public StormTopology build(StreamingApp app) throws Exception {
  SpringSpout spout = new SpringSpout("eventsimSpout", spoutFields);
  SpringBolt solrBolt = new SpringBolt("collectionPerTimeFrameSolrBoltAction",
      app.tickRate("collectionPerTimeFrameSolrBoltAction"));

  // Route all docs for the same hash range to the same bolt instance so they
  // can be streamed directly to the shard leader.
  int numShards = Integer.parseInt(String.valueOf(app.getStormConfig().get("spring.eventsimNumShards")));
  HashRangeGrouping grouping = new HashRangeGrouping(app.getStormConfig(), numShards);
  int boltTasks = 2 * grouping.getNumShards();

  TopologyBuilder topologyBuilder = new TopologyBuilder();
  topologyBuilder.setSpout("eventsimSpout", spout, app.parallelism("eventsimSpout"));
  topologyBuilder
      .setBolt("collectionPerTimeFrameSolrBolt", solrBolt, boltTasks)
      .customGrouping("eventsimSpout", grouping);

  return topologyBuilder.createTopology();
}
 
Example 3
Source Project: flink-perf   Source File: TridentWordCount.java    License: Apache License 2.0 6 votes vote down vote up
/** Builds the classic Trident word-count topology plus a DRPC query stream. */
public static StormTopology buildTopology(LocalDRPC drpc) {
  FixedBatchSpout sentenceSpout = new FixedBatchSpout(new Fields("sentence"), 3,
      new Values("the cow jumped over the moon"),
      new Values("the man went to the store and bought some candy"),
      new Values("four score and seven years ago"),
      new Values("how many apples can you eat"),
      new Values("to be or not to be the person"));
  sentenceSpout.setCycle(true); // replay the batches forever

  TridentTopology topology = new TridentTopology();
  // Split sentences into words and keep per-word counts in an in-memory map state.
  TridentState wordCounts = topology
      .newStream("spout1", sentenceSpout)
      .parallelismHint(16)
      .each(new Fields("sentence"), new Split(), new Fields("word"))
      .groupBy(new Fields("word"))
      .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
      .parallelismHint(16);

  // DRPC stream: split the query args, look up each word's count, drop
  // missing words, and sum the counts.
  topology
      .newDRPCStream("words", drpc)
      .each(new Fields("args"), new Split(), new Fields("word"))
      .groupBy(new Fields("word"))
      .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
      .each(new Fields("count"), new FilterNull())
      .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
  return topology.build();
}
 
Example 4
Source Project: trident-tutorial   Source File: TopHashtagByCountry.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Counts hashtags per country from geo-tagged tweets and exposes a DRPC
 * stream returning the top three hashtags of each country.
 */
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout) throws IOException {

        TridentTopology tridentTopology = new TridentTopology();

        // Persistent (country, hashtag) counts backed by an in-memory map state.
        TridentState hashtagCounts = tridentTopology
                .newStream("tweets", spout)
                .each(new Fields("str"), new ParseTweet(), new Fields("status", "content", "user"))
                .project(new Fields("content", "user", "status"))
                .each(new Fields("content"), new OnlyHashtags())
                .each(new Fields("status"), new OnlyGeo())
                .each(new Fields("status", "content"), new ExtractLocation(), new Fields("country", "contentName"))
                .groupBy(new Fields("country", "contentName"))
                .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));

        // DRPC query: enumerate the state, look up counts, and keep the top
        // three hashtags per country sorted by count.
        tridentTopology
                .newDRPCStream("location_hashtag_count")
                .stateQuery(hashtagCounts, new TupleCollectionGet(), new Fields("country", "contentName"))
                .stateQuery(hashtagCounts, new Fields("country", "contentName"), new MapGet(), new Fields("count"))
                .groupBy(new Fields("country"))
                .aggregate(new Fields("contentName", "count"), new FirstN.FirstNSortedAgg(3, "count", true), new Fields("contentName", "count"));

        return tridentTopology.build();
    }
 
Example 5
public static void main(String[] args) {

    // Debug mode so tuple flow shows up in the logs.
    Config conf = new Config();
    conf.setDebug(true);

    StormTopology movingAvgTopology = buildTopology();

    // Runs inside an in-process local cluster:
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("local-moving-avg", conf, movingAvgTopology);

    // Un-comment to submit to a real Storm cluster instead:
    // try {
    //   StormSubmitter.submitTopology("cluster-moving-average",
    //                                 conf,
    //                                 movingAvgTopology);
    // } catch (AlreadyAliveException e) {
    //   e.printStackTrace();
    // } catch (InvalidTopologyException e) {
    //   e.printStackTrace();
    // }
  }
 
Example 6
Source Project: storm-benchmark   Source File: RollingSort.java    License: Apache License 2.0 6 votes vote down vote up
/** Assembles the rolling-sort benchmark: a random-message spout feeding sort bolts. */
@Override
public StormTopology getTopology(Config config) {
  // All knobs come from the benchmark config with library defaults.
  final int spoutParallelism = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int sortParallelism = BenchmarkUtils.getInt(config, SORT_BOLT_NUM, DEFAULT_SORT_BOLT_NUM);
  final int messageSize = BenchmarkUtils.getInt(config, RandomMessageSpout.MESSAGE_SIZE,
          RandomMessageSpout.DEFAULT_MESSAGE_SIZE);
  final int chunkSize = BenchmarkUtils.getInt(config, SortBolt.CHUNK_SIZE,
          SortBolt.DEFAULT_CHUNK_SIZE);
  final int emitFrequency = BenchmarkUtils.getInt(config, SortBolt.EMIT_FREQ,
          SortBolt.DEFAULT_EMIT_FREQ);

  spout = new RandomMessageSpout(messageSize, BenchmarkUtils.ifAckEnabled(config));

  TopologyBuilder topologyBuilder = new TopologyBuilder();
  topologyBuilder.setSpout(SPOUT_ID, spout, spoutParallelism);
  topologyBuilder.setBolt(SORT_BOLT_ID, new SortBolt(emitFrequency, chunkSize), sortParallelism)
          .localOrShuffleGrouping(SPOUT_ID);
  return topologyBuilder.createTopology();
}
 
Example 7
Source Project: jstorm   Source File: TopologyContext.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a task-level topology context. Shared topology/conf/task maps are
 * delegated to the superclass; per-task extras (task id, executor data,
 * metrics registry, ZK cluster state) are kept on this instance.
 */
public TopologyContext(StormTopology topology, Map stormConf, Map<Integer, String> taskToComponent,
                       Map<String, List<Integer>> componentToSortedTasks,
                       Map<String, Map<String, Fields>> componentToStreamToFields,
                       String stormId, String codeDir, String workerId, Integer taskId, Integer workerPort,
                       List<Integer> workerTasks, Map<String, Object> defaultResources,
                       Map<String, Object> userResources, Map<String, Object> executorData,
                       Map registeredMetrics, clojure.lang.Atom openOrPrepareWasCalled, StormClusterState zkCluster) {
    super(topology, stormConf, taskToComponent, componentToSortedTasks, componentToStreamToFields,
            stormId, codeDir, workerId, workerPort, workerTasks,
            defaultResources, userResources);
    // State not covered by the superclass: this task's id and shared
    // per-worker structures this context must expose.
    _taskId = taskId;
    _executorData = executorData;
    _registeredMetrics = registeredMetrics;
    _openOrPrepareWasCalled = openOrPrepareWasCalled;
    _zkCluster = zkCluster;
}
 
Example 8
Source Project: jstorm   Source File: FluxBuilder.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a StormTopology by instantiating an external topology source object
 * and invoking its configured getTopology-style method, passing either a
 * Storm {@code Config} or the raw config map depending on the method's
 * declared parameter type.
 */
private static StormTopology buildExternalTopology(ObjectDef def, ExecutionContext context)
        throws ClassNotFoundException, IllegalAccessException, InstantiationException, NoSuchMethodException,
        InvocationTargetException {

    Object topologySource = buildObject(def, context);

    String methodName = context.getTopologyDef().getTopologySource().getMethodName();
    Method getTopology = findGetTopologyMethod(topologySource, methodName);
    Class<?>[] paramTypes = getTopology.getParameterTypes();
    // Guard the array access: a zero-arg method would otherwise throw
    // ArrayIndexOutOfBoundsException here instead of a meaningful reflection error.
    if (paramTypes.length > 0 && paramTypes[0].equals(Config.class)) {
        Config config = new Config();
        config.putAll(context.getTopologyDef().getConfig());
        return (StormTopology) getTopology.invoke(topologySource, config);
    } else {
        return (StormTopology) getTopology.invoke(topologySource, context.getTopologyDef().getConfig());
    }
}
 
Example 9
Source Project: trident-tutorial   Source File: GlobalTop20Hashtags.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Counts hashtags per follower class from English tweets and exposes a DRPC
 * stream returning the five most frequent hashtags.
 */
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout) throws IOException {

        TridentTopology tridentTopology = new TridentTopology();

        // Persistent (followerClass, hashtag) counts in an in-memory map state.
        TridentState hashtagCounts = tridentTopology
                .newStream("tweets", spout)
                .each(new Fields("str"), new ParseTweet(), new Fields("text", "content", "user"))
                .project(new Fields("content", "user"))
                .each(new Fields("content"), new OnlyHashtags())
                .each(new Fields("user"), new OnlyEnglish())
                .each(new Fields("content", "user"), new ExtractFollowerClassAndContentName(), new Fields("followerClass", "contentName"))
                .groupBy(new Fields("followerClass", "contentName"))
                .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"));

        // DRPC query: walk the state, fetch counts, keep the top five sorted by count.
        tridentTopology
                .newDRPCStream("top_hashtags")
                .stateQuery(hashtagCounts, new TupleCollectionGet(), new Fields("followerClass", "contentName"))
                .stateQuery(hashtagCounts, new Fields("followerClass", "contentName"), new MapGet(), new Fields("count"))
                .aggregate(new Fields("contentName", "count"), new FirstN.FirstNSortedAgg(5, "count", true), new Fields("contentName", "count"));

        return tridentTopology.build();
    }
 
Example 10
Source Project: galaxy-sdk-java   Source File: SimpleEMQTopology.java    License: Apache License 2.0 6 votes vote down vote up
/** Entry point: builds the EMQ topology from a config file and submits it. */
public static void main(String[] args) throws Exception {

        // Exactly one argument is expected: the topology config file path.
        if (args == null || args.length != 1) {
            System.err.println("Usage: ./bin/storm jar "
                    + "${your_topology-jar-with-dependencies.jar}"
                    + "${package.path.main.class} ${config_file_for_the_topology}");
            System.exit(-1);
        }

        // Build the StormTopology from the user-supplied configuration.
        String configFile = args[0];
        Map config = ConfigHelper.getTopologyConfig(configFile);
        SimpleEMQTopology emqTopology = new SimpleEMQTopology(config);
        StormTopology stormTopology = emqTopology.buildTopology();

        SubmitTopologyHelper.submitTopology(stormTopology, config);
    }
 
Example 11
Source Project: flux   Source File: TridentTopologySource.java    License: Apache License 2.0 6 votes vote down vote up
/** Word-count example: a cycling sentence spout feeding a persistent count state. */
public StormTopology getTopology(Config config) {

        // Cycling batch spout emitting two-word sentences.
        this.spout = new FixedBatchSpout(new Fields("sentence"), 20,
                new Values("one two"),
                new Values("two three"),
                new Values("three four"),
                new Values("four five"),
                new Values("five six"));

        TridentTopology tridentTopology = new TridentTopology();

        // sentence -> words -> per-word counts held in an in-memory map state.
        tridentTopology.newStream("wordcount", spout)
                .name("sentence")
                .parallelismHint(1)
                .shuffle()
                .each(new Fields("sentence"), new Split(), new Fields("word"))
                .parallelismHint(1)
                .groupBy(new Fields("word"))
                .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
                .parallelismHint(1);

        return tridentTopology.build();
    }
 
Example 12
Source Project: flux   Source File: SimpleTopologySource.java    License: Apache License 2.0 6 votes vote down vote up
/** Minimal flux example: a shell spout emitting sentences into a logging bolt. */
@Override
public StormTopology getTopology(Map<String, Object> config) {
    TopologyBuilder topologyBuilder = new TopologyBuilder();

    // Spout: a node.js script that emits random sentences on the "word" field.
    FluxShellSpout sentenceSpout = new FluxShellSpout(
            new String[]{"node", "randomsentence.js"},
            new String[]{"word"});
    topologyBuilder.setSpout("sentence-spout", sentenceSpout, 1);

    // Bolt: logs whatever it receives.
    topologyBuilder
            .setBolt("log-bolt", new LogInfoBolt(), 1)
            .shuffleGrouping("sentence-spout");

    return topologyBuilder.createTopology();
}
 
Example 13
Source Project: jstorm   Source File: ContextMaker.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates the per-task TopologyContext, pulling all shared maps from this
 * worker's data.
 */
public TopologyContext makeTopologyContext(StormTopology topology, Integer taskId,
                                           clojure.lang.Atom openOrPrepareWasCalled) {
    // Snapshot the worker-level storm conf into a fresh map.
    Map conf = new HashMap();
    conf.putAll(workerData.getStormConf());
    String topoId = workerData.getTopologyId();

    HashMap<String, Map<String, Fields>> streamToFields =
            workerData.generateComponentToStreamToFields(topology);

    return new TopologyContext(topology, conf, workerData.getTasksToComponent(),
            workerData.getComponentToSortedTasks(), streamToFields,
            topoId, resourcePath, workerId, taskId, workerData.getPort(), workerTasks,
            workerData.getDefaultResources(), workerData.getUserResources(),
            workerData.getExecutorData(), workerData.getRegisteredMetrics(),
            openOrPrepareWasCalled, workerData.getZkCluster());
}
 
Example 14
Source Project: storm-benchmark   Source File: LocalRunner.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs the named benchmark on an in-process LocalCluster for the configured
 * runtime, collecting metrics while it runs.
 *
 * @param name simple class name of the benchmark under {@code PACKAGE}
 */
private static void run(String name)
        throws ClassNotFoundException, IllegalAccessException,
        InstantiationException, AlreadyAliveException, InvalidTopologyException {
  LOG.info("running benchmark " + name);
  IBenchmark benchmark = (IBenchmark) Runner.getApplicationFromName(PACKAGE + "." + name);
  Config config = new Config();
  config.putAll(Utils.readStormConfig());
  config.setDebug(true);
  StormTopology topology = benchmark.getTopology(config);
  LocalCluster localCluster = new LocalCluster();
  localCluster.submitTopology(name, config, topology);
  // How long to let the topology run before shutting the cluster down.
  final int runtime = BenchmarkUtils.getInt(config, MetricsCollectorConfig.METRICS_TOTAL_TIME,
          MetricsCollectorConfig.DEFAULT_TOTAL_TIME);
  IMetricsCollector collector = benchmark.getMetricsCollector(config, topology);
  collector.run();
  try {
    Thread.sleep(runtime);
  } catch (InterruptedException e) {
    LOG.error("benchmark interrupted", e);
    // Restore the interrupt flag so callers can still observe the interruption.
    Thread.currentThread().interrupt();
  }
  localCluster.shutdown();
}
 
Example 15
Source Project: flowmix   Source File: ExampleRunner.java    License: Apache License 2.0 6 votes vote down vote up
/** Builds a Flowmix topology from mock flows/events and runs it on a local cluster. */
public void run() {

    // Assemble the topology: flow loader + mock event generator -> printer bolt.
    StormTopology flowmixTopology = new FlowmixBuilder()
        .setFlowLoader(new SimpleFlowLoaderSpout(provider.getFlows(), 60000))
        .setEventsLoader(new MockEventGeneratorSpout(getMockEvents(), 10))
        .setOutputBolt(new PrinterBolt())
        .setParallelismHint(6)
        .create()
        .createTopology();

    // Cluster/runtime settings, including Kryo registration for events.
    Config config = new Config();
    config.setNumWorkers(20);
    config.setMaxSpoutPending(5000);
    config.setDebug(false);
    config.registerSerialization(BaseEvent.class, EventSerializer.class);
    config.setSkipMissingKryoRegistrations(false);

    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("example-topology", config, flowmixTopology);
  }
 
Example 16
/**
 * Wires the audit-log analysis topology:
 * KafkaSpout -> AuditParserBolt -> AuditLoginsCounterBolt -> ElasticSearchBolt.
 */
public StormTopology buildTopology(Properties properties) {

	// Kafka source, reading plain strings from the configured topic.
	String topic = properties.getProperty("kafka.topic");
	SpoutConfig spoutConfig = new SpoutConfig(kafkaBrokerHosts, topic, "", "storm");
	spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

	// Audit-specific processing bolts.
	AuditParserBolt parserBolt = new AuditParserBolt();
	AuditLoginsCounterBolt counterBolt = new AuditLoginsCounterBolt();

	// Elasticsearch sink using the default tuple mapping.
	TupleMapper tupleMapper = new DefaultTupleMapper();
	ElasticSearchBolt esBolt = new ElasticSearchBolt(tupleMapper);

	TopologyBuilder topologyBuilder = new TopologyBuilder();
	topologyBuilder.setSpout("KafkaSpout", new KafkaSpout(spoutConfig), 1);
	topologyBuilder.setBolt("ParseBolt", parserBolt, 1).shuffleGrouping("KafkaSpout");
	topologyBuilder.setBolt("CountBolt", counterBolt, 1).shuffleGrouping("ParseBolt");
	topologyBuilder.setBolt("ElasticSearchBolt", esBolt, 1)
			.fieldsGrouping("CountBolt", new Fields("id", "index", "type", "document"));

	return topologyBuilder.createTopology();
}
 
Example 17
Source Project: storm-benchmark   Source File: TridentWordCount.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the Trident word-count benchmark topology, reading sentences from
 * Kafka. Stage parallelism is taken from the benchmark config.
 */
@Override
public StormTopology getTopology(Config config) {
  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int splitNum = BenchmarkUtils.getInt(config, SPLIT_NUM, DEFAULT_SPLIT_BOLT_NUM);
  final int countNum = BenchmarkUtils.getInt(config, COUNT_NUM, DEFAULT_COUNT_BOLT_NUM);

  spout = new TransactionalTridentKafkaSpout(
          KafkaUtils.getTridentKafkaConfig(config, new SchemeAsMultiScheme(new StringScheme())));

  TridentTopology trident = new TridentTopology();

  // sentence -> words -> grouped counts, persisted in an in-memory map state.
  // (A simpler non-parallel variant that used to live here as commented-out
  // code has been removed.)
  trident.newStream("wordcount", spout).name("sentence").parallelismHint(spoutNum).shuffle()
          .each(new Fields(StringScheme.STRING_SCHEME_KEY), new WordSplit(), new Fields("word"))
          .parallelismHint(splitNum)
          .groupBy(new Fields("word"))
          .persistentAggregate(new MemoryMapState.Factory(), new Count(), new Fields("count"))
          .parallelismHint(countNum);

  return trident.build();
}
 
Example 18
Source Project: jstorm   Source File: TCKTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that a flux topology defined via config methods parses, validates,
 * builds, and actually applies the configured bean properties.
 */
@Test
public void testTopologySourceWithConfigMethods() throws Exception {
    TopologyDef topologyDef = FluxParser.parseResource("/configs/config-methods-test.yaml", false, true, null, false);
    assertTrue(topologyDef.validate());
    Config conf = FluxBuilder.buildConfig(topologyDef);
    ExecutionContext context = new ExecutionContext(topologyDef, conf);
    StormTopology topology = FluxBuilder.buildTopology(context);
    assertNotNull(topology);
    topology.validate();

    // make sure the properties were actually set; compare with the literal on
    // the left so a null getter produces an assertion failure, not an NPE
    TestBolt bolt = (TestBolt) context.getBolt("bolt-1");
    assertTrue("foo".equals(bolt.getFoo()));
    assertTrue("bar".equals(bolt.getBar()));
    assertTrue("foobar".equals(bolt.getFooBar()));
}
 
Example 19
Source Project: storm-benchmark   Source File: GrepTest.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void componentParallelismCouldBeSetThroughConfig() {
  StormBenchmark grepBenchmark = new Grep();

  // Override the default parallelism of every component via config.
  Config config = new Config();
  config.put(Grep.SPOUT_NUM, 3);
  config.put(Grep.FM_NUM, 4);
  config.put(Grep.CM_NUM, 5);

  StormTopology topology = grepBenchmark.getTopology(config);
  assertThat(topology).isNotNull();

  // Each component must carry the parallelism set above.
  TestUtils.verifyParallelism(Utils.getComponentCommon(topology, Grep.SPOUT_ID), 3);
  TestUtils.verifyParallelism(Utils.getComponentCommon(topology, Grep.FM_ID), 4);
  TestUtils.verifyParallelism(Utils.getComponentCommon(topology, Grep.CM_ID), 5);
}
 
Example 20
Source Project: incubator-heron   Source File: LocalCluster.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Returns the topology for the given name, or null when no topology with
 * that name is alive.
 */
@Override
public StormTopology getTopology(String topoName) {
  try {
    assertAlive(topoName);
  } catch (NotAliveException ex) {
    return null; // no alive topology under this name
  }
  return this.topology;
}
 
Example 21
Source Project: incubator-heron   Source File: GeneralTopologyContext.java    License: Apache License 2.0 5 votes vote down vote up
// Always throws: this Storm-compatible constructor signature is kept for API
// compatibility but is not a supported way to create the context here.
@SuppressWarnings("rawtypes")
public GeneralTopologyContext(StormTopology topology, Map stormConf,
                              Map<Integer, String> taskToComponent,
                              Map<String, List<Integer>> componentToSortedTasks,
                              Map<String, Map<String, Fields>> componentToStreamToFields,
                              String stormId) {
  throw new RuntimeException("GeneralTopologyContext should never be initiated this way");
}
 
Example 22
Source Project: eagle   Source File: AggregationApplication.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds the MR-history metric aggregation topology. Metric names and
 * group-by column sets are parsed from config, then a single spout/bolt pair
 * is wired with task counts taken from {@code stormConfig.*Tasks}.
 */
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    // Comma-separated metric names, lower-cased for canonical lookup.
    List<String> metricNames = new ArrayList<>();
    for (String metricName : config.getString("aggregate.counters.metrics").split(",")) {
        metricNames.add(metricName.trim().toLowerCase());
    }

    // Each ';'-separated entry is itself a comma-separated group-by column list.
    List<String> groupByColumns = new ArrayList<>();
    for (String groupBy : config.getString("aggregate.counters.groupBys").split(";")) {
        groupByColumns.add(groupBy.trim());
    }

    // metric name -> list of group-by column lists.
    Map<String, List<List<String>>> metrics = new HashMap<>();
    for (String metric : metricNames) {
        List<List<String>> groupings = new ArrayList<>();
        for (String cols : groupByColumns) {
            groupings.add(Arrays.asList(cols.replaceAll(" ", "").split(",")));
        }
        metrics.put(metric, groupings);
    }

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    String spoutName = "mrHistoryAggregationSpout";
    String boltName = "mrHistoryAggregationBolt";
    AggregationConfig aggregationConfig = AggregationConfig.newInstance(config);
    int tasks = aggregationConfig.getConfig().getInt("stormConfig." + spoutName + "Tasks");
    topologyBuilder.setSpout(
        spoutName,
        new AggregationSpout(aggregationConfig, new MRMetricsAggregateContainer(metrics, aggregationConfig)),
        tasks
    ).setNumTasks(tasks);

    tasks = aggregationConfig.getConfig().getInt("stormConfig." + boltName + "Tasks");
    topologyBuilder.setBolt(boltName,
        new AggregationBolt(aggregationConfig.getStormConfig(), new MRMetricsAggregateContainer(metrics, aggregationConfig)),
        tasks).setNumTasks(tasks).shuffleGrouping(spoutName);

    return topologyBuilder.createTopology();
}
 
Example 23
Source Project: eagle   Source File: SparkRunningJobApp.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds the Spark running-job monitoring topology: a fetch spout feeding a
 * parse bolt, with tuples partitioned by application id. Parallelism is
 * clamped so it never exceeds the configured task count.
 */
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    //1. trigger prepare conf
    SparkRunningJobAppConfig sparkRunningJobAppConfig = SparkRunningJobAppConfig.newInstance(config);

    //2. prepare topology
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    final String spoutName = SparkRunningJobAppConfig.JOB_FETCH_SPOUT_NAME;
    final String boltName = SparkRunningJobAppConfig.JOB_PARSE_BOLT_NAME;
    // Parallelism may never exceed the number of tasks, so clamp it.
    int tasks = sparkRunningJobAppConfig.getJobExtractorConfig().jobFetchSpoutTasksNum;
    int parallelism = Math.min(
            sparkRunningJobAppConfig.getJobExtractorConfig().jobFetchSpoutParallism, tasks);
    topologyBuilder.setSpout(
            spoutName,
            new SparkRunningJobFetchSpout(
                    sparkRunningJobAppConfig.getJobExtractorConfig(),
                    sparkRunningJobAppConfig.getEndpointConfig(),
                    sparkRunningJobAppConfig.getZkStateConfig()),
            parallelism
    ).setNumTasks(tasks);

    tasks = sparkRunningJobAppConfig.getJobExtractorConfig().jobParseBoltTasksNum;
    parallelism = Math.min(
            sparkRunningJobAppConfig.getJobExtractorConfig().jobParseBoltParallism, tasks);
    topologyBuilder.setBolt(boltName,
            new SparkRunningJobParseBolt(
                    sparkRunningJobAppConfig.getZkStateConfig(),
                    sparkRunningJobAppConfig.getEagleServiceConfig(),
                    sparkRunningJobAppConfig.getEndpointConfig(),
                    sparkRunningJobAppConfig.getJobExtractorConfig()),
            parallelism).setNumTasks(tasks).fieldsGrouping(spoutName, new Fields("appId"));

    return topologyBuilder.createTopology();
}
 
Example 24
Source Project: jstorm   Source File: TridentMinMaxOfVehiclesTopology.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a topology demonstrating min/max operations on a stream of tuples
 * carrying vehicle and driver fields, with values of
 * {@link TridentMinMaxOfVehiclesTopology.Vehicle} and
 * {@link TridentMinMaxOfVehiclesTopology.Driver} respectively.
 */
public static StormTopology buildVehiclesTopology() {
    Fields driverField = new Fields(Driver.FIELD_NAME);
    Fields vehicleField = new Fields(Vehicle.FIELD_NAME);
    Fields allFields = new Fields(Vehicle.FIELD_NAME, Driver.FIELD_NAME);

    // Cycling spout emitting batches of generated vehicles.
    FixedBatchSpout vehicleSpout = new FixedBatchSpout(allFields, 10, Vehicle.generateVehicles(20));
    vehicleSpout.setCycle(true);

    TridentTopology tridentTopology = new TridentTopology();
    Stream vehicles = tridentTopology.newStream("spout1", vehicleSpout).each(allFields, new Debug("##### vehicles"));

    // Slowest vehicle and its driver (min by speed).
    Stream slowest = vehicles.min(new SpeedComparator()).each(vehicleField,
            new Debug("#### slowest vehicle"));
    slowest.project(driverField).each(driverField,
            new Debug("##### slowest driver"));

    // Fastest vehicle and its driver (max by speed).
    vehicles.max(new SpeedComparator()).each(vehicleField, new Debug("#### fastest vehicle"))
            .project(driverField).each(driverField, new Debug("##### fastest driver"));

    // Least/most efficient vehicles (minBy/maxBy on the vehicle field).
    vehicles.minBy(Vehicle.FIELD_NAME, new EfficiencyComparator()).each(vehicleField,
            new Debug("#### least efficient vehicle"));
    vehicles.maxBy(Vehicle.FIELD_NAME, new EfficiencyComparator()).each(vehicleField,
            new Debug("#### most efficient vehicle"));

    return tridentTopology.build();
}
 
Example 25
Source Project: jea   Source File: StormCluster.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Submits the topology to whichever cluster handle this wrapper holds:
 * an in-process LocalCluster, or the real cluster via StormSubmitter.
 *
 * @throws Exception if the cluster handle is of an unsupported type
 */
public void submitTopology(String topologyName, Config conf, StormTopology topology) throws Exception {
	if (cluster instanceof LocalCluster) {
		((LocalCluster) cluster).submitTopology(topologyName, conf, topology);
	} else if (cluster instanceof StormSubmitter) {
		// StormSubmitter.submitTopology is static: call it through the class
		// rather than an instance cast, which also removes the need for
		// @SuppressWarnings("static-access").
		StormSubmitter.submitTopology(topologyName, conf, topology);
	} else {
		throw new Exception("Storm的集群设置不正确,请设置为LocalCluster或StormSubmitter");
	}
}
 
Example 26
Source Project: trident-tutorial   Source File: RealTimeTextSearch.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Indexes parsed tweets into Elasticsearch via partitionPersist and exposes a
 * DRPC "search" stream that queries the same index by keyword.
 */
public static StormTopology buildTopology(TransactionalTridentKafkaSpout spout)
        throws IOException {

    TridentTopology tridentTopology = new TridentTopology();

    /**
     * Ingest side: parse each raw tweet, extract the tweet id and text, and
     * persist the (tweetId, text) pairs into the {@link ElasticSearchState}
     * through its {@link StateUpdater}. See those implementations for details.
     */
    tridentTopology
            .newStream("tweets", spout)
            .each(new Fields("str"), new ParseTweet(), new Fields("text", "content", "user"))
            .each(new Fields("text", "content"), new TweetIdExtractor(), new Fields("tweetId"))
            .project(new Fields("tweetId", "text"))
            .each(new Fields("tweetId", "text"), new Print())
            .partitionPersist(new ElasticSearchStateFactory(), new Fields("tweetId", "text"), new ElasticSearchStateUpdater());

    /**
     * Query side: a DRPC stream that splits the search arguments into keywords
     * and runs a {@link TweetQuery} (a {@link QueryFunction}) against the
     * static Elasticsearch-backed state.
     */
    TridentState searchState = tridentTopology.newStaticState(new ElasticSearchStateFactory());
    tridentTopology
            .newDRPCStream("search")
            .each(new Fields("args"), new Split(" "), new Fields("keywords"))
            .stateQuery(searchState, new Fields("keywords"), new TweetQuery(), new Fields("ids"))
            .project(new Fields("ids"));
    return tridentTopology.build();
}
 
Example 27
// Test fixture: boots an embedded Elasticsearch, a local DRPC server, and a
// local Storm cluster running the topology under test, then waits so the
// topology can process some data before assertions run.
@Before
public void setUp() {
    // Embedded Elasticsearch node plus the index used by the topology.
    esSetup = new EsSetup(settings);
    esSetup.execute(createIndex(index));

    // In-process DRPC server; must exist before the topology is built/submitted.
    drpc = new LocalDRPC();
    StormTopology topology = buildTopology();

    cluster = new LocalCluster();
    cluster.submitTopology("elastic-storm", new Config(), topology);

    Utils.sleep(10000); // let's do some work
}
 
Example 28
Source Project: eagle   Source File: TestStormApplication.java    License: Apache License 2.0 5 votes vote down vote up
/** One random-metric spout fanning out to two stream sinks, grouped by metric name. */
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    TopologyBuilder topologyBuilder = new TopologyBuilder();

    topologyBuilder.setSpout("metric_spout", new RandomEventSpout(), config.getInt("spoutNum"));
    topologyBuilder.setBolt("sink_1", environment.getStreamSink("TEST_STREAM_1", config))
            .fieldsGrouping("metric_spout", new Fields("metric"));
    topologyBuilder.setBolt("sink_2", environment.getStreamSink("TEST_STREAM_2", config))
            .fieldsGrouping("metric_spout", new Fields("metric"));

    return topologyBuilder.createTopology();
}
 
Example 29
Source Project: eagle   Source File: TopologyMgmtResourceImpl.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a StormTopology from the sample definition config, pointing the
 * "storm.jar" system property at the configured jar (empty when unset).
 */
private StormTopology createTopology(Topology topologyDef) {
    com.typesafe.config.Config topologyConf = ConfigFactory.load("topology-sample-definition.conf");
    // Fall back to an empty jar path when none is configured.
    String stormJarPath = topologyConf.hasPath(STORM_JAR_PATH)
            ? topologyConf.getString(STORM_JAR_PATH)
            : "";
    System.setProperty("storm.jar", stormJarPath);

    createTopologyHelper(topologyDef, topologyConf);
    return UnitTopologyMain.createTopology(topologyConf);
}
 
Example 30
Source Project: jstorm   Source File: SequenceTopologyTool.java    License: Apache License 2.0 5 votes vote down vote up
/** Builds the split/merge topology and runs it on a local cluster for one minute. */
public void SetLocalTopology() throws Exception {
    Config config = getConf();

    StormTopology topology = buildTopology();
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("SplitMerge", config, topology);

    // Let the topology run for a minute before tearing the cluster down.
    Thread.sleep(60000);
    localCluster.shutdown();
}