Java Code Examples for backtype.storm.Config

The following are top-voted examples showing how to use backtype.storm.Config. These examples are extracted from open source projects. You can vote up the examples you like; your votes help us surface more high-quality examples.
Example 1
Project: RealEstate-Streaming   File: PhoenixTest.java   Source Code and License 7 votes vote down vote up
/**
 * Builds the Phoenix test topology (Kafka spout + Phoenix test bolt) and
 * submits it to the Nimbus at localhost under the name "simple-topology".
 *
 * @throws Exception if spout/bolt configuration fails before submission
 */
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    //configureRouteBolt(builder);
    configurePhoenixTest(builder);

    /*
    builder.setBolt("submitter", new SubmitBolt())
       .shuffleGrouping(ROUTE_BOLT);
    */

    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        // Fixed typo in the log message ("submiting" -> "submitting").
        LOG.error("Error submitting Topology", e);
    }
}
 
Example 2
Project: Mastering-Mesos   File: LogViewerController.java   Source Code and License 6 votes vote down vote up
/**
 * Creates a process builder that launches the Storm logviewer from the
 * Mesos sandbox, redirecting its combined output to a startup log file.
 * (The previous Javadoc documented a {@code logDirectory} parameter that
 * this method does not take.)
 *
 * @return a configured, not-yet-started {@link ProcessBuilder} for the logviewer
 * @throws RuntimeException if the sandbox log directory cannot be created
 */
protected ProcessBuilder createProcessBuilder() {
  // Launch <cwd>/bin/storm logviewer with the log dir and port overridden
  // via -c key=value pairs.
  ProcessBuilder pb = new ProcessBuilder(
      Paths.get(System.getProperty("user.dir"), "/bin/storm").toString(),
      "logviewer",
      "-c",
      "storm.log.dir=" + System.getenv("MESOS_SANDBOX") + "/logs",
      "-c",
      Config.LOGVIEWER_PORT + "=" + port
  );

  // If anything goes wrong at startup we want to see it.
  Path logPath = Paths.get(System.getenv("MESOS_SANDBOX"), "/logs");
  if (!logPath.toFile().exists() && !logPath.toFile().mkdirs()) {
    throw new RuntimeException("Couldn't create log directory");
  }
  File log = Paths.get(System.getenv("MESOS_SANDBOX"), "/logs/logviewer-startup.log").toFile();
  // Merge stderr into stdout and append everything to the startup log.
  pb.redirectErrorStream(true);
  pb.redirectOutput(Redirect.appendTo(log));
  return pb;
}
 
Example 3
Project: rb-bi   File: PartitionManager.java   Source Code and License 6 votes vote down vote up
/**
 * Persists the last fully-completed Kafka offset for this partition to
 * ZooKeeper as a JSON blob (topology id/name, offset, partition, broker
 * host/port, topic). Skips the write when nothing new has completed.
 */
public void commit() {
    long lastCompletedOffset = lastCompletedOffset();
    if (_committedTo != lastCompletedOffset) {
        LOG.debug("Writing last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
        // Snapshot of everything a restarting spout needs to resume this partition.
        Map<Object, Object> data = (Map<Object, Object>) ImmutableMap.builder()
                .put("topology", ImmutableMap.of("id", _topologyInstanceId,
                        "name", _stormConf.get(Config.TOPOLOGY_NAME)))
                .put("offset", lastCompletedOffset)
                .put("partition", _partition.partition)
                .put("broker", ImmutableMap.of("host", _partition.host.host,
                        "port", _partition.host.port))
                .put("topic", _spoutConfig.topic).build();
        _state.writeJSON(committedPath(), data);

        // Only advance the in-memory marker after the ZK write succeeds.
        _committedTo = lastCompletedOffset;
        LOG.debug("Wrote last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
    } else {
        LOG.debug("No new offset for " + _partition + " for topology: " + _topologyInstanceId);
    }
}
 
Example 4
Project: storm-hbase-1.0.x   File: WordCountTrident.java   Source Code and License 6 votes vote down vote up
/**
 * Runs the word-count Trident topology either locally (one argument: the
 * HBase/HDFS url) for 60 seconds, or submits it to a cluster (two
 * arguments: url and topology name).
 *
 * @param args [0] = hdfs url, optional [1] = topology name for remote mode
 * @throws Exception if local sleep is interrupted or submission fails
 */
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        // Local smoke run: one minute, then tear down.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");
        cluster.shutdown();
        System.exit(0);
    }
    else if (args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else {
        // Fixed usage message: it previously named an unrelated class
        // ("TridentFileTopology").
        System.out.println("Usage: WordCountTrident <hdfs url> [topology name]");
    }
}
 
Example 5
Project: splice-community-sample-code   File: MySqlToSpliceTopology.java   Source Code and License 6 votes vote down vote up
/**
 * Streams rows from MySQL into a Splice table by running a two-component
 * topology (MySQL spout -> Splice bolt) on a local cluster for 3 seconds.
 *
 * @param args unused
 * @throws SQLException declared for the Splice bolt's JDBC setup
 */
public static void main(String[] args) throws SQLException {
    // Target table and Splice server instance.
    final String tableName = "students";
    final String server = "localhost";

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    // Spout seeds the stream with rows read from MySQL.
    topologyBuilder.setSpout("seedDataFromMySql", new MySqlSpout());
    // Bolt inserts each streamed row into Splice.
    topologyBuilder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1)
            .shuffleGrouping("seedDataFromMySql");

    Config topologyConf = new Config();
    topologyConf.setDebug(true);

    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("mysql-splice-topology", topologyConf, topologyBuilder.createTopology());
    Utils.sleep(3000);
    localCluster.shutdown();
}
 
Example 6
Project: preliminary.demo   File: RaceTopology.java   Source Code and License 6 votes vote down vote up
/**
 * Builds the race word-count topology (sentence spout -> split -> count)
 * and submits it under the configured JStorm topology name.
 *
 * @param args unused
 * @throws Exception declared for submission failures
 */
public static void main(String[] args) throws Exception {
    Config conf = new Config();

    // Parallelism hints for each component.
    final int spoutParallelism = 1;
    final int splitParallelism = 2;
    final int countParallelism = 2;

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new RaceSentenceSpout(), spoutParallelism);
    builder.setBolt("split", new SplitSentence(), splitParallelism).shuffleGrouping("spout");
    // Counting is grouped by word so each counter owns a word's total.
    builder.setBolt("count", new WordCount(), countParallelism).fieldsGrouping("split", new Fields("word"));

    String topologyName = RaceConfig.JstormTopologyName;
    try {
        StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 7
Project: hadooparchitecturebook   File: MovingAvgLocalTopologyRunner.java   Source Code and License 6 votes vote down vote up
/**
 * Runs the moving-average topology on an in-process local cluster.
 * To run on a real cluster, submit the same topology via
 * {@code StormSubmitter.submitTopology} instead.
 *
 * @param args unused
 */
public static void main(String[] args) {
    Config config = new Config();
    config.setDebug(true);

    StormTopology topology = buildTopology();

    // Local-mode execution.
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);
}
 
Example 8
Project: RealEstate-Streaming   File: KafkaPhoenixTopology.java   Source Code and License 6 votes vote down vote up
/**
 * Builds the real-estate topology (Kafka spout -> route bolt -> insert
 * bolt) and submits it to the Nimbus at localhost as "realestate-topology".
 *
 * @throws Exception if spout/bolt configuration fails before submission
 */
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);

    //builder.setBolt("submitter", new SubmitBolt())
    //   .shuffleGrouping(ROUTE_BOLT);

    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        // Fixed typo in the log message ("submiting" -> "submitting").
        LOG.error("Error submitting Topology", e);
    }
}
 
Example 9
Project: storm-demo   File: LogStatisticsTopology.java   Source Code and License 6 votes vote down vote up
/**
 * Wires the log-statistics topology (Kafka spout -> crop -> split fields
 * -> HDFS) and submits it to a remote cluster under the name in args[0].
 * Prints a usage hint when no topology name is supplied (previously the
 * program exited silently).
 *
 * @param args [0] = topology name for remote submission
 */
public static void main(String[] args) {
    Config config = new Config();

    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);

    LOG.info("Topology name is {}", TOPOLOGY_NAME);

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    // Group on timestamp+fieldvalues so a given record stream lands on a stable HDFS writer.
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));

    if (args != null && args.length > 0) {
        config.setDebug(false);
        config.setNumWorkers(3);

        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            // Use the class logger instead of printStackTrace, consistent
            // with the LOG.info above.
            LOG.error("Failed to submit topology " + args[0], e);
        }
    } else {
        System.out.println("Usage: LogStatisticsTopology <topology name>");
    }
}
 
Example 10
Project: storm-demo   File: AutoHDFS.java   Source Code and License 6 votes vote down vote up
/**
 * Manual smoke test for AutoHDFS credential handling: populates, inspects
 * and renews HDFS delegation credentials for the given principal.
 *
 * @param args [0] = storm principal (with realm), [1] = hdfs user (with
 *             realm), [2] = keytab path
 * @throws Exception on any credential/renewal failure
 */
@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
    Map conf = new HashMap();
    conf.put(Config.TOPOLOGY_SUBMITTER_PRINCIPAL, args[0]); //with realm e.g. [email protected]
    conf.put(STORM_USER_NAME_KEY, args[1]); //with realm e.g. [email protected]
    conf.put(STORM_KEYTAB_FILE_KEY, args[2]);// /etc/security/keytabs/storm.keytab

    // NOTE(review): 'configuration' is unused but kept — constructing a
    // Hadoop Configuration may trigger resource loading; confirm before removing.
    Configuration configuration = new Configuration();
    AutoHDFS autoHDFS = new AutoHDFS();
    autoHDFS.prepare(conf);

    Map<String, String> creds = new HashMap<String, String>();
    autoHDFS.populateCredentials(creds, conf);
    // The original log calls had no "{}" placeholder, so SLF4J silently
    // dropped the credential argument.
    LOG.info("Got HDFS credentials {}", autoHDFS.getCredentials(creds));

    Subject s = new Subject();
    autoHDFS.populateSubject(s, creds);
    LOG.info("Got a Subject {}", s);

    autoHDFS.renew(creds, conf);
    LOG.info("renewed credentials {}", autoHDFS.getCredentials(creds));
}
 
Example 11
Project: learn_jstorm   File: TridentTopology.java   Source Code and License 6 votes vote down vote up
/**
 * Returns the smallest TOPOLOGY_MAX_TASK_PARALLELISM configured on any
 * spout group, or null when no spout group declares one.
 */
private static Integer getMaxParallelism(Set<Group> groups) {
    Integer result = null;
    for (Group group : groups) {
        if (!isSpoutGroup(group)) {
            continue;
        }
        SpoutNode spoutNode = (SpoutNode) group.nodes.iterator().next();
        Map componentConf = getSpoutComponentConfig(spoutNode.spout);
        if (componentConf == null) {
            componentConf = new HashMap();
        }
        Number maxParallelism = (Number) componentConf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM);
        if (maxParallelism == null) {
            continue;
        }
        // Keep the tightest (minimum) cap seen so far.
        result = (result == null)
                ? maxParallelism.intValue()
                : Math.min(result, maxParallelism.intValue());
    }
    return result;
}
 
Example 12
Project: learn_jstorm   File: SequenceTopologyTool.java   Source Code and License 6 votes vote down vote up
/**
 * Submits the sequence-test topology to a remote (distributed) cluster.
 * The topology name defaults to "SequenceTest" when unset, and the
 * messaging transport is picked by whether the name mentions "zeromq".
 *
 * @throws AlreadyAliveException    if a topology with this name is running
 * @throws InvalidTopologyException if the built topology is malformed
 * @throws TopologyAssignException  if the cluster cannot assign it
 */
public void SetRemoteTopology() throws AlreadyAliveException,
        InvalidTopologyException, TopologyAssignException {
    Config conf = getConf();
    StormTopology topology = buildTopology();

    conf.put(Config.STORM_CLUSTER_MODE, "distributed");

    String topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (topologyName == null) {
        topologyName = "SequenceTest";
    }

    // Choose the transport implementation from the topology name.
    String transport = topologyName.contains("zeromq")
            ? "com.alibaba.jstorm.message.zeroMq.MQContext"
            : "com.alibaba.jstorm.message.netty.NettyContext";
    conf.put(Config.STORM_MESSAGING_TRANSPORT, transport);

    StormSubmitter.submitTopology(topologyName, conf, topology);
}
 
Example 13
Project: big-data-system   File: CopyOfPrintSampleStream.java   Source Code and License 6 votes vote down vote up
/**
 * Connects a Twitter sample-stream spout to a printer bolt and runs the
 * topology locally for ten seconds.
 *
 * @param args consumerKey consumerSecret accessToken accessTokenSecret
 *             [keyword ...]
 */
public static void main(String[] args) {
    // Fail fast with a usage hint instead of an ArrayIndexOutOfBoundsException.
    if (args.length < 4) {
        System.err.println(
                "Usage: CopyOfPrintSampleStream <consumerKey> <consumerSecret> <accessToken> <accessTokenSecret> [keyword ...]");
        return;
    }

    String consumerKey = args[0];
    String consumerSecret = args[1];
    String accessToken = args[2];
    String accessTokenSecret = args[3];
    // Remaining arguments are stream filter keywords (removed the
    // redundant args.clone() the original made before copying).
    String[] keyWords = Arrays.copyOfRange(args, 4, args.length);

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("twitter", new TwitterSampleSpout(consumerKey, consumerSecret,
                            accessToken, accessTokenSecret, keyWords));
    builder.setBolt("print", new PrinterBolt())
            .shuffleGrouping("twitter");

    Config conf = new Config();

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
}
 
Example 14
Project: cdh-storm   File: ReachTopology.java   Source Code and License 6 votes vote down vote up
/**
 * Runs the reach DRPC topology: with arguments, submits it to a cluster
 * under args[0]; without, runs it on a local cluster with an in-process
 * DRPC server and prints the reach of a few sample URLs.
 *
 * @param args optional [0] = topology name for remote submission
 * @throws Exception if submission or local execution fails
 */
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = construct();

  Config conf = new Config();

  if (args != null && args.length > 0) {
    // Remote mode.
    conf.setNumWorkers(6);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
    return;
  }

  // Local mode with an embedded DRPC server.
  conf.setMaxTaskParallelism(3);
  LocalDRPC drpc = new LocalDRPC();
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));

  for (String url : new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" }) {
    System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
  }

  cluster.shutdown();
  drpc.shutdown();
}
 
Example 15
Project: jstorm-0.9.6.3-   File: NimbusServer.java   Source Code and License 6 votes vote down vote up
/**
 * Starts the Nimbus Thrift server on the configured port using a
 * half-sync/half-async (THsHaServer) server. This call blocks: serve()
 * does not return while the server is running.
 *
 * @param conf storm configuration; reads NIMBUS_THRIFT_PORT and
 *             NIMBUS_THRIFT_MAX_BUFFER_SIZE
 * @throws TTransportException if the server socket cannot be opened
 */
private void initThrift(Map conf) throws TTransportException {
	Integer thrift_port = JStormUtils.parseInt(conf
			.get(Config.NIMBUS_THRIFT_PORT));
	TNonblockingServerSocket socket = new TNonblockingServerSocket(
			thrift_port);

	Integer maxReadBufSize = JStormUtils.parseInt(conf
			.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE));

	THsHaServer.Args args = new THsHaServer.Args(socket);
	args.workerThreads(ServiceHandler.THREAD_NUM);
	// Strict read, strict write, bounded message size.
	args.protocolFactory(new TBinaryProtocol.Factory(false, true,
			maxReadBufSize));

	args.processor(new Nimbus.Processor<Iface>(serviceHandler));
	args.maxReadBufferBytes = maxReadBufSize;

	thriftServer = new THsHaServer(args);

	// Logged before serve() because serve() blocks for the server's lifetime.
	LOG.info("Successfully started nimbus: started Thrift server...");
	thriftServer.serve();
}
 
Example 16
Project: jstrom   File: IsolatedPool.java   Source Code and License 6 votes vote down vote up
/**
 * Registers a topology with this pool, reclaiming any nodes the cluster
 * has already assigned to it and flagging it when it requests isolated
 * machines.
 */
@Override
public void addTopology(TopologyDetails td) {
    String topologyId = td.getId();
    LOG.debug("Adding in Topology {}", topologyId);

    // Collect the nodes this topology is already scheduled on, if any.
    Set<Node> nodes = new HashSet<Node>();
    SchedulerAssignment assignment = _cluster.getAssignmentById(topologyId);
    if (assignment != null) {
        for (WorkerSlot slot : assignment.getSlots()) {
            nodes.add(_nodeIdToNode.get(slot.getNodeId()));
        }
    }

    _usedNodes += nodes.size();
    _topologyIdToNodes.put(topologyId, nodes);
    _tds.put(topologyId, td);

    // Topologies requesting isolated machines are tracked separately.
    if (td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES) != null) {
        _isolated.add(topologyId);
    }
}
 
Example 17
Project: big-data-system   File: TransactionalGlobalCount.java   Source Code and License 6 votes vote down vote up
/**
 * Runs the transactional global word-count topology on a local cluster
 * for three seconds, then shuts it down.
 *
 * @param args unused
 * @throws Exception if the local run is interrupted
 */
public static void main(String[] args) throws Exception {
  // Transactional spout emitting word batches from the in-memory DATA set.
  MemoryTransactionalSpout spout =
      new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);

  TransactionalTopologyBuilder builder =
      new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  // Local run: three seconds, then tear down.
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("global-count-topology", config, builder.buildTopology());
  Thread.sleep(3000);
  cluster.shutdown();
}
 
Example 18
Project: learn_jstorm   File: Heartbeat.java   Source Code and License 6 votes vote down vote up
/**
 * Creates the supervisor heartbeat helper, recording the supervisor's
 * identity, start time and heartbeat frequency, then initializing the
 * supervisor info published with each heartbeat.
 * (The previous Javadoc documented a {@code myHostName} parameter that
 * does not exist and omitted {@code active}.)
 *
 * @param conf              supervisor configuration map; the heartbeat
 *                          frequency is read from
 *                          {@code Config.SUPERVISOR_HEARTBEAT_FREQUENCY_SECS}
 * @param stormClusterState cluster-state accessor used for heartbeats
 * @param supervisorId      id of the supervisor this heartbeat belongs to
 * @param active            shared flag signalling whether the supervisor is active
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public Heartbeat(Map conf, StormClusterState stormClusterState,
		String supervisorId, AtomicBoolean active) {

	// Hostname is derived from the configuration, not passed in.
	String myHostName = JStormServerUtils.getHostName(conf);

	this.stormClusterState = stormClusterState;
	this.supervisorId = supervisorId;
	this.conf = conf;
	this.myHostName = myHostName;
	this.startTime = TimeUtils.current_time_secs();
	this.active = active;
	this.frequence = JStormUtils.parseInt(conf
			.get(Config.SUPERVISOR_HEARTBEAT_FREQUENCY_SECS));

	initSupervisorInfo(conf);
	
	LOG.info("Successfully init supervisor heartbeat thread, " + supervisorInfo);
}
 
Example 19
Project: jstorm-0.9.6.3-   File: SequenceSpout.java   Source Code and License 6 votes vote down vote up
/**
 * Resolves the spout's effective MAX_PENDING limit from the configuration.
 * A single-threaded spout, a missing setting, or a setting of exactly 1
 * all mean "unbounded" (Long.MAX_VALUE).
 *
 * @param conf topology configuration map
 * @return the pending cap, or Long.MAX_VALUE when unbounded
 */
public long getMaxPending(Map conf) {
    // Single spout thread: never throttle.
    if (ConfigExtension.isSpoutSingleThread(conf)) {
        return Long.MAX_VALUE;
    }

    Object configured = conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
    if (configured == null) {
        return Long.MAX_VALUE;
    }

    int limit = JStormUtils.parseInt(configured);
    // A configured limit of 1 is also treated as unbounded.
    return limit == 1 ? Long.MAX_VALUE : limit;
}
 
Example 20
Project: jstorm-0.9.6.3-   File: SimpleBatchTopology.java   Source Code and License 6 votes vote down vote up
/**
 * Loads the topology configuration from a YAML file into the static
 * {@code conf} map and caches the topology name from it.
 *
 * @param confPath path to the YAML configuration file
 * @throws RuntimeException if the file is missing, unreadable, or empty
 */
private static void LoadYaml(String confPath) {
    Yaml yaml = new Yaml();

    // try-with-resources: the original never closed the stream, leaking a
    // file handle on every load.
    try (InputStream stream = new FileInputStream(confPath)) {
        conf = (Map) yaml.load(stream);
        if (conf == null || conf.isEmpty()) {
            throw new RuntimeException("Failed to read config file");
        }
    } catch (FileNotFoundException e) {
        System.out.println("No such file " + confPath);
        throw new RuntimeException("No config file");
    } catch (Exception e1) {
        e1.printStackTrace();
        throw new RuntimeException("Failed to read config file");
    }

    topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
}
 
Example 21
Project: jstorm-0.9.6.3-   File: TransactionalGlobalCount.java   Source Code and License 6 votes vote down vote up
/**
 * Submits the transactional global word-count topology to a remote
 * cluster with 9 workers and acking disabled.
 *
 * @param args unused
 * @throws Exception if submission fails
 */
public static void main(String[] args) throws Exception {
    // Transactional spout over the in-memory DATA set.
    MemoryTransactionalSpout spout =
            new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);

    TransactionalTopologyBuilder builder =
            new TransactionalTopologyBuilder("global-count", "spout", spout, 2);
    builder.setBolt("partial-count", new BatchCount(), 3).noneGrouping("spout");
    builder.setBolt("sum", new UpdateGlobalCount(), 1).globalGrouping("partial-count");

    Config config = new Config();
    config.setDebug(true);
    config.setMaxSpoutPending(3);
    config.put(Config.TOPOLOGY_WORKERS, 9);
    // Acking disabled for this run.
    Config.setNumAckers(config, 0);

    StormSubmitter.submitTopology("global-count-topology", config, builder.buildTopology());
}
 
Example 22
Project: cdh-storm   File: ExclamationTopology.java   Source Code and License 6 votes vote down vote up
/**
 * Runs the exclamation topology: with arguments it is submitted to a
 * cluster under args[0]; without, it runs locally for ten seconds and is
 * then killed.
 *
 * @param args optional [0] = topology name for remote submission
 * @throws Exception if submission or the local run fails
 */
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new TestWordSpout(), 10);
  builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  boolean remote = args != null && args.length > 0;
  if (remote) {
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  } else {
    // Local smoke run: ten seconds, then kill and shut down.
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example 23
Project: Mastering-Apache-Storm   File: Topology.java   Source Code and License 5 votes vote down vote up
/**
 * Runs the Storm-Redis word topology on a local cluster for ten seconds,
 * then kills the topology and shuts the cluster down.
 * (Removed the unused {@code zks} and {@code cFs} lists the original
 * built but never read.)
 *
 * @param args unused
 * @throws AlreadyAliveException    never in local mode; kept for compatibility
 * @throws InvalidTopologyException if the built topology is malformed
 */
public static void main(String[] args) throws AlreadyAliveException,
        InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();

    // Set the spout class.
    builder.setSpout("spout", new SampleSpout(), 2);
    // Set the bolt class; it talks to Redis via ZooKeeper at 192.168.41.122:2181.
    builder.setBolt("bolt", new StormRedisBolt("192.168.41.122", 2181), 2).shuffleGrouping("spout");

    Config conf = new Config();
    conf.setDebug(true);
    // Execute the topology in local mode.
    LocalCluster cluster = new LocalCluster();

    // "StormRedisTopology" is the name of the submitted topology.
    cluster.submitTopology("StormRedisTopology", conf,
            builder.createTopology());
    try {
        Thread.sleep(10000);
    } catch (Exception exception) {
        System.out.println("Thread interrupted exception : " + exception);
    }
    // Kill the topology and shut down the local test cluster.
    cluster.killTopology("StormRedisTopology");
    cluster.shutdown();
}
 
Example 24
Project: Mastering-Mesos   File: MesosNimbus.java   Source Code and License 5 votes vote down vote up
@SuppressWarnings("unchecked")
protected void initialize(Map conf, String localDir) throws Exception {
  _conf = new HashMap();
  _conf.putAll(conf);

  _state = new LocalStateShim(localDir);
  _allowedHosts = listIntoSet((List<String>) conf.get(CONF_MESOS_ALLOWED_HOSTS));
  _disallowedHosts = listIntoSet((List<String>) conf.get(CONF_MESOS_DISALLOWED_HOSTS));
  Boolean preferReservedResources = (Boolean) conf.get(CONF_MESOS_PREFER_RESERVED_RESOURCES);
  if (preferReservedResources != null) {
    _preferReservedResources = preferReservedResources;
  }
  _container = Optional.fromNullable((String) conf.get(CONF_MESOS_CONTAINER_DOCKER_IMAGE));
  _scheduler = new NimbusScheduler(this);

  // Generate YAML to be served up to clients
  _generatedConfPath = Paths.get(
      Optional.fromNullable((String) conf.get(Config.STORM_LOCAL_DIR)).or("./"),
      "generated-conf");
  if (!_generatedConfPath.toFile().exists() && !_generatedConfPath.toFile().mkdirs()) {
    throw new RuntimeException("Couldn't create generated-conf dir, _generatedConfPath=" + _generatedConfPath.toString());
  }

  createLocalServerPort();
  setupHttpServer();

  _conf.put(Config.NIMBUS_HOST, _configUrl.getHost());

  File generatedConf = Paths.get(_generatedConfPath.toString(), "storm.yaml").toFile();
  Yaml yaml = new Yaml();
  FileWriter writer = new FileWriter(generatedConf);
  yaml.dump(_conf, writer);
}
 
Example 25
Project: rb-bi   File: TridentKafkaEmitter.java   Source Code and License 5 votes vote down vote up
/**
 * Creates a Trident Kafka emitter: opens dynamic partition connections to
 * the brokers and registers the offset and fetch-latency metrics with the
 * topology context.
 *
 * @param conf               storm configuration; TOPOLOGY_NAME is read from it
 * @param context            topology context used for metric registration
 * @param config             Trident Kafka configuration (topic, metric bucket size)
 * @param topologyInstanceId unique id of this topology instance
 */
public TridentKafkaEmitter(Map conf, TopologyContext context, TridentKafkaConfig config, String topologyInstanceId) {
    _config = config;
    _topologyInstanceId = topologyInstanceId;
    _connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
    _topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    // Metrics are reported every metricsTimeBucketSizeInSecs seconds.
    _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_config.topic, _connections);
    context.registerMetric("kafkaOffset", _kafkaOffsetMetric, _config.metricsTimeBucketSizeInSecs);
    _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), _config.metricsTimeBucketSizeInSecs);
    _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), _config.metricsTimeBucketSizeInSecs);
}
 
Example 26
Project: rb-bi   File: ZkState.java   Source Code and License 5 votes vote down vote up
/**
 * Builds a Curator client for the transactional-state ZooKeeper ensemble.
 * The connect string is assembled as "host:port,host:port" from the
 * configured server list and shared port.
 *
 * @param stateConf storm config map holding the transactional ZK settings
 * @return a new (not yet started) CuratorFramework client
 * @throws Exception if the client cannot be created
 */
private CuratorFramework newCurator(Map stateConf) throws Exception {
    Integer port = (Integer) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT);
    // Build "host:port,host:port" without the trailing comma the original
    // string concatenation left on the connect string.
    StringBuilder serverPorts = new StringBuilder();
    for (String server : (List<String>) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS)) {
        if (serverPorts.length() > 0) {
            serverPorts.append(",");
        }
        serverPorts.append(server).append(":").append(port);
    }
    return CuratorFrameworkFactory.newClient(serverPorts.toString(),
            Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
            15000,
            new RetryNTimes(Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
                    Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
}
 
Example 27
Project: rb-bi   File: DynamicBrokersReader.java   Source Code and License 5 votes vote down vote up
/**
 * Creates a broker reader bound to a Kafka topic's broker metadata path in
 * ZooKeeper and starts the underlying Curator client.
 *
 * @param conf   storm config supplying the ZK timeout/retry settings
 * @param zkStr  ZooKeeper connect string
 * @param zkPath base path where broker information is stored
 * @param topic  Kafka topic whose brokers are tracked
 */
public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
    _zkPath = zkPath;
    _topic = topic;
    try {
        _curator = CuratorFrameworkFactory.newClient(
                zkStr,
                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)),
                new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
        _curator.start();
    } catch (Exception ex) {
        // NOTE(review): failure is only logged, leaving _curator null —
        // later use would NPE. Confirm whether callers check for this.
        LOG.error("Couldn't connect to zookeeper", ex);
    }
}
 
Example 28
Project: rb-bi   File: ConfigData.java   Source Code and License 5 votes vote down vote up
/**
 * Loads the topology configuration: reads the config file, discovers the
 * available topics, and pulls partition layout from ZooKeeper and
 * Tranquility.
 */
public ConfigData() {
    _conf = new Config();
    // NOTE(review): 'debug' is passed to ConfigFile *before* being set to
    // false below — verify the intended initialization order.
    _configFile = new ConfigFile(debug);
    _kafkaPartitions = new HashMap<>();
    _topics = _configFile.getAvailableTopics();
    _tranquilityPartitions = new HashMap<>();
    _zookeeper = getZkHost();
    debug = false;
    getZkData();
    getTranquilityPartitions();
}
 
Example 29
Project: rb-bi   File: ConfigData.java   Source Code and License 5 votes vote down vote up
/**
 * Tunes the shared topology configuration for the requested run mode.
 * "local" caps parallelism for a single-process run; "cluster" configures
 * workers, max spout pending, the trident batch interval, the
 * rbDebug/hash_mac flags, and registers the metrics consumers. Any other
 * mode leaves the configuration untouched.
 *
 * @param mode "local" or "cluster"
 * @return the shared Config instance, updated in place
 */
public Config setConfig(String mode) {
    if (mode.equals("local")) {
        _conf.setMaxTaskParallelism(1);
        _conf.setDebug(false);
    } else if (mode.equals("cluster")) {
        _conf.put(Config.TOPOLOGY_WORKERS, getWorkers());
        _conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, getMaxSpoutPending());
        _conf.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, getEmitBatchInterval());
        _conf.put("rbDebug", debug);

        // hash_mac defaults to false when absent from the general section.
        Boolean hashMac = _configFile.getFromGeneral("hash_mac");
        _conf.put("hash_mac", hashMac != null ? hashMac : false);

        /*  Metrics  */
        Map<String, Object> zkMetricsConf = new HashMap<>();
        zkMetricsConf.put("zookeeper", _zookeeper);
        _conf.registerMetricsConsumer(KafkaConsumerMonitorMetrics.class, zkMetricsConf, 1);

        if (getMetrics()) {
            List<String> metricNames = new ArrayList<>();
            metricNames.add("throughput");

            Map<String, Object> producerConf = new HashMap<>();
            producerConf.put("zookeeper", _zookeeper);
            producerConf.put("metrics", metricNames);
            producerConf.put("topic", "rb_monitor");

            _conf.registerMetricsConsumer(Metrics2KafkaProducer.class, producerConf, 1);
        }
    }

    return _conf;
}
 
Example 30
Project: rb-bi   File: TopologyFunctionTest.java   Source Code and License 5 votes vote down vote up
@Test
public void macVendorTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/macVendorFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    MacVendorFunction._ouiFilePath = Thread.currentThread().getContextClassLoader().getResource("db/oui-vendors").getPath();

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("macVendor");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new MacVendorFunction(), new Fields("macVendor"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 31
Project: rb-bi   File: TopologyFunctionTest.java   Source Code and License 5 votes vote down vote up
@Test
public void nonTimestampTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/nonTimestampFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);


    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new CheckTimestampFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(true, stormFlow.contains("timestamp"));
    }
}
 
Example 32
Project: rb-bi   File: TopologyFunctionTest.java   Source Code and License 5 votes vote down vote up
@Test
public void analizeHttpUrlTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/httpFlows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/httpFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("httpUrlMap");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new AnalizeHttpUrlFunction(), new Fields("httpUrlMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 33
Project: storm-scheduler   File: MonitoringMetricsToGraphiteWriter.java   Source Code and License 5 votes vote down vote up
/**
 * Returns the Graphite base path under which metric data for this
 * topology is stored. The base can be overridden through the
 * {@value #CONF_MONITORING_GRAPHITE_BASE_PATH} property in the storm
 * configuration; the topology name and a trailing "." are always
 * appended (e.g. {@code stools.topoXYZ.}).
 *
 * @param stormConf the storm configuration map.
 * @return the Graphite path, ending with ".".
 */
public static String getConfiguredGraphitBasePath(Map stormConf) {
    String basePath = stormConf.containsKey(CONF_MONITORING_GRAPHITE_BASE_PATH)
            ? (String) stormConf.get(CONF_MONITORING_GRAPHITE_BASE_PATH)
            : DEFAULT_GRAPHITE_BASE_PATH;
    return basePath + "." + stormConf.get(Config.TOPOLOGY_NAME) + ".";
}
 
Example 34
Project: storm-scheduler   File: Stootils.java   Source Code and License 5 votes vote down vote up
/**
 * Creates or returns the shared curator zookeeper client for the given storm configuration.
 * According to http://curator.apache.org/curator-framework, CuratorFramework instances are
 * fully thread-safe and should be shared per zk-cluster within an application. We assume a
 * single storm configuration object and therefore keep one singleton client.
 *
 * @param stormConf the storm configuration object, used to build the CuratorFramework
 *                  instance when the singleton has not been created yet.
 * @return the singleton instance created on the first call of this method.
 */
@SuppressWarnings("unchecked") // the list of zookeeper servers is a list, otherwise we have bigger problems
public static synchronized CuratorFramework getConfiguredZkClient(Map stormConf) {
    if (zkClientSingleton == null) {
        Object servers = stormConf.get(Config.STORM_ZOOKEEPER_SERVERS);
        Object port = stormConf.get(Config.STORM_ZOOKEEPER_PORT);
        LOG.debug("Creating CuratorFramework client for ZK server at {}:{}", servers, port);
        CuratorFramework client = backtype.storm.utils.Utils.newCurator(stormConf,
                (List<String>) servers,
                port,
                "/",
                new ZookeeperAuthInfo(stormConf));
        client.start();
        zkClientSingleton = client;
    }
    return zkClientSingleton;
}
 
Example 35
Project: fiware-sinfonier   File: DynamicTopology.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) throws Exception {

        LOG.info("Reading JSON file configuration...");
        JSONProperties config = new JSONProperties("/topology.json");
        TopologyBuilder topologyBuilder = new TopologyBuilder();

        // Wire spouts, bolts and drains from the JSON topology definition.
        configureSpouts(topologyBuilder, config.getSpouts());
        configureBolts(topologyBuilder, config.getBolts());
        configureDrains(topologyBuilder, config.getDrains());

        // Remaining Storm options come straight from the "properties" section.
        Config stormConfig = setTopologyStormConfig(config.getProperties());

        Object topologyName = config.getProperty("name");
        if (topologyName != null) {
            // A name means a real cluster submission.
            StormSubmitter.submitTopology((String) topologyName, stormConfig, topologyBuilder.createTopology());
        } else {
            // No name: debug run inside an in-process local cluster.
            stormConfig.setDebug(true);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("test", stormConfig, topologyBuilder.createTopology());
            Utils.sleep(1000000); // keep the local cluster alive before tearing it down
            cluster.killTopology("test");
            cluster.shutdown();
        }

    }
 
Example 36
Project: fiware-sinfonier   File: DynamicTopology.java   Source Code and License 5 votes vote down vote up
private static Config setTopologyStormConfig(JSONObject topologyProperties) throws ConfigurationException {

        // Copy every key/value from the JSON properties object into a fresh Storm Config.
        Config stormConfig = new Config();
        Iterator<?> propertyNames = topologyProperties.keys();
        while (propertyNames.hasNext()) {
            String name = (String) propertyNames.next();
            stormConfig.put(name, topologyProperties.get(name));
        }
        return stormConfig;
    }
 
Example 37
Project: fiware-sinfonier   File: DynamicTopology.java   Source Code and License 5 votes vote down vote up
/**
 * Registers every spout described in the given JSON array on the topology builder.
 * Each entry must provide "abstractionId", "class" and "parallelism"; "numTasks" and
 * "tickTuple" are optional tuning knobs.
 *
 * @param builder the topology builder to register spouts on.
 * @param spouts  JSON array of spout definitions; must contain at least one entry.
 * @throws Exception if the array is empty or a definition cannot be read.
 */
private static void configureSpouts(TopologyBuilder builder, JSONArray spouts) throws Exception {

        if (spouts == null || spouts.length() == 0) {
            throw new SinfonierException("There is no spouts. Add at least one spout.");
        }

        for (int i = 0; i < spouts.length(); i++) {

            JSONObject spout = spouts.getJSONObject(i);

            LOG.info("Creating spout with id:" + spout.getString("abstractionId"));

            SpoutDeclarer spoutDeclarer = null;
            try {
                // Instantiate reflectively; spout classes expose a (String, String jsonConfig) ctor.
                // (Fixed: the original pre-allocated a useless `new Object()` placeholder.)
                Object spoutInstance = Class.forName(spout.getString("class"))
                        .getConstructor(String.class, String.class).newInstance("", spout.toString());

                spoutDeclarer = builder.setSpout(
                        spout.getString("abstractionId"),
                        (IRichSpout) spoutInstance, spout.getInt("parallelism"));

            } catch (Exception e) {
                // Log the full stack trace; e.toString() alone loses the cause chain.
                LOG.error("Failed to create spout " + spout.getString("abstractionId"), e);
            }

            // Optional settings only apply when the spout was created successfully.
            if (spoutDeclarer != null) {
                if (spout.has("numTasks")) {
                    spoutDeclarer.setNumTasks(spout.getInt("numTasks"));
                }
                if (spout.has("tickTuple")) {
                    spoutDeclarer.addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, spout.getInt("tickTuple"));
                }
            }
        }

    }
 
Example 38
Project: reddit-sentiment-storm   File: ZkPublisher.java   Source Code and License 5 votes vote down vote up
/**
 * Connects to the ZooKeeper ensemble configured in the storm config and makes sure the
 * publisher's root znode exists.
 *
 * @param stormConf storm configuration; must contain STORM_ZOOKEEPER_SERVERS and
 *                  STORM_ZOOKEEPER_PORT.
 * @throws IOException if the ZooKeeper client cannot be created.
 */
public void init(Map stormConf) throws IOException {

	List<String> zkServers = (List<String>) stormConf.get(Config.STORM_ZOOKEEPER_SERVERS);
	int zkPort = ((Number) stormConf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();

	// Build "host1:port,host2:port,..." — all servers share the configured port.
	StringBuilder connectString = new StringBuilder();
	for (int i = 0; i < zkServers.size(); i++) {
		if (i > 0) {
			connectString.append(",");
		}
		connectString.append(zkServers.get(i)).append(":").append(zkPort);
	}

	LOG.info("ZK connect string:{}", connectString);

	zkClient = new ZooKeeper(connectString.toString(), 5000, new Watcher() {
		public void process(WatchedEvent e) {
			LOG.info("Publisher Watcher thread [~{}]: {}", Thread.currentThread().getId(), e.toString());
		}
	});

	try {
		// Create the root znode on first use. NOTE(review): exists()+create() races if two
		// publishers start at once — a NodeExistsException would surface as RuntimeException.
		if (zkClient.exists(ROOT_ZNODE, false) == null) {
			zkClient.create(ROOT_ZNODE, null, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
		}
	} catch (Exception e) {
		// Wrap checked ZK exceptions; callers only declare IOException.
		// (Fixed: removed a stray, truncated comment left dangling on this line.)
		throw new RuntimeException(e);
	}
}
 
Example 39
Project: splice-community-sample-code   File: SpliceDumperTopology.java   Source Code and License 5 votes vote down vote up
/**
 * Builds and locally runs a topology that dumps integer tuples into Splice.
 * The target table must already exist in Splice:
 *   create table testTable (word varchar(100), number int);
 *
 * @param args unused.
 * @throws SQLException if the Splice bolt fails to connect.
 */
public static void main(String[] args) throws SQLException {

        // Schema of the target table: word varchar(100), number int.
        // (Fixed: the original populated columnNames/columnTypes lists that were never read.)
        String tableName = "testTable";
        String server = "localhost";

        TopologyBuilder builder = new TopologyBuilder();

        // set the spout for the topology
        builder.setSpout("spout", new SpliceIntegerSpout(), 10);

        // dump the stream data into splice
        SpliceDumperBolt dumperBolt = new SpliceDumperBolt(server, tableName);
        builder.setBolt("dumperBolt", dumperBolt, 1).shuffleGrouping("spout");

        Config conf = new Config();
        conf.setDebug(true);

        // Run locally for ten seconds, then tear the cluster down.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("splice-topology", conf, builder.createTopology());
        Utils.sleep(10000);
        cluster.shutdown();
    }
 
Example 40
Project: preliminary.demo   File: RaceTopologyLocal.java   Source Code and License 5 votes vote down vote up
/**
 * Runs the word-count race topology on an in-process local cluster for one minute.
 *
 * @param args unused.
 */
public static void main(String[] args) {
	LocalCluster cluster = new LocalCluster();

	/* begin young-define */
	Config conf = new Config();
	TopologyBuilder builder = new TopologyBuilder();
	builder.setSpout("spout", new SpoutLocal(), 1);
	builder.setBolt("split", new SplitSentenceLocal(), 1).shuffleGrouping("spout");
	builder.setBolt("count", new WordCountLocal(), 1).fieldsGrouping("split", new Fields("word"));
	/* end young-define */

	// Recommended: cap every bolt/spout at a parallelism of 1.
	conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);

	// Submit the topology.
	cluster.submitTopology("SequenceTest", conf, builder.createTopology());

	// Let it run for one minute; increase while debugging if needed.
	try {
		Thread.sleep(60000);
	} catch (InterruptedException e) {
		// Restore the interrupt flag instead of swallowing it (was printStackTrace + TODO).
		Thread.currentThread().interrupt();
	}

	// Stop the topology and the cluster.
	cluster.killTopology("SequenceTest");

	cluster.shutdown();
}
 
Example 41
Project: storm-kafka-examples   File: CounterTopology.java   Source Code and License 5 votes vote down vote up
/**
 * @param args
 * http://www.programcreek.com/java-api-examples/index.php?api=storm.kafka.KafkaSpout
 */
public static void main(String[] args) {
	try {
		// Kafka/ZooKeeper connection settings; spout parallelism controls the reader thread count (6 total).
		String zkConnect = "wxb-1:2181,wxb-2:2181,wxb-3:2181";
		String kafkaTopic = "order";
		String consumerGroup = "id";
		int spoutParallelism = 3;
		int boltParallelism = 1;

		ZkHosts zkHosts = new ZkHosts(zkConnect); // ZooKeeper ensemble holding the Kafka metadata
		SpoutConfig spoutConfig = new SpoutConfig(zkHosts, kafkaTopic, "/order", consumerGroup);  // create /order /id
		spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
		KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

		TopologyBuilder topologyBuilder = new TopologyBuilder();
		topologyBuilder.setSpout("spout", kafkaSpout, spoutParallelism);
		topologyBuilder.setBolt("check", new CheckOrderBolt(), boltParallelism).shuffleGrouping("spout");
		topologyBuilder.setBolt("counter", new CounterBolt(), boltParallelism).shuffleGrouping("check");

		Config stormConfig = new Config();
		stormConfig.setDebug(true);

		if (args != null && args.length > 0) {
			// Remote submission under the name given on the command line.
			stormConfig.setNumWorkers(2);
			StormSubmitter.submitTopology(args[0], stormConfig, topologyBuilder.createTopology());
		} else {
			// Local run; keep parallelism small and let it process for a while.
			stormConfig.setMaxTaskParallelism(2);
			LocalCluster localCluster = new LocalCluster();
			localCluster.submitTopology("Wordcount-Topology", stormConfig, topologyBuilder.createTopology());
			Thread.sleep(500000);
			localCluster.shutdown();
		}
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 42
Project: hadooparchitecturebook   File: MovingAvgLocalTopologyRunner.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) throws Exception {

  Config config = new Config();
  LocalCluster localCluster = new LocalCluster();

  TridentTopology tridentTopology = new TridentTopology();

  // Parse raw stock ticks into prices, then aggregate them into a running average.
  Stream movingAvgStream = tridentTopology
      .newStream("ticks-spout", buildSpout())
      .each(new Fields("stock-ticks"), new TickParser(), new Fields("price"))
      .aggregate(new Fields("price"), new CalculateAverage(), new Fields("count"));

  // NOTE(review): the local cluster is never slept on or shut down here — confirm intentional.
  localCluster.submitTopology("moving-avg", config, tridentTopology.build());
}
 
Example 43
Project: miner   File: TopologyMain.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) {
	try {
		// Pipeline: Spout -> generate -> Store.
		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout("Spout", new EmitMessageSpout(), 1);
		builder.setBolt("generate", new ParseLoopBolt(), 1).shuffleGrouping("Spout");
		builder.setBolt("Store", new PrintBolt(), 1).shuffleGrouping("generate");

		Config conf = new Config();
		conf.setDebug(false);

		if (args == null || args.length == 0) {
			// No topology name supplied: run inside an in-process local cluster.
			conf.setMaxTaskParallelism(2);
			LocalCluster localCluster = new LocalCluster();
			localCluster.submitTopology("test", conf, builder.createTopology());
		} else {
			// Remote submission under the supplied name.
			conf.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
		}

	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 44
Project: miner   File: TopologyMain.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) {
	try {
		TopologyBuilder builder = new TopologyBuilder();

		// Three typed record sources feeding one splitter.
		builder.setSpout("spout-number", new ProduceRecordSpout(Type.NUMBER, new String[]{"111 222 333", "80966 31"}), 1);
		builder.setSpout("spout-string", new ProduceRecordSpout(Type.STRING, new String[]{"abc ddd fasko", "hello the world"}), 1);
		builder.setSpout("spout-sign", new ProduceRecordSpout(Type.SIGN, new String[]{"++ -*% *** @@", "{+-} ^#######"}), 1);

		// Split every record, then route words by type to the matching saver.
		builder.setBolt("bolt-splitter", new SplitRecordBolt(), 2)
				.shuffleGrouping("spout-number")
				.shuffleGrouping("spout-string")
				.shuffleGrouping("spout-sign");

		builder.setBolt("bolt-distributor", new DistributeWordByTypeBolt(), 1)
				.fieldsGrouping("bolt-splitter", new Fields("type"));

		builder.setBolt("bolt-number-saver", new SaveDataBolt(Type.NUMBER), 1)
				.shuffleGrouping("bolt-distributor", "stream-number-saver");
		builder.setBolt("bolt-string-saver", new SaveDataBolt(Type.STRING), 1)
				.shuffleGrouping("bolt-distributor", "stream-string-saver");
		builder.setBolt("bolt-sign-saver", new SaveDataBolt(Type.SIGN), 1)
				.shuffleGrouping("bolt-distributor", "stream-sign-saver");

		Config conf = new Config();
		conf.setDebug(false);

		if (args == null || args.length == 0) {
			// Local debug run.
			conf.setMaxTaskParallelism(2);
			LocalCluster localCluster = new LocalCluster();
			localCluster.submitTopology("test", conf, builder.createTopology());
		} else {
			// Cluster submission.
			conf.setNumWorkers(4);
			StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
		}

	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Example 45
Project: miner   File: ExclaimBasicTopo.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) throws Exception {
    // spout -> exclaim (proxy) -> print.
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("spout", new RandomSpout());
    topologyBuilder.setBolt("exclaim", new ProxyBolt()).shuffleGrouping("spout");
    topologyBuilder.setBolt("print", new PrintBolt()).shuffleGrouping("exclaim");

    Config stormConf = new Config();
    stormConf.setDebug(false);

    /* Redis connection settings are carried inside the Config. */
    stormConf.put("ip","127.0.0.1");
    stormConf.put("port","6379");
    stormConf.put("password","password");

    boolean remote = args != null && args.length > 0;
    if (remote) {
        stormConf.setNumWorkers(1);
        StormSubmitter.submitTopology(args[0], stormConf, topologyBuilder.createTopology());
    } else {
        // Local run: process for ten seconds, then tear everything down.
        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("test", stormConf, topologyBuilder.createTopology());
        Utils.sleep(10*1000);
        localCluster.killTopology("test");
        localCluster.shutdown();
    }
}
 
Example 46
Project: miner   File: TopologyMain.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) {
		try {
			TopologyBuilder builder = new TopologyBuilder();

			// Spout feeds two URL generators; "generate_loop" is fed back from Parse's "loop" stream.
			builder.setSpout("Spout", new EmitMessageSpout(), 1);

			builder.setBolt("generate", new GenerateUrlBolt(), 1)
					.shuffleGrouping("Spout");
			builder.setBolt("generate_loop", new GenerateUrlBolt(), 1)
					.shuffleGrouping("Parse", "loop");

			// Parse consumes both generators; finished items go out on the "store" stream.
			builder.setBolt("Parse", new ParseLoopBolt(), 1)
					.shuffleGrouping("generate")
					.shuffleGrouping("generate_loop");

			builder.setBolt("Store", new StoreTestBolt(), 1)
					.shuffleGrouping("Parse", "store");

			Config conf = new Config();
			conf.setDebug(false);

			if (args == null || args.length == 0) {
				// Local debug run.
				conf.setMaxTaskParallelism(2);
				LocalCluster localCluster = new LocalCluster();
				localCluster.submitTopology("test", conf, builder.createTopology());
			} else {
				// Cluster submission.
				conf.setNumWorkers(4);
				StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
			}

		} catch (Exception e) {
			e.printStackTrace();
		}
	}
 
Example 47
Project: erad2016-streamprocessing   File: SentimentAnalysisTopology.java   Source Code and License 5 votes vote down vote up
private static Config createConfig(boolean local) {
    // Worker count drives either local task parallelism or cluster worker processes.
    Config stormConf = new Config();
    stormConf.setDebug(true);
    int workers = Properties.getInt("sa.storm.workers");
    if (local) {
        stormConf.setMaxTaskParallelism(workers);
    } else {
        stormConf.setNumWorkers(workers);
    }
    return stormConf;
}
 
Example 48
Project: es-hadoop-v2.2.0   File: AbstractStormSuite.java   Source Code and License 5 votes vote down vote up
/**
 * Copies every test property into the given Storm config.
 *
 * @param cfg the config to populate.
 */
private static void copyPropertiesIntoCfg(Config cfg) {
    Properties props = TestSettings.TESTING_PROPS;

    for (String property : props.stringPropertyNames()) {
        // Use getProperty(), not get(): stringPropertyNames() also enumerates keys from
        // the defaults chain, which Hashtable.get() cannot see (it would return null),
        // and get() could surface non-String values stored via put().
        cfg.put(property, props.getProperty(property));
    }
}
 
Example 49
Project: sourcevirtues-samples   File: DummyJsonTerminalLogTopology.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) throws Exception {
    Config config = new Config();

    // Random JSON source -> morphline bolt running the "json_terminal_log" command chain.
    RandomJsonTestSpout jsonSpout = new RandomJsonTestSpout().withComplexJson(false);

    String2ByteArrayTupleMapper mapper = new String2ByteArrayTupleMapper();
    mapper.configure(CmnStormCons.TUPLE_FIELD_MSG);

    MorphlinesBolt terminalLogBolt = new MorphlinesBolt()
            .withTupleMapper(mapper)
            .withMorphlineId("json_terminal_log")
            .withMorphlineConfFile("target/test-classes/morphline_confs/json_terminal_log.conf");

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("WORD_SPOUT", jsonSpout, 1);
    topologyBuilder.setBolt("MORPH_BOLT", terminalLogBolt, 1).shuffleGrouping("WORD_SPOUT");

    switch (args.length) {
        case 0: {
            // Local smoke run: let it process for 10s, then tear down and exit.
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("MyDummyJsonTerminalLogTopology", config, topologyBuilder.createTopology());
            Thread.sleep(10000);
            localCluster.killTopology("MyDummyJsonTerminalLogTopology");
            localCluster.shutdown();
            System.exit(0);
            break;
        }
        case 1:
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
            break;
        default:
            System.out.println("Usage: DummyJsonTerminalLogTopology <topology_name>");
    }
}
 
Example 50
Project: sourcevirtues-samples   File: DummyJson2StringTopology.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) throws Exception {
    Config config = new Config();

    // Random JSON source -> "json2string" morphline -> logging sink.
    RandomJsonTestSpout jsonSpout = new RandomJsonTestSpout().withComplexJson(false);

    String2ByteArrayTupleMapper mapper = new String2ByteArrayTupleMapper();
    mapper.configure(CmnStormCons.TUPLE_FIELD_MSG);

    MorphlinesBolt json2StringBolt = new MorphlinesBolt()
            .withTupleMapper(mapper)
            .withMorphlineId("json2string")
            .withMorphlineConfFile("target/test-classes/morphline_confs/json2string.conf")
            .withOutputFields(CmnStormCons.TUPLE_FIELD_MSG)
            .withRecordMapper(RecordHandlerFactory.genDefaultRecordHandler(String.class, new JsonNode2StringResultMapper()));

    LoggingBolt loggingBolt = new LoggingBolt().withFields(CmnStormCons.TUPLE_FIELD_MSG);

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout("WORD_SPOUT", jsonSpout, 1);
    topologyBuilder.setBolt("MORPH_BOLT", json2StringBolt, 1).shuffleGrouping("WORD_SPOUT");
    topologyBuilder.setBolt("PRINT_BOLT", loggingBolt, 1).shuffleGrouping("MORPH_BOLT");

    switch (args.length) {
        case 0: {
            // Local smoke run: process for 10s, then tear down and exit.
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("MyDummyJson2StringTopology", config, topologyBuilder.createTopology());
            Thread.sleep(10000);
            localCluster.killTopology("MyDummyJson2StringTopology");
            localCluster.shutdown();
            System.exit(0);
            break;
        }
        case 1:
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
            break;
        default:
            System.out.println("Usage: DummyJson2StringTopology <topology_name>");
    }
}
 
Example 51
Project: java   File: DeliveryTopology.java   Source Code and License 5 votes vote down vote up
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  LOGGER.info("Starting..");

  // trade -> eligibility, which fans out odd/even tuples on dedicated streams.
  TopologyBuilder topologyBuilder = new TopologyBuilder();
  topologyBuilder.setSpout("trade", new DeliveryCheckSpout(), 1);
  topologyBuilder.setBolt("eligibility", new DeliveryCheckBolt(), 10).shuffleGrouping("trade");
  topologyBuilder.setBolt("odd", new DeliveryCheckOddBolt(), 10).shuffleGrouping("eligibility",
      "oddstream");
  topologyBuilder.setBolt("even", new DeliveryCheckEvenBolt(), 10).shuffleGrouping("eligibility",
      "evenstream");

  Config stormConf = new Config();
  stormConf.setDebug(false);
  stormConf.setMaxSpoutPending(5);

  boolean remote = args != null && args.length > 0;
  if (remote) {
    stormConf.setNumWorkers(1);
    LOGGER.info("Submitting DeliveryTopology");
    StormSubmitter.submitTopologyWithProgressBar(args[0], stormConf, topologyBuilder.createTopology());
  } else {
    // Local run: keep the cluster alive for a long time, then tear down.
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("DeliveryTopology", stormConf, topologyBuilder.createTopology());
    Utils.sleep(100000000);
    localCluster.killTopology("DeliveryTopology");
    localCluster.shutdown();
  }
}
 
Example 52
Project: streaming_outliers   File: StormTopologyComponent.java   Source Code and License 5 votes vote down vote up
public void submitFluxTopology(String topologyName, File topologyLoc, Properties properties) throws IOException, ClassNotFoundException, NoSuchMethodException, InvocationTargetException, InstantiationException, IllegalAccessException, TException {
    // Build config and topology from the Flux YAML definition, then submit.
    TopologyDef def = loadYaml(topologyName, topologyLoc, properties);
    Config stormConf = FluxBuilder.buildConfig(def);
    ExecutionContext executionContext = new ExecutionContext(def, stormConf);
    StormTopology stormTopology = FluxBuilder.buildTopology(executionContext);
    // Sanity-check before submission (this is test-support code, hence the Assert).
    Assert.assertNotNull(stormTopology);
    stormTopology.validate();
    stormCluster.submitTopology(topologyName, stormConf, stormTopology);
}
 
Example 53
Project: Tstream   File: MainPage.java   Source Code and License 5 votes vote down vote up
/**
 * Loads cluster, topology, supervisor and ZooKeeper summaries from Nimbus for the main UI
 * page. "No alive nimbus" errors are tolerated (page renders empty); anything else is
 * logged and rethrown. Client and cluster-state handles are always closed.
 */
@SuppressWarnings("rawtypes")
private void init() throws Exception {

	NimbusClient client = null;
	ClusterState cluster_state = null;
	try {
		LOG.info("MainPage init...");
		Map conf = UIUtils.readUiConfig();

		client = UIUtils.getNimbusClient(conf, clusterName);
		summ = client.getClient().getClusterInfo();

		tsumm = UIUtils.topologySummary(summ.get_topologies());
		csumm = UIUtils.clusterSummary(summ, client, conf);
		ssumm = UIUtils.supervisorSummary(summ.get_supervisors());

		cluster_state = ZkTool.mk_distributed_cluster_state(client
				.getConf());
		slaves = getNimbusSlave(cluster_state, conf);

		zkServers = getZkServer(conf);
		zkPort = String.valueOf(conf.get(Config.STORM_ZOOKEEPER_PORT));

	} catch (Exception e) {
		// Fixed: getMessage() can be null (e.g. NPE), which previously crashed the
		// indexOf call below with a secondary NPE masking the real error.
		String errorInfo = e.getMessage();
		if (errorInfo == null || errorInfo.indexOf("No alive nimbus") == -1) {
		    LOG.error("Failed to get cluster information:", e);
		    throw e;
		}
	} finally {
		if (client != null) {
			client.close();
		}
		if (cluster_state != null) {
			cluster_state.close();
		}
	}
}
 
Example 54
Project: Tstream   File: TridentBoltExecutor.java   Source Code and License 5 votes vote down vote up
/**
 * Returns the wrapped bolt's component config with a 5-second tick-tuple frequency added,
 * so the executor receives periodic ticks.
 *
 * @return the component configuration, never null.
 */
@Override
public Map<String, Object> getComponentConfiguration() {
    Map<String, Object> ret = _bolt.getComponentConfiguration();
    if (ret == null) {
        // Typed map instead of raw `new HashMap()` (fixes unchecked raw-type usage).
        ret = new HashMap<String, Object>();
    }
    ret.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 5);
    // TODO: Need to be able to set the tick tuple time to the message timeout, ideally without parameterization
    return ret;
}
 
Example 55
Project: Tstream   File: StormConfig.java   Source Code and License 5 votes vote down vote up
public static String default_worker_shared_dir(Map conf) throws IOException {
	// The shared worker-data directory lives under Storm's local dir; create it if absent.
	StringBuilder path = new StringBuilder();
	path.append(conf.get(Config.STORM_LOCAL_DIR)).append(FILE_SEPERATEOR).append(WORKER_DATA_SUBDIR);
	String sharedDir = path.toString();
	FileUtils.forceMkdir(new File(sharedDir));
	return sharedDir;
}
 
Example 56
Project: jstrom   File: StormMasterServerHandler.java   Source Code and License 5 votes vote down vote up
/**
 * Stores the local host's IP address as the nimbus host in the storm configuration.
 * Leaves the config untouched when the address cannot be resolved.
 */
@SuppressWarnings("unchecked")
private void setStormHostConf() {
	try {
           String host_addr = InetAddress.getLocalHost().getHostAddress();
           LOG.info("Storm master host:" + host_addr);
           _storm_conf.put(Config.NIMBUS_HOST, host_addr);
       } catch (UnknownHostException ex) {
           // Include the exception so the failure cause is not lost (was message-only).
           LOG.warn("Failed to get IP address of local host", ex);
       }
}
 
Example 57
Project: aeolus   File: FileSinkBoltTest.java   Source Code and License 5 votes vote down vote up
@Test
public void testConstructorAbsolutePath() throws Exception {
	// An absolute path: /directory1/directory2/simple.file (platform separators).
	String fileName = "simple.file";
	String parentDir = File.separator + "directory1" + File.separator + "directory2";
	String absolutePath = parentDir + File.separator + fileName;

	FileSinkBolt bolt = new FileSinkBolt(absolutePath);
	bolt.prepare(new Config(), null, null);

	// The bolt must open a FileWriter on exactly the path it was given.
	PowerMockito.verifyNew(FileWriter.class).withArguments(absolutePath);
}
 
Example 58
Project: Tstream   File: RefreshActive.java   Source Code and License 5 votes vote down vote up
@SuppressWarnings("rawtypes")
public RefreshActive(WorkerData workerData) {
	this.workerData = workerData;

	this.active = workerData.getActive();
	this.conf = workerData.getConf();
	this.zkCluster = workerData.getZkCluster();
	this.topologyId = workerData.getTopologyId();
	this.frequence = JStormUtils.parseInt(
			conf.get(Config.TASK_REFRESH_POLL_SECS), 10);
}
 
Example 59
Project: Infrastructure   File: SwitchTopology.java   Source Code and License 5 votes vote down vote up
/**
 * Wires the switch-test pipeline onto the builder and returns its closing monitoring event.
 * Layout (ports 9994/9995 are the network hops marked "=" below):
 * Source - Family = Intermediary - Mapper - Processor - Intermediary = Receiver
 * The sink stage is appended only when {@code withSink} is set.
 */
@Override
public SubTopologyMonitoringEvent createTopology(Config config, RecordingTopologyBuilder builder) {
    // Source - Family = Intermediary - Mapper - Processor - Intermediary = Receiver
    
    // Test data source at the head of the pipeline.
    builder.setSpout(getTestSourceName(), 
        new TestSourceSource(getTestSourceName(), PIP, SEND_EVENTS), 1)
        .setNumTasks(1);
    
    // Switch family element; sends onward over port 9994.
    builder.setBolt(getTestFamilyName(), 
        new TestSwitchFamilyElement(getTestFamilyName(), PIP, SEND_EVENTS, true, 9994), 1)
       .setNumTasks(1).shuffleGrouping(getTestSourceName());

    // Receiving side of the first network hop (port 9994).
    builder.setSpout(getTestIntermediaryBoltName(), 
        new ReceivingSpout(getTestIntermediaryBoltName(), PIP, SEND_EVENTS, true, 9994), 1)
        .setNumTasks(1);

    // Mapper stage of the sub-topology.
    builder.setBolt(getMapperName(), 
        new SubTopologyFamilyElement0FamilyElement(getMapperName(), PIP, SEND_EVENTS, true))
        .setNumTasks(1).shuffleGrouping(getTestIntermediaryBoltName());
    
    // Processor stage of the sub-topology.
    builder.setBolt(getProcessorName(), 
        new SubTopologyFamilyElement1FamilyElement(getProcessorName(), PIP, SEND_EVENTS, true))
        .setNumTasks(1).shuffleGrouping(getMapperName());
    
    // Sending side of the second network hop (port 9995).
    builder.setBolt(getOutSenderName(), 
        new SendingBolt(getOutSenderName(), PIP, SEND_EVENTS, true, 9995), 1)
        .setNumTasks(1).shuffleGrouping(getProcessorName());

    // Receiving side of the second network hop (port 9995).
    builder.setSpout(getOutReceiverName(), 
        new ReceivingSpout(getOutReceiverName(), PIP, SEND_EVENTS, true, 9995), 1)
        .setNumTasks(1);
    
    // Optional terminal sink.
    if (withSink) {
        builder.setBolt(getSinkName(), 
            new SinkBolt(getSinkName(), PIP, SEND_EVENTS, true), 1)
            .setNumTasks(1).shuffleGrouping(getOutReceiverName());
    }
    
    return builder.createClosingEvent(PIP, config);
}
 
Example 60
Project: Tstream   File: MasterBatchCoordinator.java   Source Code and License 5 votes vote down vote up
@Override
public Map<String, Object> getComponentConfiguration() {
    // The coordinator must run single-threaded; register the transaction-attempt
    // class so it serializes efficiently.
    Config coordinatorConf = new Config();
    coordinatorConf.setMaxTaskParallelism(1);
    coordinatorConf.registerSerialization(TransactionAttempt.class);
    return coordinatorConf;
}