Java Code Examples for backtype.storm.Config

The following examples show how to use backtype.storm.Config. They are extracted from open source projects.
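As a primer before the extracted examples, here is a minimal sketch of the typical backtype.storm.Config workflow: create the map-backed config, set options through its typed setters or raw put calls with the Config key constants, and hand it to a local or remote submitter. MySpout, MyBolt, and the topology name are hypothetical placeholders, not taken from any of the projects below.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.topology.TopologyBuilder;

public class ConfigSketch {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new MySpout(), 1);          // MySpout is hypothetical
        builder.setBolt("bolt", new MyBolt(), 2)              // MyBolt is hypothetical
               .shuffleGrouping("spout");

        Config conf = new Config();            // Config extends HashMap<String, Object>
        conf.setDebug(true);                   // typed setter for Config.TOPOLOGY_DEBUG
        conf.put(Config.TOPOLOGY_WORKERS, 2);  // raw put with a Config key constant

        if (args.length > 0) {
            // remote submission; args[0] is the topology name
            StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
        } else {
            // in-process test cluster
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("config-sketch", conf, builder.createTopology());
        }
    }
}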
Example 1
Project: RealEstate-Streaming   File: PhoenixTest.java
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    //configureRouteBolt(builder);
    configurePhoenixTest(builder);

    /*
    builder.setBolt("submitter", new SubmitBolt())
       .shuffleGrouping(ROUTE_BOLT);
    */

    try {
        StormSubmitter.submitTopology("simple-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
 
Example 2
Project: Mastering-Mesos   File: LogViewerController.java
/**
 * Create a process builder to launch the log viewer.
 *
 * @return a ProcessBuilder configured to start the logviewer process
 */
protected ProcessBuilder createProcessBuilder() {
  ProcessBuilder pb = new ProcessBuilder(
      Paths.get(System.getProperty("user.dir"), "/bin/storm").toString(),
      "logviewer",
      "-c",
      "storm.log.dir=" + System.getenv("MESOS_SANDBOX") + "/logs",
      "-c",
      Config.LOGVIEWER_PORT + "=" + port
  );

  // If anything goes wrong at startup we want to see it.
  Path logPath = Paths.get(System.getenv("MESOS_SANDBOX"), "/logs");
  if (!logPath.toFile().exists() && !logPath.toFile().mkdirs()) {
    throw new RuntimeException("Couldn't create log directory");
  }
  File log = Paths.get(System.getenv("MESOS_SANDBOX"), "/logs/logviewer-startup.log").toFile();
  pb.redirectErrorStream(true);
  pb.redirectOutput(Redirect.appendTo(log));
  return pb;
}
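A note on the "-c" overrides above: the constants on Config are plain String keys, the same keys a storm.yaml file uses, so the second override expands to something like "logviewer.port=8000" (the actual value is whatever port this controller holds). A one-line illustration:

System.out.println(Config.LOGVIEWER_PORT); // prints "logviewer.port"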
 
Example 3
Project: rb-bi   File: PartitionManager.java
public void commit() {
    long lastCompletedOffset = lastCompletedOffset();
    if (_committedTo != lastCompletedOffset) {
        LOG.debug("Writing last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
        Map<Object, Object> data = (Map<Object, Object>) ImmutableMap.builder()
                .put("topology", ImmutableMap.of("id", _topologyInstanceId,
                        "name", _stormConf.get(Config.TOPOLOGY_NAME)))
                .put("offset", lastCompletedOffset)
                .put("partition", _partition.partition)
                .put("broker", ImmutableMap.of("host", _partition.host.host,
                        "port", _partition.host.port))
                .put("topic", _spoutConfig.topic).build();
        _state.writeJSON(committedPath(), data);

        _committedTo = lastCompletedOffset;
        LOG.debug("Wrote last completed offset (" + lastCompletedOffset + ") to ZK for " + _partition + " for topology: " + _topologyInstanceId);
    } else {
        LOG.debug("No new offset for " + _partition + " for topology: " + _topologyInstanceId);
    }
}
 
Example 4
Project: storm-hbase-1.0.x   File: WordCountTrident.java
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    if (args.length == 1) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("wordCounter", conf, buildTopology(args[0]));
        Thread.sleep(60 * 1000);
        cluster.killTopology("wordCounter");
        cluster.shutdown();
        System.exit(0);
    }
    else if(args.length == 2) {
        conf.setNumWorkers(3);
        StormSubmitter.submitTopology(args[1], conf, buildTopology(args[0]));
    } else {
        System.out.println("Usage: WordCountTrident <hdfs url> [topology name]");
    }
}
 
Example 5
Project: splice-community-sample-code   File: MySqlToSpliceTopology.java
public static void main(String[] args) throws SQLException {

    // tableName is the name of the table in splice to insert records to
    // server is the server instance running splice
    String tableName = "students";
    String server = "localhost";
    TopologyBuilder builder = new TopologyBuilder();

    // set the spout for the topology
    builder.setSpout("seedDataFromMySql", new MySqlSpout());

    // dump the stream data into splice
    builder.setBolt("dbRowProcessing", new MySqlSpliceBolt(server, tableName), 1).shuffleGrouping("seedDataFromMySql");

    Config conf = new Config();
    conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("mysql-splice-topology", conf, builder.createTopology());
    Utils.sleep(3000);
    cluster.shutdown();
}
 
Example 6
Project: preliminary.demo   File: RaceTopology.java
public static void main(String[] args) throws Exception {

    Config conf = new Config();
    int spout_Parallelism_hint = 1;
    int split_Parallelism_hint = 2;
    int count_Parallelism_hint = 2;

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("spout", new RaceSentenceSpout(), spout_Parallelism_hint);
    builder.setBolt("split", new SplitSentence(), split_Parallelism_hint).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), count_Parallelism_hint).fieldsGrouping("split", new Fields("word"));
    String topologyName = RaceConfig.JstormTopologyName;

    try {
        StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
        //begin by Young

        //end by Young
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 7
Project: hadooparchitecturebook   File: MovingAvgLocalTopologyRunner.java
public static void main(String[] args) {

    Config config = new Config();
    config.setDebug(true);

    StormTopology topology = buildTopology();
    // Runs locally:
    LocalCluster localCluster = new LocalCluster();
    localCluster.submitTopology("local-moving-avg", config, topology);
    // Un-comment to run as part of a Storm cluster instead:
    // try {
    //   StormSubmitter.submitTopology("cluster-moving-average",
    //                                 config,
    //                                 topology);
    // } catch (AlreadyAliveException e) {
    //   e.printStackTrace();
    // } catch (InvalidTopologyException e) {
    //   e.printStackTrace();
    // }
}
 
Example 8
Project: RealEstate-Streaming   File: KafkaPhoenixTopology.java
public void buildAndSubmit() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config config = new Config();
    config.setDebug(true);
    // String nimbusHost = topologyConfig.getProperty("nimbus.host");
    config.put(Config.NIMBUS_HOST, "localhost");

    configureKafkaSpout(builder);
    configureRouteBolt(builder);
    configureInsertBolt(builder);

    //builder.setBolt("submitter", new SubmitBolt())
    //   .shuffleGrouping(ROUTE_BOLT);

    try {
        StormSubmitter.submitTopology("realestate-topology", config, builder.createTopology());
    } catch (Exception e) {
        LOG.error("Error submitting topology", e);
    }
}
 
Example 9
Project: storm-demo   File: LogStatisticsTopology.java
public static void main(String[] args) {
    Config config = new Config();

    HdfsBolt hdfsBolt = makeHdfsBolt();
    KafkaSpout kafkaSpout = makeKafkaSpout(TOPIC, TOPOLOGY_NAME);

    LOG.info("Topology name is {}", TOPOLOGY_NAME);

    TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setSpout(KAFKA_SPOUT_ID, kafkaSpout, 10);
    topologyBuilder.setBolt(CROP_BOLT_ID, new CropBolt(), 10).shuffleGrouping(KAFKA_SPOUT_ID);
    topologyBuilder.setBolt(SPLIT_FIELDS_BOLT_ID, new SplitFieldsBolt(), 10).shuffleGrouping(CROP_BOLT_ID);
    topologyBuilder.setBolt(STORM_HDFS_BOLT_ID, hdfsBolt, 4).fieldsGrouping(SPLIT_FIELDS_BOLT_ID, new Fields("timestamp", "fieldvalues"));

    if (args != null && args.length > 0) {
        config.setDebug(false);
        config.setNumWorkers(3);

        try {
            StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
        } catch (InvalidTopologyException | AlreadyAliveException | AuthorizationException e) {
            e.printStackTrace();
        }
    }
}
 
Example 10
Project: storm-demo   File: AutoHDFS.java
@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
    Map conf = new HashMap();
    conf.put(Config.TOPOLOGY_SUBMITTER_PRINCIPAL, args[0]); // principal with realm, e.g. storm@EXAMPLE.COM
    conf.put(STORM_USER_NAME_KEY, args[1]); // user name with realm, e.g. hdfs@EXAMPLE.COM
    conf.put(STORM_KEYTAB_FILE_KEY, args[2]); // e.g. /etc/security/keytabs/storm.keytab

    Configuration configuration = new Configuration();
    AutoHDFS autoHDFS = new AutoHDFS();
    autoHDFS.prepare(conf);

    Map<String,String> creds  = new HashMap<String, String>();
    autoHDFS.populateCredentials(creds, conf);
    LOG.info("Got HDFS credentials", autoHDFS.getCredentials(creds));

    Subject s = new Subject();
    autoHDFS.populateSubject(s, creds);
    LOG.info("Got a Subject "+ s);

    autoHDFS.renew(creds, conf);
    LOG.info("renewed credentials", autoHDFS.getCredentials(creds));
}
 
Example 11
Project: learn_jstorm   File: TridentTopology.java
private static Integer getMaxParallelism(Set<Group> groups) {
    Integer ret = null;
    for(Group g: groups) {
        if(isSpoutGroup(g)) {
            SpoutNode n = (SpoutNode) g.nodes.iterator().next();
            Map conf = getSpoutComponentConfig(n.spout);
            if(conf==null) conf = new HashMap();
            Number maxP = (Number) conf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM);
            if(maxP!=null) {
                if(ret==null) ret = maxP.intValue();
                else ret = Math.min(ret, maxP.intValue());
            }
        }
    }
    return ret;
}
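For context, the cap consulted above is normally placed in the config through the typed setter; a minimal sketch (the value 3 is illustrative):

Config conf = new Config();
conf.setMaxTaskParallelism(3); // stores 3 under Config.TOPOLOGY_MAX_TASK_PARALLELISM
// getMaxParallelism then returns at most 3 for a topology whose spouts carry this config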
 
Example 12
Project: learn_jstorm   File: SequenceTopologyTool.java
public void SetRemoteTopology() throws AlreadyAliveException,
        InvalidTopologyException, TopologyAssignException {
    Config conf = getConf();
    StormTopology topology = buildTopology();

    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    String streamName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (streamName == null) {
        streamName = "SequenceTest";
    }

    if (streamName.contains("zeromq")) {
        conf.put(Config.STORM_MESSAGING_TRANSPORT,
                "com.alibaba.jstorm.message.zeroMq.MQContext");
    } else {
        conf.put(Config.STORM_MESSAGING_TRANSPORT,
                "com.alibaba.jstorm.message.netty.NettyContext");
    }

    StormSubmitter.submitTopology(streamName, conf, topology);
}
 
Example 13
Project: big-data-system   File: CopyOfPrintSampleStream.java
public static void main(String[] args) {
    String consumerKey = args[0];
    String consumerSecret = args[1];
    String accessToken = args[2];
    String accessTokenSecret = args[3];
    String[] arguments = args.clone();
    String[] keyWords = Arrays.copyOfRange(arguments, 4, arguments.length);

    TopologyBuilder builder = new TopologyBuilder();

    builder.setSpout("twitter", new TwitterSampleSpout(consumerKey, consumerSecret,
                            accessToken, accessTokenSecret, keyWords));
    builder.setBolt("print", new PrinterBolt())
            .shuffleGrouping("twitter");

    Config conf = new Config();

    LocalCluster cluster = new LocalCluster();

    cluster.submitTopology("test", conf, builder.createTopology());

    Utils.sleep(10000);
    cluster.shutdown();
}
 
Example 14
Project: cdh-storm   File: ReachTopology.java
public static void main(String[] args) throws Exception {
  LinearDRPCTopologyBuilder builder = construct();

  Config conf = new Config();

  if (args == null || args.length == 0) {
    conf.setMaxTaskParallelism(3);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("reach-drpc", conf, builder.createLocalTopology(drpc));

    String[] urlsToTry = new String[]{ "foo.com/blog/1", "engineering.twitter.com/blog/5", "notaurl.com" };
    for (String url : urlsToTry) {
      System.out.println("Reach of " + url + ": " + drpc.execute("reach", url));
    }

    cluster.shutdown();
    drpc.shutdown();
  }
  else {
    conf.setNumWorkers(6);
    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createRemoteTopology());
  }
}
 
Example 15
Project: jstorm-0.9.6.3-   File: NimbusServer.java
private void initThrift(Map conf) throws TTransportException {
	Integer thrift_port = JStormUtils.parseInt(conf
			.get(Config.NIMBUS_THRIFT_PORT));
	TNonblockingServerSocket socket = new TNonblockingServerSocket(
			thrift_port);

	Integer maxReadBufSize = JStormUtils.parseInt(conf
			.get(Config.NIMBUS_THRIFT_MAX_BUFFER_SIZE));

	THsHaServer.Args args = new THsHaServer.Args(socket);
	args.workerThreads(ServiceHandler.THREAD_NUM);
	args.protocolFactory(new TBinaryProtocol.Factory(false, true,
			maxReadBufSize));

	args.processor(new Nimbus.Processor<Iface>(serviceHandler));
	args.maxReadBufferBytes = maxReadBufSize;

	thriftServer = new THsHaServer(args);

	LOG.info("Successfully started nimbus: started Thrift server...");
	thriftServer.serve();
}
 
Example 16
Project: jstrom   File: IsolatedPool.java
@Override
public void addTopology(TopologyDetails td) {
    String topId = td.getId();
    LOG.debug("Adding in Topology {}", topId);
    SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
    Set<Node> assignedNodes = new HashSet<Node>();
    if (assignment != null) {
        for (WorkerSlot ws : assignment.getSlots()) {
            Node n = _nodeIdToNode.get(ws.getNodeId());
            assignedNodes.add(n);
        }
    }
    _usedNodes += assignedNodes.size();
    _topologyIdToNodes.put(topId, assignedNodes);
    _tds.put(topId, td);
    if (td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES) != null) {
        _isolated.add(topId);
    }
}
 
Example 17
Project: big-data-system   File: TransactionalGlobalCount.java
public static void main(String[] args) throws Exception {
  MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
  TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
  builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
  builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");

  LocalCluster cluster = new LocalCluster();

  Config config = new Config();
  config.setDebug(true);
  config.setMaxSpoutPending(3);

  cluster.submitTopology("global-count-topology", config, builder.buildTopology());

  Thread.sleep(3000);
  cluster.shutdown();
}
 
Example 18
Project: learn_jstorm   File: Heartbeat.java
/**
 * @param conf              the supervisor configuration map
 * @param stormClusterState cluster state accessor
 * @param supervisorId      id of this supervisor
 * @param active            flag indicating whether the supervisor is active
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public Heartbeat(Map conf, StormClusterState stormClusterState,
		String supervisorId, AtomicBoolean active) {

	String myHostName = JStormServerUtils.getHostName(conf);

	this.stormClusterState = stormClusterState;
	this.supervisorId = supervisorId;
	this.conf = conf;
	this.myHostName = myHostName;
	this.startTime = TimeUtils.current_time_secs();
	this.active = active;
	this.frequence = JStormUtils.parseInt(conf
			.get(Config.SUPERVISOR_HEARTBEAT_FREQUENCY_SECS));

	initSupervisorInfo(conf);
	
	LOG.info("Successfully init supervisor heartbeat thread, " + supervisorInfo);
}
 
Example 19
Project: jstorm-0.9.6.3-   File: SequenceSpout.java
public long getMaxPending(Map conf) {
	// if single spout thread, MAX_PENDING should be Long.MAX_VALUE
	if (ConfigExtension.isSpoutSingleThread(conf)) {
		return Long.MAX_VALUE;
	}

	Object pending = conf.get(Config.TOPOLOGY_MAX_SPOUT_PENDING);
	if (pending == null) {
		return Long.MAX_VALUE;
	}

	int pendingNum = JStormUtils.parseInt(pending);
	if (pendingNum == 1) {
		return Long.MAX_VALUE;
	}

	return pendingNum;
}
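The key read here is the one that Config.setMaxSpoutPending fills (see the WordCountTrident and TransactionalGlobalCount examples above); a minimal sketch of the interaction, assuming the spout is not in single-thread mode:

Config conf = new Config();
conf.setMaxSpoutPending(5); // stored under Config.TOPOLOGY_MAX_SPOUT_PENDING
// getMaxPending(conf) == 5; an unset key or a value of 1 yields Long.MAX_VALUE instead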
 
Example 20
Project: jstorm-0.9.6.3-   File: SimpleBatchTopology.java
private static void LoadYaml(String confPath) {

    Yaml yaml = new Yaml();

    try {
        InputStream stream = new FileInputStream(confPath);

        conf = (Map) yaml.load(stream);
        if (conf == null || conf.isEmpty()) {
            throw new RuntimeException("Failed to read config file");
        }

    } catch (FileNotFoundException e) {
        System.out.println("No such file " + confPath);
        throw new RuntimeException("No config file");
    } catch (Exception e1) {
        e1.printStackTrace();
        throw new RuntimeException("Failed to read config file");
    }

    topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
}
 
Example 21
Project: jstorm-0.9.6.3-   File: TransactionalGlobalCount.java
public static void main(String[] args) throws Exception {
    MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), PARTITION_TAKE_PER_BATCH);
    TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 2);
    builder.setBolt("partial-count", new BatchCount(), 3).noneGrouping("spout");
    builder.setBolt("sum", new UpdateGlobalCount(), 1).globalGrouping("partial-count");

    // LocalCluster cluster = new LocalCluster();

    Config config = new Config();
    config.setDebug(true);
    config.setMaxSpoutPending(3);
    config.put(Config.TOPOLOGY_WORKERS, 9);
    Config.setNumAckers(config, 0);

    StormSubmitter.submitTopology("global-count-topology", config, builder.buildTopology());

    // Thread.sleep(3000);
    // cluster.shutdown();
}
 
Example 22
Project: cdh-storm   File: ExclamationTopology.java
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();

  builder.setSpout("word", new TestWordSpout(), 10);
  builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");

  Config conf = new Config();
  conf.setDebug(true);

  if (args != null && args.length > 0) {
    conf.setNumWorkers(3);

    StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
  }
  else {

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
 
Example 23
Project: Mastering-Apache-Storm   File: Topology.java
public static void main(String[] args) throws AlreadyAliveException,
        InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();

    List<String> zks = new ArrayList<String>();
    zks.add("192.168.41.122");

    List<String> cFs = new ArrayList<String>();
    cFs.add("personal");
    cFs.add("company");

    // set the spout class
    builder.setSpout("spout", new SampleSpout(), 2);
    // set the bolt class
    builder.setBolt("bolt", new StormRedisBolt("192.168.41.122", 2181), 2).shuffleGrouping("spout");

    Config conf = new Config();
    conf.setDebug(true);
    // create an instance of LocalCluster class for
    // executing topology in local mode.
    LocalCluster cluster = new LocalCluster();

    // StormRedisTopology is the name of the submitted topology.
    cluster.submitTopology("StormRedisTopology", conf,
            builder.createTopology());
    try {
        Thread.sleep(10000);
    } catch (Exception exception) {
        System.out.println("Thread interrupted exception : " + exception);
    }
    // kill the StormRedisTopology
    cluster.killTopology("StormRedisTopology");
    // shutdown the storm test cluster
    cluster.shutdown();
}
 
Example 24
Project: Mastering-Mesos   File: MesosNimbus.java
@SuppressWarnings("unchecked")
protected void initialize(Map conf, String localDir) throws Exception {
  _conf = new HashMap();
  _conf.putAll(conf);

  _state = new LocalStateShim(localDir);
  _allowedHosts = listIntoSet((List<String>) conf.get(CONF_MESOS_ALLOWED_HOSTS));
  _disallowedHosts = listIntoSet((List<String>) conf.get(CONF_MESOS_DISALLOWED_HOSTS));
  Boolean preferReservedResources = (Boolean) conf.get(CONF_MESOS_PREFER_RESERVED_RESOURCES);
  if (preferReservedResources != null) {
    _preferReservedResources = preferReservedResources;
  }
  _container = Optional.fromNullable((String) conf.get(CONF_MESOS_CONTAINER_DOCKER_IMAGE));
  _scheduler = new NimbusScheduler(this);

  // Generate YAML to be served up to clients
  _generatedConfPath = Paths.get(
      Optional.fromNullable((String) conf.get(Config.STORM_LOCAL_DIR)).or("./"),
      "generated-conf");
  if (!_generatedConfPath.toFile().exists() && !_generatedConfPath.toFile().mkdirs()) {
    throw new RuntimeException("Couldn't create generated-conf dir, _generatedConfPath=" + _generatedConfPath.toString());
  }

  createLocalServerPort();
  setupHttpServer();

  _conf.put(Config.NIMBUS_HOST, _configUrl.getHost());

  File generatedConf = Paths.get(_generatedConfPath.toString(), "storm.yaml").toFile();
  Yaml yaml = new Yaml();
  FileWriter writer = new FileWriter(generatedConf);
  yaml.dump(_conf, writer);
}
 
Example 25
Project: rb-bi   File: TridentKafkaEmitter.java
public TridentKafkaEmitter(Map conf, TopologyContext context, TridentKafkaConfig config, String topologyInstanceId) {
    _config = config;
    _topologyInstanceId = topologyInstanceId;
    _connections = new DynamicPartitionConnections(_config, KafkaUtils.makeBrokerReader(conf, _config));
    _topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    _kafkaOffsetMetric = new KafkaUtils.KafkaOffsetMetric(_config.topic, _connections);
    context.registerMetric("kafkaOffset", _kafkaOffsetMetric, _config.metricsTimeBucketSizeInSecs);
    _kafkaMeanFetchLatencyMetric = context.registerMetric("kafkaFetchAvg", new MeanReducer(), _config.metricsTimeBucketSizeInSecs);
    _kafkaMaxFetchLatencyMetric = context.registerMetric("kafkaFetchMax", new MaxMetric(), _config.metricsTimeBucketSizeInSecs);
}
 
Example 26
Project: rb-bi   File: ZkState.java
private CuratorFramework newCurator(Map stateConf) throws Exception {
    Integer port = (Integer) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_PORT);
    String serverPorts = "";
    for (String server : (List<String>) stateConf.get(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS)) {
        serverPorts = serverPorts + server + ":" + port + ",";
    }
    return CuratorFrameworkFactory.newClient(serverPorts,
            Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
            15000,
            new RetryNTimes(Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
                    Utils.getInt(stateConf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
}
 
Example 27
Project: rb-bi   File: DynamicBrokersReader.java
public DynamicBrokersReader(Map conf, String zkStr, String zkPath, String topic) {
    _zkPath = zkPath;
    _topic = topic;
    try {
        _curator = CuratorFrameworkFactory.newClient(
                zkStr,
                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)),
                Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT)),
                new RetryNTimes(Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)),
                        Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
        _curator.start();
    } catch (Exception ex) {
        LOG.error("Couldn't connect to zookeeper", ex);
    }
}
 
Example 28
Project: rb-bi   File: ConfigData.java
public ConfigData() {
    _conf = new Config();
    _configFile = new ConfigFile(debug);
    _kafkaPartitions = new HashMap<>();
    _topics = _configFile.getAvailableTopics();
    _tranquilityPartitions = new HashMap<>();
    _zookeeper = getZkHost();
    debug = false;
    getZkData();
    getTranquilityPartitions();
}
 
Example 29
Project: rb-bi   File: ConfigData.java
public Config setConfig(String mode) {
    if (mode.equals("local")) {
        _conf.setMaxTaskParallelism(1);
        _conf.setDebug(false);
    } else if (mode.equals("cluster")) {
        _conf.put(Config.TOPOLOGY_WORKERS, getWorkers());
        _conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, getMaxSpoutPending());
        _conf.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, getEmitBatchInterval());
        _conf.put("rbDebug", debug);
        Boolean hash_mac = _configFile.getFromGeneral("hash_mac");

        if (hash_mac != null)
            _conf.put("hash_mac", hash_mac);
        else
            _conf.put("hash_mac", false);

        /*  Metrics  */
        Map<String, Object> zkMetricsConf = new HashMap<>();
        zkMetricsConf.put("zookeeper", _zookeeper);
        _conf.registerMetricsConsumer(KafkaConsumerMonitorMetrics.class, zkMetricsConf, 1);


        if (getMetrics()) {

            Map<String, Object> functionMetricsConf = new HashMap<>();
            List<String> metrics = new ArrayList<>();

            metrics.add("throughput");
            functionMetricsConf.put("zookeeper", _zookeeper);
            functionMetricsConf.put("metrics", metrics);
            functionMetricsConf.put("topic", "rb_monitor");

            _conf.registerMetricsConsumer(Metrics2KafkaProducer.class, functionMetricsConf, 1);

        }
    }

    return _conf;
}
 
Example 30
Project: rb-bi   File: TopologyFunctionTest.java
@Test
public void macVendorTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/flows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/macVendorFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    MacVendorFunction._ouiFilePath = Thread.currentThread().getContextClassLoader().getResource("db/oui-vendors").getPath();

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("macVendor");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new MacVendorFunction(), new Fields("macVendor"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 31
Project: rb-bi   File: TopologyFunctionTest.java
@Test
public void nonTimestampTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/nonTimestampFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);


    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new CheckTimestampFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(true, stormFlow.contains("timestamp"));
    }
}
 
Example 32
Project: rb-bi   File: TopologyFunctionTest.java
@Test
public void analizeHttpUrlTest() throws FileNotFoundException {

    File fileFlow = new File(Thread.currentThread().getContextClassLoader().getResource("inputData/httpFlows.json").getPath());
    File checkFlow = new File(Thread.currentThread().getContextClassLoader().getResource("dataCheck/httpFlows.json").getPath());

    Scanner flows = new Scanner(fileFlow);
    Scanner checkFlows = new Scanner(checkFlow);

    List<String> fieldsFlow = new ArrayList<String>();

    fieldsFlow.add("flows");
    fieldsFlow.add("httpUrlMap");

    LocalDRPC drpc = new LocalDRPC();

    TridentTopology topology = new TridentTopology();
    topology.newDRPCStream("test", drpc)
            .each(new Fields("args"), new MapperFunction("rb_test"), new Fields("flows"))
            .each(new Fields("flows"), new AnalizeHttpUrlFunction(), new Fields("httpUrlMap"))
            .each(new Fields(fieldsFlow), new MergeMapsFunction(), new Fields("finalMap"))
            .project(new Fields("finalMap"))
            .each(new Fields("finalMap"), new MapToJSONFunction(), new Fields("jsonString"));

    Config conf = new Config();
    conf.put("rbDebug", true);
    conf.setMaxTaskParallelism(1);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("testing-topology", conf, topology.build());

    while (flows.hasNextLine()) {
        String stormFlow = drpc.execute("test", flows.nextLine());
        stormFlow = stormFlow.substring(stormFlow.indexOf("{"), stormFlow.indexOf("}") + 1);
        Assert.assertEquals(checkFlows.nextLine(), stormFlow);
    }
}
 
Example 33
Project: storm-scheduler   File: MonitoringMetricsToGraphiteWriter.java
/**
 * This method computes and returns the path in Graphite under which the metrics are stored. Because
 * this path is configurable through the property {@value #CONF_MONITORING_GRAPHITE_BASE_PATH}, the storm
 * configuration map must be passed as an argument. The returned path ends with a trailing ".".
 * <p/>
 * Example: stools.topoXYZ.
 *
 * @param stormConf the storm configuration map.
 * @return the path in Graphite under which the metric data is stored.
 */
public static String getConfiguredGraphitBasePath(Map stormConf) {
    String path;

    if (stormConf.containsKey(CONF_MONITORING_GRAPHITE_BASE_PATH)) {
        path = (String) stormConf.get(CONF_MONITORING_GRAPHITE_BASE_PATH);
    } else {
        path = DEFAULT_GRAPHITE_BASE_PATH;
    }

    path += "." + stormConf.get(Config.TOPOLOGY_NAME) + ".";

    return path;
}
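A hypothetical call, assuming no custom base path is configured so the default from the Javadoc example applies:

Map stormConf = new HashMap();
stormConf.put(Config.TOPOLOGY_NAME, "topoXYZ");
String path = MonitoringMetricsToGraphiteWriter.getConfiguredGraphitBasePath(stormConf);
// path == "stools.topoXYZ." (default base path plus topology name, with trailing dot)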
 
Example 34
Project: storm-scheduler   File: Stootils.java
/**
 * Creates or returns the curator zookeeper client object for the given storm configuration object. According to
 * http://curator.apache.org/curator-framework instances of CuratorFramework are fully thread-safe and should be shared
 * within an application per zk-cluster. We assume that there is only one version of the storm configuration object
 * and return a singleton instance of the zkClient.
 *
 * @param stormConf the storm configuration object, which will be used to create the CuratorFramework instance in
 *                  the case that the singleton instance is null.
 * @return a singleton instance created from the first call of this method.
 */
@SuppressWarnings("unchecked") // the list of zookeeper servers is a list, otherwise we have bigger problems
public static synchronized CuratorFramework getConfiguredZkClient(Map stormConf) {
    if (zkClientSingleton == null) {
        LOG.debug("Creating CuratorFramework client for ZK server at {}:{}", stormConf.get(Config.STORM_ZOOKEEPER_SERVERS), stormConf.get(Config.STORM_ZOOKEEPER_PORT));
        zkClientSingleton = backtype.storm.utils.Utils.newCurator(stormConf,
                (List<String>) stormConf.get(Config.STORM_ZOOKEEPER_SERVERS),
                stormConf.get(Config.STORM_ZOOKEEPER_PORT),
                "/",
                new ZookeeperAuthInfo(stormConf));
        zkClientSingleton.start();
    }

    return zkClientSingleton;
}
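Usage is then a single call; given a populated storm configuration map, repeated calls return the same shared client:

CuratorFramework zk = Stootils.getConfiguredZkClient(stormConf); // stormConf: the topology's config map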
 
Example 35
Project: fiware-sinfonier   File: DynamicTopology.java
public static void main(String[] args) throws Exception {

    LOG.info("Reading JSON file configuration...");
    JSONProperties config = new JSONProperties("/topology.json");
    TopologyBuilder builder = new TopologyBuilder();

    /* Spout Configuration */
    JSONArray spouts = config.getSpouts();
    configureSpouts(builder, spouts);

    /* Bolt Configuration */
    JSONArray bolts = config.getBolts();
    configureBolts(builder, bolts);

    /* Drain Configuration */
    JSONArray drains = config.getDrains();
    configureDrains(builder, drains);

    /* Configure more Storm options */
    Config conf = setTopologyStormConfig(config.getProperties());

    if (config.getProperty("name") != null) {
        StormSubmitter.submitTopology((String) config.getProperty("name"), conf, builder.createTopology());
    } else {
        conf.setDebug(true);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("test", conf, builder.createTopology());
        Utils.sleep(1000000); // keep alive for 1,000,000 ms = 1000 seconds
        cluster.killTopology("test");
        cluster.shutdown();
    }
}
 
Example 36
Project: fiware-sinfonier   File: DynamicTopology.java
private static Config setTopologyStormConfig(JSONObject topologyProperties) throws ConfigurationException {

    Config conf = new Config();

    Iterator<?> keys = topologyProperties.keys();
    while (keys.hasNext()) {
        String stormProperty = (String) keys.next();
        conf.put(stormProperty, topologyProperties.get(stormProperty));
    }

    return conf;
}
 
Example 37
Project: fiware-sinfonier   File: DynamicTopology.java
private static void configureSpouts(TopologyBuilder builder, JSONArray spouts) throws Exception {

    if (spouts == null || spouts.length() == 0) {
        throw new SinfonierException("There are no spouts. Add at least one spout.");
    }

    for (int i = 0; i < spouts.length(); i++) {

        JSONObject spout = spouts.getJSONObject(i);

        LOG.info("Creating spout with id: " + spout.getString("abstractionId"));

        Object spoutInstance = new Object();
        SpoutDeclarer spoutDeclarer = null;

        try {
            spoutInstance = Class.forName(spout.getString("class"))
                    .getConstructor(String.class, String.class).newInstance("", spout.toString());

            spoutDeclarer = builder.setSpout(
                    spout.getString("abstractionId"),
                    (IRichSpout) spoutInstance, spout.getInt("parallelism"));

        } catch (Exception e) {
            LOG.error(e.toString());
        }

        if (spout.has("numTasks") && spoutDeclarer != null) {
            spoutDeclarer.setNumTasks(spout.getInt("numTasks"));
        }

        if (spout.has("tickTuple") && spoutDeclarer != null) {
            spoutDeclarer.addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, spout.getInt("tickTuple"));
        }
    }
}
 
Example 38
Project: reddit-sentiment-storm   File: ZkPublisher.java
public void init(Map stormConf) throws IOException {
	
	List<String> zkServers = (List<String>) stormConf.get(Config.STORM_ZOOKEEPER_SERVERS);
	int zkPort = ((Number) stormConf.get(Config.STORM_ZOOKEEPER_PORT)).intValue();
	
	StringBuilder connectString = new StringBuilder();
	for (int i = 0; i < zkServers.size(); i++) {
		connectString.append(zkServers.get(i)).append(":").append(zkPort);
		if (i < zkServers.size() - 1) {
			connectString.append(",");
		}
	}
	
	LOG.info("ZK connect string:{}", connectString);
	
	zkClient = new ZooKeeper(connectString.toString(), 5000, new Watcher() {
		public void process(WatchedEvent e) {
			LOG.info("Publisher Watcher thread [~{}]: {}", Thread.currentThread().getId(), e.toString());
		}	
	});
	
	try {
		if (zkClient.exists(ROOT_ZNODE, false) == null) {
			zkClient.create(ROOT_ZNODE, null, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
		}
	} catch (Exception e) {
		throw new RuntimeException(e);
	}
}
 
Example 39
Project: splice-community-sample-code   File: SpliceDumperTopology.java
public static void main(String[] args) throws SQLException {

    ArrayList<String> columnNames = new ArrayList<String>();
    ArrayList<String> columnTypes = new ArrayList<String>();
    // this table must exist in splice
    // create table testTable (word varchar(100), number int);
    String tableName = "testTable";
    String server = "localhost";

    // add the column names and the respective types in the two arraylists
    columnNames.add("word");
    columnNames.add("number");

    // add the types
    columnTypes.add("varchar (100)");
    columnTypes.add("int");

    TopologyBuilder builder = new TopologyBuilder();

    // set the spout for the topology
    builder.setSpout("spout", new SpliceIntegerSpout(), 10);

    // dump the stream data into splice
    SpliceDumperBolt dumperBolt = new SpliceDumperBolt(server, tableName);
    builder.setBolt("dumperBolt", dumperBolt, 1).shuffleGrouping("spout");
    Config conf = new Config();
    conf.setDebug(true);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("splice-topology", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.shutdown();
}
 
Example 40
Project: preliminary.demo   File: RaceTopologyLocal.java
public static void main(String[] args) {
    LocalCluster cluster = new LocalCluster();

    /* begin young-define */
    Config conf = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new SpoutLocal(), 1);
    builder.setBolt("split", new SplitSentenceLocal(), 1).shuffleGrouping("spout");
    builder.setBolt("count", new WordCountLocal(), 1).fieldsGrouping("split", new Fields("word"));
    /* end young-define */

    // Recommended: add this line so that every bolt/spout runs with a parallelism of 1
    conf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 1);

    // Submit the topology
    cluster.submitTopology("SequenceTest", conf, builder.createTopology());

    // Wait one minute, then stop the topology and the cluster;
    // increase this value as needed while debugging
    try {
        Thread.sleep(60000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    // Kill the topology
    cluster.killTopology("SequenceTest");

    cluster.shutdown();
}