backtype.storm.topology.base.BaseRichSpout Java Examples

The following examples show how to use backtype.storm.topology.base.BaseRichSpout. All of them are taken from the Apache Eagle project; you can go to the original project or source file by following the links above each example.
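Each provider below returns an instance of BaseRichSpout, the Storm convenience base class that lets a spout override only the lifecycle methods it needs. As a reference point before the Eagle-specific providers, here is a minimal sketch of a custom spout; the class name DemoWordSpout and its word list are hypothetical and only illustrate the open/nextTuple/declareOutputFields contract that the spouts returned below also implement.

import java.util.Map;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

// Hypothetical spout that emits a fixed set of words, for illustration only.
public class DemoWordSpout extends BaseRichSpout {
    private static final long serialVersionUID = 1L;
    private SpoutOutputCollector collector;
    private final String[] words = {"alpha", "beta", "gamma"};
    private int index = 0;

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        // Called once per task; keep the collector for later emits.
        this.collector = collector;
    }

    @Override
    public void nextTuple() {
        // Emit one tuple per call; sleep briefly so an idle spout does not spin.
        collector.emit(new Values(words[index++ % words.length]));
        Utils.sleep(100);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("f1"));
    }
}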
Example #1
Source File: KafkaOffsetSourceSpoutProvider.java    From eagle with Apache License 2.0
public BaseRichSpout getSpout(Config config) {

    // ZooKeeper connection and retry settings for the spout's transaction state
    ZKStateConfig zkStateConfig = new ZKStateConfig();
    zkStateConfig.zkQuorum = config.getString("dataSourceConfig.zkQuorum");
    zkStateConfig.zkRoot = config.getString("dataSourceConfig.transactionZKRoot");
    zkStateConfig.zkSessionTimeoutMs = config.getInt("dataSourceConfig.zkSessionTimeoutMs");
    zkStateConfig.zkRetryTimes = config.getInt("dataSourceConfig.zkRetryTimes");
    zkStateConfig.zkRetryInterval = config.getInt("dataSourceConfig.zkRetryInterval");

    // Eagle service endpoint and credentials
    ServiceConfig serviceConfig = new ServiceConfig();
    serviceConfig.serviceHost = config.getString(EagleConfigConstants.EAGLE_PROPS + "." + EagleConfigConstants.EAGLE_SERVICE + "." + EagleConfigConstants.HOST);
    serviceConfig.servicePort = config.getInt(EagleConfigConstants.EAGLE_PROPS + "." + EagleConfigConstants.EAGLE_SERVICE + "." + EagleConfigConstants.PORT);
    serviceConfig.username = config.getString(EagleConfigConstants.EAGLE_PROPS + "." + EagleConfigConstants.EAGLE_SERVICE + "." + EagleConfigConstants.USERNAME);
    serviceConfig.password = config.getString(EagleConfigConstants.EAGLE_PROPS + "." + EagleConfigConstants.EAGLE_SERVICE + "." + EagleConfigConstants.PASSWORD);

    // Kafka endpoints, site, topic and consumer group whose offsets are checked
    KafkaOffsetCheckerConfig.KafkaConfig kafkaConfig = new KafkaOffsetCheckerConfig.KafkaConfig();
    kafkaConfig.kafkaEndPoints = config.getString("dataSourceConfig.kafkaEndPoints");
    kafkaConfig.site = config.getString("dataSourceConfig.site");
    kafkaConfig.topic = config.getString("dataSourceConfig.topic");
    kafkaConfig.group = config.getString("dataSourceConfig.hdfsTopologyConsumerGroupId");

    KafkaOffsetCheckerConfig checkerConfig = new KafkaOffsetCheckerConfig(serviceConfig, zkStateConfig, kafkaConfig);
    KafkaOffsetSpout spout = new KafkaOffsetSpout(checkerConfig);
    return spout;
}
 
Example #2
Source File: EagleMetricCollectorApplication.java    From eagle with Apache License 2.0
@Override
public StormTopology execute(Config config, StormEnvironment environment) {
    String deserClsName = config.getString("dataSourceConfig.deserializerClass");
    KafkaSourcedSpoutScheme scheme = new KafkaSourcedSpoutScheme(deserClsName, config);

    TopologyBuilder builder = new TopologyBuilder();
    BaseRichSpout spout1 = new KafkaOffsetSourceSpoutProvider().getSpout(config);
    BaseRichSpout spout2 = KafkaSourcedSpoutProvider.getSpout(config, scheme);

    int numOfSpoutTasks = config.getInt(SPOUT_TASK_NUM);
    int numOfDistributionTasks = config.getInt(DISTRIBUTION_TASK_NUM);

    builder.setSpout("kafkaLogLagChecker", spout1, numOfSpoutTasks);
    builder.setSpout("kafkaMessageFetcher", spout2, numOfSpoutTasks);

    // Both spouts feed the distribution bolt, grouped by the "f1" field.
    KafkaMessageDistributionBolt bolt = new KafkaMessageDistributionBolt(config);
    BoltDeclarer boltDeclarer = builder.setBolt("distributionBolt", bolt, numOfDistributionTasks);
    boltDeclarer.fieldsGrouping("kafkaLogLagChecker", new Fields("f1"));
    boltDeclarer.fieldsGrouping("kafkaMessageFetcher", new Fields("f1"));
    return builder.createTopology();
}
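In Eagle the returned StormTopology is deployed by the surrounding StormEnvironment, but for local testing a topology built like the one above could be submitted directly with Storm's own APIs. A minimal sketch, assuming the standard backtype.storm classes; the topology name and worker count are illustrative only.

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.generated.StormTopology;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.utils.Utils;

// Hypothetical local harness for trying out a topology outside Eagle's StormEnvironment.
public class LocalTopologyRunner {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        // ... set spouts and bolts as in the example above ...
        StormTopology topology = builder.createTopology();

        Config stormConf = new Config();     // Storm's own Config, not the typesafe Config used above
        stormConf.setNumWorkers(1);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("metric-collector-demo", stormConf, topology);
        Utils.sleep(60_000);                 // let the topology run for a minute
        cluster.shutdown();
    }
}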
 
Example #3
Source File: HDFSSourcedStormSpoutProvider.java    From Eagle with Apache License 2.0
@Override
public BaseRichSpout getSpout(Config context){
	LOG.info("GetHDFSSpout called");
	String typeOperation = context.getString("dataSourceConfig.typeOperation");
	HDFSSpout spout = HDFSSpout.getHDFSSpout(typeOperation, context);
	spout.copyFiles();
	return spout;
}
 
Example #4
Source File: HiveJobRunningSourcedStormSpoutProvider.java    From Eagle with Apache License 2.0
public BaseRichSpout getSpout(Config config, int parallelism) {
    RunningJobEndpointConfig endPointConfig = new RunningJobEndpointConfig();
    String RMEndPoints = config.getString("dataSourceConfig.RMEndPoints");
    endPointConfig.RMBasePaths = RMEndPoints.split(",");

    String HSEndPoint = config.getString("dataSourceConfig.HSEndPoint");
    endPointConfig.HSBasePath = HSEndPoint;

    ControlConfig controlConfig = new ControlConfig();
    controlConfig.jobInfoEnabled = true;
    controlConfig.jobConfigEnabled = true;
    controlConfig.numTotalPartitions = parallelism <= 0 ? 1 : parallelism;

    boolean zkCleanupTimeSet = config.hasPath("dataSourceConfig.zkCleanupTimeInday");
    // default set as two days
    controlConfig.zkCleanupTimeInday = zkCleanupTimeSet ? config.getInt("dataSourceConfig.zkCleanupTimeInday") : 2;

    boolean completedJobOutofDateTimeSet = config.hasPath("dataSourceConfig.completedJobOutofDateTimeInMin");
    controlConfig.completedJobOutofDateTimeInMin = completedJobOutofDateTimeSet ? config.getInt("dataSourceConfig.completedJobOutofDateTimeInMin") : 120;

    boolean sizeOfJobConfigQueueSet = config.hasPath("dataSourceConfig.sizeOfJobConfigQueue");
    controlConfig.sizeOfJobConfigQueue = sizeOfJobConfigQueueSet ? config.getInt("dataSourceConfig.sizeOfJobConfigQueue") : 10000;

    boolean sizeOfJobCompletedInfoQueue = config.hasPath("dataSourceConfig.sizeOfJobCompletedInfoQueue");
    controlConfig.sizeOfJobCompletedInfoQueue = sizeOfJobCompletedInfoQueue ? config.getInt("dataSourceConfig.sizeOfJobCompletedInfoQueue") : 10000;

    //controlConfig.numTotalPartitions = parallelism == null ? 1 : parallelism;
    ZKStateConfig zkStateConfig = new ZKStateConfig();
    zkStateConfig.zkQuorum = config.getString("dataSourceConfig.zkQuorum");
    zkStateConfig.zkRoot = config.getString("dataSourceConfig.zkRoot");
    zkStateConfig.zkSessionTimeoutMs = config.getInt("dataSourceConfig.zkSessionTimeoutMs");
    zkStateConfig.zkRetryTimes = config.getInt("dataSourceConfig.zkRetryTimes");
    zkStateConfig.zkRetryInterval = config.getInt("dataSourceConfig.zkRetryInterval");
    RunningJobCrawlConfig crawlConfig = new RunningJobCrawlConfig(endPointConfig, controlConfig, zkStateConfig);

    try {
        controlConfig.partitionerCls = (Class<? extends JobPartitioner>) Class.forName(config.getString("dataSourceConfig.partitionerCls"));
    } catch (Exception ex) {
        LOG.error("Failed to find job id partitioner class " + config.getString("dataSourceConfig.partitionerCls"));
        throw new IllegalStateException("jobId partitioner class does not exist " + config.getString("dataSourceConfig.partitionerCls"));
    }

    JobRunningSpout spout = new JobRunningSpout(crawlConfig);
    return spout;
}
 
Example #5
Source File: HiveJobRunningSourcedStormSpoutProvider.java    From eagle with Apache License 2.0
public BaseRichSpout getSpout(Config config, int parallelism) {
    RunningJobEndpointConfig endPointConfig = new RunningJobEndpointConfig();
    String RMEndPoints = config.getString("dataSourceConfig.RMEndPoints");
    endPointConfig.RMBasePaths = RMEndPoints.split(",");

    String HSEndPoint = config.getString("dataSourceConfig.HSEndPoint");
    endPointConfig.HSBasePath = HSEndPoint;

    ControlConfig controlConfig = new ControlConfig();
    controlConfig.jobInfoEnabled = true;
    controlConfig.jobConfigEnabled = true;
    controlConfig.numTotalPartitions = parallelism <= 0 ? 1 : parallelism;

    boolean zkCleanupTimeSet = config.hasPath("dataSourceConfig.zkCleanupTimeInday");
    //default set as two days
    controlConfig.zkCleanupTimeInday = zkCleanupTimeSet ? config.getInt("dataSourceConfig.zkCleanupTimeInday") : 2;

    boolean completedJobOutofDateTimeSet = config.hasPath("dataSourceConfig.completedJobOutofDateTimeInMin");
    controlConfig.completedJobOutofDateTimeInMin = completedJobOutofDateTimeSet ? config.getInt("dataSourceConfig.completedJobOutofDateTimeInMin") : 120;

    boolean sizeOfJobConfigQueueSet = config.hasPath("dataSourceConfig.sizeOfJobConfigQueue");
    controlConfig.sizeOfJobConfigQueue = sizeOfJobConfigQueueSet ? config.getInt("dataSourceConfig.sizeOfJobConfigQueue") : 10000;

    boolean sizeOfJobCompletedInfoQueue = config.hasPath("dataSourceConfig.sizeOfJobCompletedInfoQueue");
    controlConfig.sizeOfJobCompletedInfoQueue = sizeOfJobCompletedInfoQueue ? config.getInt("dataSourceConfig.sizeOfJobCompletedInfoQueue") : 10000;

    //controlConfig.numTotalPartitions = parallelism == null ? 1 : parallelism;
    ZKStateConfig zkStateConfig = new ZKStateConfig();
    zkStateConfig.zkQuorum = config.getString("dataSourceConfig.zkQuorum");
    zkStateConfig.zkLockPath = Utils.makeLockPath(config.getString("dataSourceConfig.zkRoot") + "/" + config.getString("siteId"));
    zkStateConfig.zkRoot = config.getString("dataSourceConfig.zkRoot") + "/" + config.getString("siteId") + JOB_SYMBOL;
    zkStateConfig.zkSessionTimeoutMs = config.getInt("dataSourceConfig.zkSessionTimeoutMs");
    zkStateConfig.zkRetryTimes = config.getInt("dataSourceConfig.zkRetryTimes");
    zkStateConfig.zkRetryInterval = config.getInt("dataSourceConfig.zkRetryInterval");
    RunningJobCrawlConfig crawlConfig = new RunningJobCrawlConfig(endPointConfig, controlConfig, zkStateConfig);

    try {
        controlConfig.partitionerCls = (Class<? extends DefaultJobIdPartitioner>) Class.forName(config.getString("dataSourceConfig.partitionerCls"));
    } catch (Exception ex) {
        LOG.warn("Failed to find job id partitioner class " + config.getString("dataSourceConfig.partitionerCls"));
        //throw new IllegalStateException("jobId partitioner class does not exist " + config.getString("dataSourceConfig.partitionerCls"));
        controlConfig.partitionerCls = DefaultJobIdPartitioner.class;
    }

    HiveJobFetchSpout spout = new HiveJobFetchSpout(crawlConfig);
    return spout;
}
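The provider above reads everything from a typesafe Config object. A minimal sketch of the configuration it expects, built inline with ConfigFactory.parseString; every value (site id, hosts, ports, the partitioner class) is a placeholder, and a real deployment would load these keys from the application's .conf file. Note that if the placeholder partitioner class cannot be loaded, Example #5 simply falls back to DefaultJobIdPartitioner, and the optional cleanup/queue-size keys fall back to their defaults when absent.

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import backtype.storm.topology.base.BaseRichSpout;

// Placeholder values only; real deployments load these keys from application.conf.
public class HiveSpoutConfigDemo {
    public static void main(String[] args) {
        Config config = ConfigFactory.parseString(
            "siteId = sandbox\n"
            + "dataSourceConfig {\n"
            + "  RMEndPoints = \"http://rm1:8088/,http://rm2:8088/\"\n"
            + "  HSEndPoint = \"http://hs:19888/\"\n"
            + "  partitionerCls = com.example.MyJobIdPartitioner\n"   // hypothetical class name
            + "  zkQuorum = \"zk1:2181,zk2:2181\"\n"
            + "  zkRoot = \"/hive/running\"\n"
            + "  zkSessionTimeoutMs = 15000\n"
            + "  zkRetryTimes = 3\n"
            + "  zkRetryInterval = 2000\n"
            + "}\n");

        // Build the spout with a parallelism of 4, as in Example #5.
        BaseRichSpout spout = new HiveJobRunningSourcedStormSpoutProvider().getSpout(config, 4);
    }
}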
 
Example #6
Source File: AbstractStormSpoutProvider.java    From Eagle with Apache License 2.0
public abstract BaseRichSpout getSpout(Config context); 
Example #7
Source File: StormSpoutProvider.java    From eagle with Apache License 2.0
BaseRichSpout getSpout(Config context);
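Examples #1 and #3 through #5 are concrete versions of these two contracts. To close the loop, here is a minimal sketch of a class implementing the StormSpoutProvider interface from Example #7; DemoSpoutProvider is hypothetical and returns the illustrative DemoWordSpout sketched at the top of this page.

import backtype.storm.topology.base.BaseRichSpout;
import com.typesafe.config.Config;

// Hypothetical provider; a real one would read connection details from the Config,
// as Examples #1 and #3 through #5 do.
public class DemoSpoutProvider implements StormSpoutProvider {
    @Override
    public BaseRichSpout getSpout(Config context) {
        return new DemoWordSpout();
    }
}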