Java Code Examples for backtype.storm.task.TopologyContext#getStormConf()

The following examples show how to use backtype.storm.task.TopologyContext#getStormConf() . You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: BatchCache.java    From jstorm with Apache License 2.0 6 votes vote down vote up
/**
 * Builds the per-task batch cache used by transactional processing.
 *
 * <p>Reads the exactly-once flag and the cache backend type from the storm
 * configuration, sets up Kryo tuple (de)serializers, and chooses between a
 * RocksDB-backed cache and the in-memory default.
 *
 * @param context       task context supplying the configuration and worker dirs
 * @param sysTopology   system topology used to build the tuple serializers
 * @param isIntraWorker true for the intra-worker cache, false for inter-worker;
 *                      the two are kept in separate sub-directories
 */
public BatchCache(TopologyContext context, StormTopology sysTopology, boolean isIntraWorker) {
    this.context = context;
    this.stormConf = context.getStormConf();

    // Exactly-once semantics are on unless explicitly disabled in the config.
    this.isExactlyOnceMode = JStormUtils.parseBoolean(stormConf.get("transaction.exactly.once.mode"), true);

    this.pendingBatches = new ConcurrentHashMap<>();

    serializer = new KryoTupleSerializer(stormConf, sysTopology);
    deserializer = new KryoTupleDeserializer(stormConf, context, sysTopology);

    // Intra- and inter-worker caches must not share a directory.
    String dir = context.getWorkerIdDir() + "/transactionCache/task-" + context.getThisTaskId();
    dir += isIntraWorker ? "/intra" : "/inter";

    String type = Utils.getString(stormConf.get("transaction.exactly.cache.type"), "default");
    if ("rocksDb".equalsIgnoreCase(type)) {
        cacheOperator = new RocksDbCacheOperator(context, dir);
    } else {
        cacheOperator = new DefaultCacheOperator();
    }
    LOG.info("Cache config: isExactlyOnce={}, cacheType={}", isExactlyOnceMode, type);
}
 
Example 2
Source File: TopologyMasterContext.java    From jstorm with Apache License 2.0 6 votes vote down vote up
/**
 * Initializes the topology-master task context: captures task/topology ids,
 * the ZK cluster handle, and seeds the current worker set from the ZK
 * assignment.
 *
 * @param stormConf topology configuration handed in by the framework; the
 *                  effective configuration is read from {@code context} instead
 * @param context   task context for this topology-master instance
 * @param collector output collector used by the master to emit
 * @throws RuntimeException if the assignment cannot be read from ZK — the
 *         master cannot operate without an initial worker set
 */
public TopologyMasterContext(Map stormConf, TopologyContext context,
                             final OutputCollector collector) {
    this.conf = context.getStormConf();
    this.context = context;
    this.collector = collector;
    this.taskId = context.getThisTaskId();
    this.topologyId = context.getTopologyId();
    this.zkCluster = context.getZkCluster();

    workerSet = new AtomicReference<>();
    try {
        Assignment assignment = zkCluster.assignment_info(topologyId, null);
        this.workerSet.set(assignment.getWorkers());
    } catch (Exception e) {
        // Log the exception itself, not just a message — the original dropped
        // the stack trace from the log; use parameterized logging.
        LOG.error("Failed to get assignment for {}", topologyId, e);
        throw new RuntimeException(e);
    }

    this.topologyMetricContext = new TopologyMetricContext(topologyId, workerSet.get(), conf);
}
 
Example 3
Source File: HdfsTransactionTopoStateImpl.java    From jstorm with Apache License 2.0 5 votes vote down vote up
/**
 * Initializes the HDFS-backed topology state store from the task context.
 *
 * <p>Opens the HDFS cache, records its base directory as the state root, and
 * loads the retry policy (count and interval) from the storm configuration.
 *
 * @param context task context providing the storm configuration
 */
@Override
public void init(TopologyContext context) {
    this.context = context;
    Map stormConf = context.getStormConf();
    this.hdfs = new HdfsCache(stormConf);
    this.stateBasePath = hdfs.getBaseDir();
    // Retry policy for state reads/writes; both are overridable via config.
    this.RETRY_NUM = JStormUtils.parseInt(stormConf.get("transaction.topology.state.retry.num"), 5);
    this.RETRY_INTERVAL = JStormUtils.parseInt(stormConf.get("transaction.topology.state.retry.interval.ms"), 100);
}
 
Example 4
Source File: RocksDbCacheOperator.java    From jstorm with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a RocksDB-backed cache operator rooted at {@code cacheDir}.
 *
 * <p>Configures the RocksDB write buffers from the storm configuration
 * (defaulting to a 1 GB buffer and 3 buffers), caps RocksDB's own log files,
 * resets and initializes the database directory, and prepares Kryo buffers
 * for (de)serialization.
 *
 * @param context  task context providing the storm configuration
 * @param cacheDir root directory for this cache's RocksDB instance
 * @throws RuntimeException if the cache directory or database cannot be initialized
 */
public RocksDbCacheOperator(TopologyContext context, String cacheDir) {
    this.stormConf = context.getStormConf();

    this.maxFlushSize = ConfigExtension.getTransactionCacheBatchFlushSize(stormConf);

    Options rocksDbOpt = new Options();
    rocksDbOpt.setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
    // Write buffer size and count fall back to 1 GB / 3 buffers when unset.
    long bufferSize =
            ConfigExtension.getTransactionCacheBlockSize(stormConf) != null ? ConfigExtension.getTransactionCacheBlockSize(stormConf) : SizeUnit.GB;
    rocksDbOpt.setWriteBufferSize(bufferSize);
    int maxBufferNum = ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) != null ? ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) : 3;
    rocksDbOpt.setMaxWriteBufferNumber(maxBufferNum);

    // Keep RocksDB's own logging small and quiet: one 1 GB file, warnings only.
    rocksDbOpt.setMaxLogFileSize(SizeUnit.GB);
    rocksDbOpt.setKeepLogFileNum(1);
    rocksDbOpt.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);

    try {
        Map<Object, Object> conf = new HashMap<>();
        conf.put(ROCKSDB_ROOT_DIR, cacheDir);
        // ROCKSDB_RESET wipes any stale cache state left from a previous run.
        conf.put(ROCKSDB_RESET, true);
        initDir(conf);
        initDb(null, rocksDbOpt);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    kryo = new Kryo();
    output = new Output(200, 2000000000);
    input = new Input(1);

    LOG.info("Finished rocksDb cache init: maxFlushSize={}, bufferSize={}, maxBufferNum={}", maxFlushSize, bufferSize, maxBufferNum);
}
 
Example 5
Source File: KvStoreManagerFactory.java    From jstorm with Apache License 2.0 4 votes vote down vote up
/**
 * Builds a monitored KV store manager whose backend type is taken from the
 * topology configuration, delegating to the type-explicit overload.
 *
 * @param context    task context providing the storm configuration
 * @param storeName  logical name of the store
 * @param storePath  filesystem path backing the store
 * @param isStateful whether the store participates in stateful processing
 * @return a monitored KV store manager of the configured type
 * @throws IOException if the underlying store cannot be created
 */
public static <T> IKvStoreManager<T> getKvStoreManagerWithMonitor(TopologyContext context, String storeName, String storePath, boolean isStateful) throws IOException {
    Map stormConf = context.getStormConf();
    return getKvStoreManagerWithMonitor(getKvStoreType(stormConf), context, storeName, storePath, isStateful);
}
 
Example 6
Source File: SnapshotStateMaster.java    From jstorm with Apache License 2.0 4 votes vote down vote up
/**
 * Builds the snapshot-state master for a transactional topology.
 *
 * <p>Resolves topology identity, picks the topology state operator (default or
 * the configured class), partitions components into spouts / stateful bolts /
 * non-stateful downstream bolts / end bolts, restores any previously committed
 * snapshot state (unless a reset is requested), and starts a periodic
 * expiration check for in-flight batch snapshots.
 *
 * @param context         task context for this master
 * @param outputCollector collector used to emit downstream
 * @throws RuntimeException if the topology id cannot be mapped to a name
 */
public SnapshotStateMaster(TopologyContext context, OutputCollector outputCollector) {
    this.topologyId = context.getTopologyId();
    try {
        this.topologyName = Common.topologyIdToName(topologyId);
    } catch (InvalidTopologyException e) {
        LOG.error("Failed to convert topologyId to topologyName", e);
        throw new RuntimeException(e);
    }
    this.topology = context.getRawTopology();
    this.conf = context.getStormConf();
    this.outputCollector = outputCollector;
    this.context = context;

    // Use the configured state operator class if present, else the default.
    String topologyStateOpClassName = ConfigExtension.getTopologyStateOperatorClass(conf);
    if (topologyStateOpClassName == null) {
        stateOperator = new DefaultTopologyStateOperator();
    } else {
        stateOperator = (ITopologyStateOperator) Utils.newInstance(topologyStateOpClassName);
    }
    // The operator must be initialized before initState() is called below.
    stateOperator.init(context);

    Set<String> spoutIds = topology.get_spouts().keySet();
    Set<String> statefulBoltIds = TransactionCommon.getStatefulBolts(topology);
    Set<String> endBolts = TransactionCommon.getEndBolts(topology);
    Set<String> downstreamComponents = new HashSet<>(topology.get_bolts().keySet());

    // Map component ids to task ids; "downstream" here means all bolts that
    // are not themselves stateful.
    spouts = componentToComponentTasks(context, spoutIds);
    statefulBolts = componentToComponentTasks(context, statefulBoltIds);
    downstreamComponents.removeAll(statefulBoltIds);
    nonStatefulBoltTasks = componentToComponentTasks(context, downstreamComponents);
    endBoltTasks = new HashSet<Integer>(context.getComponentsTasks(endBolts));
    snapshotState = new SnapshotState(context, spouts, statefulBolts, nonStatefulBoltTasks, endBoltTasks, stateOperator);

    // A configured reset skips restoring the previously committed state.
    SnapshotState commitState = ConfigExtension.resetTransactionTopologyState(conf) ? null : (SnapshotState) stateOperator.initState(topologyName);
    snapshotState.initState(commitState);

    LOG.info("topologySnapshotState: {}, isResetTopologyState: {}", snapshotState, ConfigExtension.resetTransactionTopologyState(conf));
    LOG.info("lastSuccessfulSnapshotState: {}", snapshotState.getLastSuccessfulBatch().statesInfo());

    // Check for expired batches every half timeout after an initial full
    // timeout delay. NOTE(review): integer division — a timeout of 1 second
    // would yield a period of 0 and make scheduleAtFixedRate throw; presumably
    // the configured timeout is always well above 1s — confirm.
    this.batchSnapshotTimeout = ConfigExtension.getTransactionBatchSnapshotTimeout(conf);
    scheduledService = Executors.newSingleThreadScheduledExecutor();
    scheduledService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            expiredCheck();
        }
    }, batchSnapshotTimeout, batchSnapshotTimeout / 2, TimeUnit.SECONDS);

    // Fair lock so waiting snapshot operations proceed in arrival order.
    this.lock = new ReentrantLock(true);
}
 
Example 7
Source File: DefaultTopologyStateOperator.java    From jstorm with Apache License 2.0 4 votes vote down vote up
/**
 * Captures the storm configuration from the task context; the default state
 * operator needs no further setup.
 *
 * @param context task context providing the storm configuration
 */
@Override
public void init(TopologyContext context) {
    conf = context.getStormConf();
}