Java Code Examples for backtype.storm.utils.Utils#newInstance()

The following examples show how to use backtype.storm.utils.Utils#newInstance(). All examples are drawn from the jstorm project; the source file and license are noted above each example.
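
Utils.newInstance() is a small reflection helper: it loads the named class and invokes a matching constructor, which lets implementations be swapped through configuration strings without compile-time dependencies. Below is a minimal sketch of the idea, assuming standard reflection; the actual storm/jstorm implementation differs in details such as classloader selection and error handling.

import java.lang.reflect.Constructor;

public final class NewInstanceSketch {
    // No-arg form: load the class and call its default constructor.
    public static Object newInstance(String klass) {
        try {
            return Class.forName(klass).getDeclaredConstructor().newInstance();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    // Varargs form (used in Example 1): pick a public constructor whose arity
    // matches the supplied arguments. Matching on arity alone is a
    // simplification; a real implementation would also check parameter types.
    public static Object newInstance(String klass, Object... params) {
        try {
            Class<?> c = Class.forName(klass);
            for (Constructor<?> ctor : c.getConstructors()) {
                if (ctor.getParameterTypes().length == params.length) {
                    return ctor.newInstance(params);
                }
            }
            throw new IllegalArgumentException("No matching constructor for " + klass);
        } catch (RuntimeException e) {
            throw e;
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}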
Example 1
Source File: NettyUnitTest.java    From jstorm with Apache License 2.0
private IConnection initNettyServer(int port) {
    ConcurrentHashMap<Integer, DisruptorQueue> deserializeQueues = new ConcurrentHashMap<Integer, DisruptorQueue>();
    //ConcurrentHashMap<Integer, DisruptorQueue> deserializeCtrlQueues = new ConcurrentHashMap<Integer, DisruptorQueue>();

    WaitStrategy wait = (WaitStrategy) Utils.newInstance("com.lmax.disruptor.TimeoutBlockingWaitStrategy", 5, TimeUnit.MILLISECONDS);
    DisruptorQueue recvControlQueue = DisruptorQueue.mkInstance("Dispatch-control", ProducerType.MULTI,
            256, wait, false, 0, 0);
    Set<Integer> taskSet = new HashSet<Integer>();
    taskSet.add(1);
    IConnection server = context.bind(null, port, deserializeQueues, recvControlQueue, true, taskSet);

    WaitStrategy waitStrategy = new BlockingWaitStrategy();
    DisruptorQueue recvQueue = DisruptorQueue.mkInstance("NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy, false, 0, 0);
    // 'task' is a field of the enclosing test class holding the receiver task id (1 here)
    server.registerQueue(task, recvQueue);

    return server;
}
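
For reference, the reflective call above resolves to the Disruptor wait strategy's two-argument constructor; the direct equivalent, assuming com.lmax.disruptor is on the classpath, is:

import com.lmax.disruptor.TimeoutBlockingWaitStrategy;
import com.lmax.disruptor.WaitStrategy;
import java.util.concurrent.TimeUnit;

// Direct construction equivalent to the Utils.newInstance(...) call in Example 1.
WaitStrategy wait = new TimeoutBlockingWaitStrategy(5, TimeUnit.MILLISECONDS);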
 
Example 2
Source File: RocksDbFactory.java    From jstorm with Apache License 2.0
public static Options getOptions(Map conf) {
    Options options = (new RocksDbOptionsFactory.Defaults()).createOptions(null);
    String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (optionsFactoryClass != null) {
        RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
        options = udfOptionFactory.createOptions(options);
    }
    return options;
}
 
Example 3
Source File: KeyRangeState.java    From jstorm with Apache License 2.0
private void initKeyRangeState(int keyRange) {
    // The state class is referenced by name so that this module needs no
    // compile-time dependency on the jstorm-hdfs module that provides it.
    IRichCheckpointKvState<K, V, String> state =
            (IRichCheckpointKvState<K, V, String>) Utils.newInstance("com.alibaba.jstorm.hdfs.transaction.RocksDbHdfsState");
    state.setStateName(context.getThisComponentId() + "/" + String.valueOf(keyRange));
    state.init(context);
    keyRangeToState.put(keyRange, state);
}
 
Example 4
Source File: RocksDbFactory.java    From jstorm with Apache License 2.0
public static ColumnFamilyOptions getColumnFamilyOptions(Map conf) {
    ColumnFamilyOptions cfOptions = (new RocksDbOptionsFactory.Defaults()).createColumnFamilyOptions(null);
    String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (optionsFactoryClass != null) {
        RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
        cfOptions = udfOptionFactory.createColumnFamilyOptions(cfOptions);
    }
    return cfOptions;
}
 
Example 5
Source File: RocksDbFactory.java    From jstorm with Apache License 2.0
public static DBOptions getDBOptions(Map conf) {
    DBOptions dbOptions = (new RocksDbOptionsFactory.Defaults()).createDbOptions(null);
    String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (optionsFactoryClass != null) {
        RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
        dbOptions = udfOptionFactory.createDbOptions(dbOptions);
    }
    return dbOptions;
}
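
Examples 2, 4, and 5 all read the same ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS entry, so a single configuration value customizes the plain, DB, and column-family options alike. A usage sketch, where "com.example.MyRocksDbOptionsFactory" is a made-up name for a class implementing RocksDbOptionsFactory (jstorm and RocksDB imports assumed):

// Hypothetical wiring of a custom options factory; the class name below is a
// placeholder for your own RocksDbOptionsFactory implementation.
Map conf = new HashMap();
conf.put(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS, "com.example.MyRocksDbOptionsFactory");
Options options = RocksDbFactory.getOptions(conf);
DBOptions dbOptions = RocksDbFactory.getDBOptions(conf);
ColumnFamilyOptions cfOptions = RocksDbFactory.getColumnFamilyOptions(conf);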
 
Example 6
Source File: BlobStoreUtils.java    From jstorm with Apache License 2.0
public static BlobStore getNimbusBlobStore(Map conf, String baseDir, NimbusInfo nimbusInfo) {
    String type = (String) conf.get(Config.NIMBUS_BLOBSTORE);
    if (type == null) {
        type = LocalFsBlobStore.class.getName();
    }
    BlobStore store = (BlobStore) Utils.newInstance(type);
    HashMap nconf = new HashMap(conf);
    // only enable cleanup of blobstore on nimbus
    nconf.put(Config.BLOBSTORE_CLEANUP_ENABLE, Boolean.TRUE);
    store.prepare(nconf, baseDir, nimbusInfo);
    return store;
}
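
Example 6 falls back to LocalFsBlobStore when Config.NIMBUS_BLOBSTORE is unset. A minimal call-site sketch, with the base directory as a placeholder path:

// Hypothetical call site; "/var/storm/nimbus/blobs" is a placeholder and
// nimbusInfo is assumed to come from NimbusInfo.fromConf(conf) as in Example 14.
Map conf = Utils.readStormConfig();
NimbusInfo nimbusInfo = NimbusInfo.fromConf(conf);
BlobStore store = BlobStoreUtils.getNimbusBlobStore(conf, "/var/storm/nimbus/blobs", nimbusInfo);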
 
Example 7
Source File: TransactionalWindowedBolt.java    From jstorm with Apache License 2.0
public void createState(TopologyContext context) {
    windowedStateManager = (IRichCheckpointWindowedState<K, V, String>) Utils.newInstance("com.alibaba.jstorm.hdfs.transaction.WindowedRocksDbHdfsState");
    windowedStateManager.setStateName(String.valueOf(context.getThisTaskId()));
    windowedStateManager.init(context);
    windowedStates = new HashMap<>();
    windowUpdateLock = new ReentrantLock();
}
 
Example 8
Source File: JStormMetricCache.java    From jstorm with Apache License 2.0
public JStormMetricCache(Map conf, StormClusterState zkCluster) {
    String dbCacheClass = getNimbusCacheClass(conf);
    LOG.info("JStorm metrics cache will use {}", dbCacheClass);

    boolean reset = ConfigExtension.getMetricCacheReset(conf);
    try {
        cache = (JStormCache) Utils.newInstance(dbCacheClass);

        String dbDir = StormConfig.metricDbDir(conf);
        conf.put(RocksDBCache.ROCKSDB_ROOT_DIR, dbDir);
        conf.put(RocksDBCache.ROCKSDB_RESET, reset);
        cache.init(conf);
    } catch (Exception e) {
        if (!reset && cache != null) {
            LOG.error("Failed to init rocks db, will reset and try to re-init...");
            conf.put(RocksDBCache.ROCKSDB_RESET, true);
            try {
                cache.init(conf);
            } catch (Exception ex) {
                LOG.error("Error", ex);
            }
        } else {
            LOG.error("Failed to create metrics cache!", e);
            throw new RuntimeException(e);
        }
    }

    this.zkCluster = zkCluster;
}
 
Example 9
Source File: RocksDbHdfsState.java    From jstorm with Apache License 2.0
protected void initRocksDb() {
    RocksDbOptionsFactory optionFactory = new RocksDbOptionsFactory.Defaults();
    Options options = optionFactory.createOptions(null);
    DBOptions dbOptions = optionFactory.createDbOptions(null);
    ColumnFamilyOptions cfOptions = optionFactory.createColumnFamilyOptions(null);
    String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (optionsFactoryClass != null) {
        RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
        options = udfOptionFactory.createOptions(options);
        dbOptions = udfOptionFactory.createDbOptions(dbOptions);
        cfOptions = udfOptionFactory.createColumnFamilyOptions(cfOptions);
    }

    try {
        ttlTimeSec = ConfigExtension.getStateTtlTime(conf);
        if (ttlTimeSec > 0)
            rocksDb = TtlDB.open(options, rocksDbDir, ttlTimeSec, false);
        else
            rocksDb = RocksDB.open(options, rocksDbDir);
        // enable compaction
        rocksDb.compactRange();
        LOG.info("Finish the initialization of RocksDB");
    } catch (RocksDBException e) {
        LOG.error("Failed to open rocksdb located at " + rocksDbDir, e);
        throw new RuntimeException(e.getMessage());
    }

    lastCheckpointFiles = new HashSet<String>();
    lastCleanTime = System.currentTimeMillis();
    lastSuccessBatchId = -1;
}
 
Example 10
Source File: BlobStoreUtils.java    From jstorm with Apache License 2.0
public static ClientBlobStore getClientBlobStoreForSupervisor(Map conf) {
    ClientBlobStore store = (ClientBlobStore) Utils.newInstance(
            (String) conf.get(Config.SUPERVISOR_BLOBSTORE));
    store.prepare(conf);
    return store;
}
 
Example 11
Source File: DfsFactory.java    From jstorm with Apache License 2.0
public static IDfs getHdfsInstance(Map conf) {
    return (IDfs) Utils.newInstance(HDFS_CLASS_PATH, conf);
}
 
Example 12
Source File: WindowedRocksDbHdfsState.java    From jstorm with Apache License 2.0
@Override
protected void initRocksDb() {
    windowToCFHandler = new HashMap<>();

    RocksDbOptionsFactory optionFactory = new RocksDbOptionsFactory.Defaults();
    Options options = optionFactory.createOptions(null);
    DBOptions dbOptions = optionFactory.createDbOptions(null);
    ColumnFamilyOptions cfOptions = optionFactory.createColumnFamilyOptions(null);
    String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (optionsFactoryClass != null) {
        RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
        options = udfOptionFactory.createOptions(options);
        dbOptions = udfOptionFactory.createDbOptions(dbOptions);
        cfOptions = udfOptionFactory.createColumnFamilyOptions(cfOptions);
    }

    try {
        ttlTimeSec = ConfigExtension.getStateTtlTime(conf);
        List<Integer> ttlValues = new ArrayList<>();

        List<byte[]> families = RocksDB.listColumnFamilies(options, rocksDbDir);
        List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
        List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
        if (families != null) {
            for (byte[] bytes : families) {
                columnFamilyDescriptors.add(new ColumnFamilyDescriptor(bytes, cfOptions));
                LOG.debug("Load colum family of {}", new String(bytes));
                if (ttlTimeSec > 0)
                    ttlValues.add(ttlTimeSec);
            }
        }
        
        if (columnFamilyDescriptors.size() > 0) {
            if (ttlTimeSec > 0)
                rocksDb = TtlDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles, ttlValues, false);
            else
                rocksDb = RocksDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles);

            int n = Math.min(columnFamilyDescriptors.size(), columnFamilyHandles.size());
            LOG.info("Try to load RocksDB with column family, desc_num={}, handler_num={}", columnFamilyDescriptors.size(), columnFamilyHandles.size());
            // skip default column
            for (int i = 1; i < n; i++) {
                windowToCFHandler.put((TimeWindow) serializer.deserialize(columnFamilyDescriptors.get(i).columnFamilyName()), columnFamilyHandles.get(i));
            }
        } else {
            rocksDb = RocksDB.open(options, rocksDbDir);
        }
        rocksDb.compactRange();
        LOG.info("Finish the initialization of RocksDB");
    } catch (RocksDBException e) {
        LOG.error("Failed to open rocksdb located at " + rocksDbDir, e);
        throw new RuntimeException(e.getMessage());
    }

    lastCheckpointFiles = new HashSet<String>();
    lastCleanTime = System.currentTimeMillis();
    lastSuccessBatchId = -1;
}
 
Example 13
Source File: SnapshotStateMaster.java    From jstorm with Apache License 2.0
public SnapshotStateMaster(TopologyContext context, OutputCollector outputCollector) {
    this.topologyId = context.getTopologyId();
    try {
        this.topologyName = Common.topologyIdToName(topologyId);
    } catch (InvalidTopologyException e) {
        LOG.error("Failed to convert topologyId to topologyName", e);
        throw new RuntimeException(e);
    }
    this.topology = context.getRawTopology();
    this.conf = context.getStormConf();
    this.outputCollector = outputCollector;
    this.context = context;

    String topologyStateOpClassName = ConfigExtension.getTopologyStateOperatorClass(conf);
    if (topologyStateOpClassName == null) {
        stateOperator = new DefaultTopologyStateOperator();
    } else {
        stateOperator = (ITopologyStateOperator) Utils.newInstance(topologyStateOpClassName);
    }
    stateOperator.init(context);

    Set<String> spoutIds = topology.get_spouts().keySet();
    Set<String> statefulBoltIds = TransactionCommon.getStatefulBolts(topology);
    Set<String> endBolts = TransactionCommon.getEndBolts(topology);
    Set<String> downstreamComponents = new HashSet<>(topology.get_bolts().keySet());

    spouts = componentToComponentTasks(context, spoutIds);
    statefulBolts = componentToComponentTasks(context, statefulBoltIds);
    downstreamComponents.removeAll(statefulBoltIds);
    nonStatefulBoltTasks = componentToComponentTasks(context, downstreamComponents);
    endBoltTasks = new HashSet<Integer>(context.getComponentsTasks(endBolts));
    snapshotState = new SnapshotState(context, spouts, statefulBolts, nonStatefulBoltTasks, endBoltTasks, stateOperator);

    SnapshotState commitState = ConfigExtension.resetTransactionTopologyState(conf) ? null : (SnapshotState) stateOperator.initState(topologyName);
    snapshotState.initState(commitState);

    LOG.info("topologySnapshotState: {}, isResetTopologyState: {}", snapshotState, ConfigExtension.resetTransactionTopologyState(conf));
    LOG.info("lastSuccessfulSnapshotState: {}", snapshotState.getLastSuccessfulBatch().statesInfo());

    this.batchSnapshotTimeout = ConfigExtension.getTransactionBatchSnapshotTimeout(conf);
    scheduledService = Executors.newSingleThreadScheduledExecutor();
    scheduledService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            expiredCheck();
        }
    }, batchSnapshotTimeout, batchSnapshotTimeout / 2, TimeUnit.SECONDS);

    this.lock = new ReentrantLock(true);
}
 
Example 14
Source File: NimbusData.java    From jstorm with Apache License 2.0
@SuppressWarnings({"unchecked", "rawtypes"})
public NimbusData(final Map conf, INimbus inimbus) throws Exception {
    this.conf = conf;

    createFileHandler();
    mkBlobCacheMap();
    this.nimbusHostPortInfo = NimbusInfo.fromConf(conf);
    this.blobStore = BlobStoreUtils.getNimbusBlobStore(conf, nimbusHostPortInfo);

    this.isLaunchedCleaner = false;
    this.isLaunchedMonitor = false;

    this.submittedCount = new AtomicInteger(0);

    this.stormClusterState = Cluster.mk_storm_cluster_state(conf);

    createCache();

    this.taskHeartbeatsCache = new ConcurrentHashMap<>();

    this.scheduExec = Executors.newScheduledThreadPool(SCHEDULE_THREAD_NUM);

    this.statusTransition = new StatusTransition(this);

    this.startTime = TimeUtils.current_time_secs();

    this.inimubs = inimbus;

    localMode = StormConfig.local_mode(conf);

    this.metricCache = new JStormMetricCache(conf, this.stormClusterState);
    this.clusterName = ConfigExtension.getClusterName(conf);

    pendingSubmitTopologies = new TimeCacheMap<>(JStormUtils.MIN_10);
    topologyTaskTimeout = new ConcurrentHashMap<>();
    tasksHeartbeat = new ConcurrentHashMap<>();

    // init nimbus metric reporter
    this.metricsReporter = new JStormMetricsReporter(this);

    // metrics thread will be started in NimbusServer
    this.metricRunnable = ClusterMetricsRunnable.mkInstance(this);

    String configUpdateHandlerClass = ConfigExtension.getNimbusConfigUpdateHandlerClass(conf);
    this.configUpdateHandler = (ConfigUpdateHandler) Utils.newInstance(configUpdateHandlerClass);

    if (conf.containsKey(Config.NIMBUS_TOPOLOGY_ACTION_NOTIFIER_PLUGIN)) {
        String string = (String) conf.get(Config.NIMBUS_TOPOLOGY_ACTION_NOTIFIER_PLUGIN);
        nimbusNotify = (ITopologyActionNotifierPlugin) Utils.newInstance(string);
    } else {
        nimbusNotify = null;
    }
}
 
Example 15
Source File: TransactionStateOperator.java    From jstorm with Apache License 2.0
public IKvState<K, V> createState(TopologyContext context) {
    stateInstance = (IRichCheckpointKvState<K, V, String>) Utils.newInstance("com.alibaba.jstorm.hdfs.transaction.RocksDbHdfsState");
    stateInstance.setStateName(String.valueOf(context.getThisTaskId()));
    stateInstance.init(context);
    return stateInstance;
}
 
Example 16
Source File: Task.java    From jstorm with Apache License 2.0
@SuppressWarnings("rawtypes")
public Task(WorkerData workerData, int taskId) throws Exception {
    openOrPrepareWasCalled = new Atom(false);

    this.workerData = workerData;
    this.topologyContext = workerData.getContextMaker().makeTopologyContext(
            workerData.getSysTopology(), taskId, openOrPrepareWasCalled);
    this.userContext = workerData.getContextMaker().makeTopologyContext(
            workerData.getRawTopology(), taskId, openOrPrepareWasCalled);
    this.taskId = taskId;
    this.componentId = topologyContext.getThisComponentId();
    topologyContext.getStormConf().putAll(Common.component_conf(topologyContext, componentId));
    this.stormConf = topologyContext.getStormConf();

    this.taskStatus = new TaskStatus();

    this.innerTaskTransfer = workerData.getInnerTaskTransfer();
    this.deserializeQueues = workerData.getDeserializeQueues();
    this.controlQueues = workerData.getControlQueues();
    this.topologyId = workerData.getTopologyId();
    this.context = workerData.getContext();
    this.workHalt = workerData.getWorkHalt();
    this.zkCluster = workerData.getZkCluster();
    this.nodePortToSocket = workerData.getNodePortToSocket();
    this.taskToNodePort = workerData.getTaskToNodePort();
    // create report error callback,
    // in fact it is storm_cluster.report-task-error
    ITaskReportErr reportError = new TaskReportError(zkCluster, topologyId, taskId);

    // report error and halt worker
    reportErrorDie = new TaskReportErrorAndDie(reportError, workHalt);
    this.taskStats = new TaskBaseMetric(topologyId, componentId, taskId);
    //register auto hook
    List<String> listHooks = Config.getTopologyAutoTaskHooks(stormConf);
    for (String hook : listHooks) {
        ITaskHook iTaskHook = (ITaskHook) Utils.newInstance(hook);
        userContext.addTaskHook(iTaskHook);
    }

    LOG.info("Begin to deserialize taskObj " + componentId + ":" + this.taskId);

    try {
        WorkerClassLoader.switchThreadContext();
        this.taskObj = Common.get_task_object(
                topologyContext.getRawTopology(), componentId, WorkerClassLoader.getInstance());
        WorkerClassLoader.restoreThreadContext();
    } catch (Exception e) {
        if (reportErrorDie != null) {
            reportErrorDie.report(e);
        } else {
            throw e;
        }
    }
    isTaskBatchTuple = ConfigExtension.isTaskBatchTuple(stormConf);
    LOG.info("Transfer/receive in batch mode :" + isTaskBatchTuple);

    LOG.info("Loading task " + componentId + ":" + this.taskId);
}
 
Example 17
Source File: TopologyMaster.java    From jstorm with Apache License 2.0
public void registerHandlers() {
    // register hb handler
    TMHandler hbHandler = new TaskHeartbeatUpdater();
    hbHandler.init(tmContext);
    handlers.put(Common.TOPOLOGY_MASTER_HB_STREAM_ID, hbHandler);

    // update metric data
    TMHandler metricUpdater = new MetricsUpdater();
    metricUpdater.init(tmContext);
    handlers.put(Common.TOPOLOGY_MASTER_METRICS_STREAM_ID, metricUpdater);

    // update metric meta
    TMHandler metricRegister = new MetricRegister();
    metricRegister.init(tmContext);
    handlers.put(Common.TOPOLOGY_MASTER_REGISTER_METRICS_STREAM_ID, metricRegister);

    // broadcast metric meta to all workers every 15 sec
    handlers.put(MERTRICS_META_BROADCAST, metricRegister);
    TMEvent metricsMetaBroadCastEvent = new TMEvent(metricRegister, new MetricsMetaBroadcastEvent());
    threadPools.scheduleAtFixedRate(metricsMetaBroadCastEvent, 10, 15, TimeUnit.SECONDS);

    // upload metric data every minute
    TMHandler metricsUploader = new MetricsUploader();
    metricsUploader.init(tmContext);
    handlers.put(METRICS_UPLOADER_NAME, metricsUploader);
    TMEvent metricsUploaderEvent = new TMEvent(metricsUploader, null);
    threadPools.scheduleAtFixedRate(metricsUploaderEvent, 5, 60, TimeUnit.SECONDS);

    TMHandler ctrlEventDispatcher = new CtrlEventDispatcher();
    ctrlEventDispatcher.init(tmContext);
    handlers.put(Common.TOPOLOGY_MASTER_CONTROL_STREAM_ID, ctrlEventDispatcher);
    handlers.put(UPDATE_CONFIG_NAME, ctrlEventDispatcher);

    TMHandler workerSetUpdater = new WorkerSetUpdater();
    workerSetUpdater.init(tmContext);
    handlers.put(WORKER_SET_UPDATER_NAME, workerSetUpdater);
    TMEvent workerSetUpdateEvent = new TMEvent(workerSetUpdater, null);
    threadPools.scheduleAtFixedRate(workerSetUpdateEvent, 10, 10, TimeUnit.SECONDS);

    TMHandler grayUpgradeHandler = new GrayUpgradeHandler();
    grayUpgradeHandler.init(tmContext);
    handlers.put("DUMMY", grayUpgradeHandler);
    threadPools.scheduleAtFixedRate((Runnable) grayUpgradeHandler, 15, 15, TimeUnit.SECONDS);
    
    String udfStreamClass = ConfigExtension.getTMUdfStreamClass(tmContext.getConf());
    if (udfStreamClass != null) {
        TMHandler tmUdfHandler = (TMHandler) Utils.newInstance(udfStreamClass);
        tmUdfHandler.init(tmContext);
        handlers.put(USER_DEFINED_STREAM, tmUdfHandler);
        LOG.info("Successfully load user defined stream handler: {}", udfStreamClass);
    }
}