backtype.storm.utils.Utils Java Examples

The following examples show how to use backtype.storm.utils.Utils. They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
Example #1
Source File: KafkaBoltTest.java    From storm-kafka-0.8-plus with Apache License 2.0
private boolean verifyMessage(String key, String message) {
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}
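
Example #1 leans on Utils.toByteArray twice to copy Kafka ByteBuffers into byte arrays before decoding them as strings. A minimal sketch of that helper in isolation (class name and data here are illustrative, not from the original test):

import java.nio.ByteBuffer;

import backtype.storm.utils.Utils;

public class ToByteArraySketch {
    public static void main(String[] args) {
        // Wrap an existing array; position and limit mark the readable window.
        ByteBuffer payload = ByteBuffer.wrap("hello".getBytes());

        // Utils.toByteArray copies the buffer's contents into a fresh byte[].
        byte[] bytes = Utils.toByteArray(payload);
        System.out.println(new String(bytes)); // prints: hello
    }
}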
 
Example #2
Source File: ZooKeeperDataViewTest.java    From jstorm with Apache License 2.0
@BeforeClass
public static void init() {
    String CONFIG_PATH = System.getProperty("user.home") + CONFIG_FILE;
    File file = new File(CONFIG_PATH);
    if (!file.exists()) {
        SKIP = true;
        return;
    }

    try {
        zkobj = new Zookeeper();
        System.getProperties().setProperty("storm.conf.file", CONFIG_PATH);
        Map conf = Utils.readStormConfig();
        zk = zkobj.mkClient(conf,
                (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS),
                conf.get(Config.STORM_ZOOKEEPER_PORT),
                (String) conf.get(Config.STORM_ZOOKEEPER_ROOT));
        gson = new GsonBuilder().setPrettyPrinting().create();
    } catch (Throwable e) {
        e.printStackTrace();
        SKIP = true;
    }
}
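
Utils.readStormConfig, as used in this test, loads Storm's default configuration and overlays it with the cluster's storm.yaml; setting the storm.conf.file system property (as the test does) points it at an alternative file. A small sketch of the call on its own (the printed keys are standard Storm config constants; output depends on the local configuration):

import java.util.Map;

import backtype.storm.Config;
import backtype.storm.utils.Utils;

public class ReadConfigSketch {
    public static void main(String[] args) {
        // Optionally point at a specific file, as the test above does:
        // System.setProperty("storm.conf.file", "/path/to/storm.yaml");

        Map conf = Utils.readStormConfig(); // defaults overlaid with storm.yaml
        System.out.println(conf.get(Config.STORM_ZOOKEEPER_SERVERS));
        System.out.println(conf.get(Config.STORM_ZOOKEEPER_PORT));
    }
}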
 
Example #3
Source File: Heartbeat.java    From jstorm with Apache License 2.0
private void initSupervisorInfo(Map conf) {
    Set<Integer> portList = JStormUtils.getDefaultSupervisorPortList(conf);
    if (!StormConfig.local_mode(conf)) {
        // In distributed mode the supervisor must report a routable hostname.
        boolean isLocalIP = myHostName.equals("127.0.0.1") || myHostName.equals("localhost");
        if (isLocalIP) {
            Exception e = new Exception("the hostname supervisor got is localhost");
            LOG.error("get supervisor host error!", e);
            throw new RuntimeException(e);
        }
    }
    supervisorInfo = new SupervisorInfo(myHostName, supervisorId, portList, conf);

    supervisorInfo.setVersion(Utils.getVersion());
    String buildTs = Utils.getBuildTime();
    supervisorInfo.setBuildTs(buildTs);
    LOG.info("jstorm version:{}, build ts:{}", supervisorInfo.getVersion(), supervisorInfo.getBuildTs());
}
 
Example #4
Source File: StormConfig.java    From jstorm with Apache License 2.0
@SuppressWarnings("unchecked")
public static Object readLocalObject(String topologyId, String readFile) throws IOException {
    String errMsg = "Failed to get topology configuration of " + topologyId + " file:" + readFile;

    byte[] bconf = FileUtils.readFileToByteArray(new File(readFile));
    if (bconf == null) {
        errMsg += ", failed to read";
        LOG.error(errMsg);
        throw new IOException(errMsg);
    }

    Object ret;
    try {
        ret = Utils.javaDeserialize(bconf);
    } catch (Exception e) {
        errMsg += ", failed to serialize the data";
        LOG.error(errMsg);
        throw new IOException(errMsg);
    }

    return ret;
}
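
Utils.javaDeserialize is the inverse of Utils.serialize, which writes an object with plain Java serialization. A round-trip sketch, assuming the single-argument javaDeserialize overload used above:

import java.util.HashMap;
import java.util.Map;

import backtype.storm.utils.Utils;

public class SerializeRoundTripSketch {
    public static void main(String[] args) {
        Map<String, Integer> original = new HashMap<String, Integer>();
        original.put("answer", 42);

        byte[] bytes = Utils.serialize(original);       // Java serialization
        Object restored = Utils.javaDeserialize(bytes); // inverse operation

        System.out.println(original.equals(restored));  // prints: true
    }
}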
 
Example #5
Source File: list.java    From jstorm with Apache License 2.0
public static void main(String[] args) {
    NimbusClient client = null;
    try {
        Map conf = Utils.readStormConfig();
        client = NimbusClient.getConfiguredClient(conf);

        if (args.length > 0 && !StringUtils.isBlank(args[0])) {
            String topologyName = args[0];
            TopologyInfo info = client.getClient().getTopologyInfoByName(topologyName);
            System.out.println("Successfully get topology info \n" + Utils.toPrettyJsonString(info));
        } else {
            ClusterSummary clusterSummary = client.getClient().getClusterInfo();
            System.out.println("Successfully get cluster info \n" + Utils.toPrettyJsonString(clusterSummary));
        }
    } catch (Exception e) {
        System.out.println(e.getMessage());
        e.printStackTrace();
        throw new RuntimeException(e);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
 
Example #6
Source File: ZookeeperManager.java    From jstorm with Apache License 2.0
public static String getZKNodeData(String clusterName, String path) {
    String out = null;
    try {
        ClusterState clusterState = getAndCreateClusterState(clusterName);
        if (clusterState == null) {
            throw new IllegalStateException("Cluster state is null");
        }

        byte[] data = clusterState.get_data(PathUtils.normalize_path(path), false);
        if (data != null && data.length > 0) {
            Object obj = Utils.maybe_deserialize(data);
            if (obj != null){
                out = gson.toJson(obj);
            } else {
                out = new String(data);
            }
        }
    } catch (Exception e) {
        LOG.error("Get zookeeper data error!", e);
    }
    return out;
}
 
Example #7
Source File: rollback_topology.java    From jstorm with Apache License 2.0
private static void rollbackTopology(String topologyName) {
    Map conf = Utils.readStormConfig();
    NimbusClient client = NimbusClient.getConfiguredClient(conf);
    try {
        // update jar
        client.getClient().rollbackTopology(topologyName);
        CommandLineUtil.success("Successfully submit command rollback_topology " + topologyName);
    } catch (Exception e) {
        e.printStackTrace();
        throw new RuntimeException(e);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
 
Example #8
Source File: RocksTTLDBCache.java    From jstorm with Apache License 2.0
@Override
public Object get(String key) {
    for (Entry<Integer, ColumnFamilyHandle> entry : windowHandlers.entrySet()) {
        try {
            byte[] data = ttlDB.get(entry.getValue(), key.getBytes());
            if (data != null) {
                try {
                    return Utils.javaDeserialize(data);
                } catch (Exception e) {
                    LOG.error("Failed to deserialize obj of " + key);
                    ttlDB.remove(entry.getValue(), key.getBytes());
                    return null;
                }
            }
        } catch (Exception ignored) {
        }
    }

    return null;
}
 
Example #9
Source File: Grep.java    From storm-benchmark with Apache License 2.0
@Override
public StormTopology getTopology(Config config) {

  final int spoutNum = BenchmarkUtils.getInt(config, SPOUT_NUM, DEFAULT_SPOUT_NUM);
  final int matBoltNum = BenchmarkUtils.getInt(config, FM_NUM, DEFAULT_MAT_BOLT_NUM);
  final int cntBoltNum = BenchmarkUtils.getInt(config, CM_NUM, DEFAULT_CNT_BOLT_NUM);
  final String ptnString = (String) Utils.get(config, PATTERN_STRING, DEFAULT_PATTERN_STR);

  spout = new KafkaSpout(KafkaUtils.getSpoutConfig(config, new SchemeAsMultiScheme(new StringScheme())));

  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout(SPOUT_ID, spout, spoutNum);
  builder.setBolt(FM_ID, new FindMatchingSentence(ptnString), matBoltNum)
          .localOrShuffleGrouping(SPOUT_ID);
  builder.setBolt(CM_ID, new CountMatchingSentence(), cntBoltNum)
          .fieldsGrouping(FM_ID, new Fields(FindMatchingSentence.FIELDS));

  return builder.createTopology();
}
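
Utils.get(map, key, default), used for PATTERN_STRING above, is a lookup that falls back to a supplied default when the key is absent. A tiny sketch (key names are illustrative):

import java.util.HashMap;
import java.util.Map;

import backtype.storm.utils.Utils;

public class GetWithDefaultSketch {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<String, Object>();
        config.put("pattern.string", "ERROR.*");

        // Returns the mapped value, or the default when the key is missing.
        System.out.println(Utils.get(config, "pattern.string", ".*")); // ERROR.*
        System.out.println(Utils.get(config, "missing.key", ".*"));    // .*
    }
}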
 
Example #10
Source File: NettyUnitTest.java    From jstorm with Apache License 2.0
private IConnection initNettyServer(int port) {
    ConcurrentHashMap<Integer, DisruptorQueue> deserializeQueues = new ConcurrentHashMap<Integer, DisruptorQueue>();
    //ConcurrentHashMap<Integer, DisruptorQueue> deserializeCtrlQueues = new ConcurrentHashMap<Integer, DisruptorQueue>();

    WaitStrategy wait = (WaitStrategy)Utils.newInstance("com.lmax.disruptor.TimeoutBlockingWaitStrategy", 5, TimeUnit.MILLISECONDS);
    DisruptorQueue recvControlQueue = DisruptorQueue.mkInstance("Dispatch-control", ProducerType.MULTI,
            256, wait, false, 0, 0);
    Set<Integer> taskSet = new HashSet<Integer>();
    taskSet.add(1);
    IConnection server = context.bind(null, port, deserializeQueues, recvControlQueue, true, taskSet);

    WaitStrategy waitStrategy = new BlockingWaitStrategy();
    DisruptorQueue recvQueue = DisruptorQueue.mkInstance("NettyUnitTest", ProducerType.SINGLE, 1024, waitStrategy, false, 0, 0);
    server.registerQueue(task, recvQueue);

    return server;
}
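
The WaitStrategy above is built with a jstorm overload of Utils.newInstance that forwards constructor arguments. The plain no-argument form instantiates a class by its fully qualified name and leaves the cast to the caller; a minimal sketch:

import java.util.ArrayList;

import backtype.storm.utils.Utils;

public class NewInstanceSketch {
    public static void main(String[] args) {
        // Reflectively construct a class from its name; the caller casts.
        ArrayList<String> list =
                (ArrayList<String>) Utils.newInstance("java.util.ArrayList");
        list.add("created reflectively");
        System.out.println(list); // prints: [created reflectively]
    }
}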
 
Example #11
Source File: TridentTopology.java    From jstorm with Apache License 2.0
public Stream multiReduce(List<Fields> inputFields, List<Stream> streams, MultiReducer function, Fields outputFields) {
    List<String> names = new ArrayList<>();
    for (Stream s : streams) {
        if (s._name != null) {
            names.add(s._name);
        }
    }
    Node n = new ProcessorNode(getUniqueStreamId(), Utils.join(names, "-"), outputFields, outputFields, new MultiReducerProcessor(inputFields, function));
    return addSourcedNode(streams, n);
}
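
Utils.join concatenates the elements of a collection with a separator, which is how the ProcessorNode's name is derived from the input stream names above. In isolation:

import java.util.Arrays;

import backtype.storm.utils.Utils;

public class JoinSketch {
    public static void main(String[] args) {
        System.out.println(Utils.join(Arrays.asList("spout", "b-0", "b-1"), "-"));
        // prints: spout-b-0-b-1
    }
}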
 
Example #12
Source File: ConfigurableIngestTopologyTest.java    From cognition with Apache License 2.0
@Test
public void testConfigureStreamGrouping_default(
    @Injectable String prevComponent,
    @Injectable Configuration boltConf,
    @Injectable BoltDeclarer declarer) throws Exception {
  new Expectations(topology) {{
    boltConf.getString(STREAM_GROUPING_CONF_TYPE, STREAM_GROUPING_LOCAL_OR_SHUFFLE);
    result = STREAM_GROUPING_SHUFFLE;
    boltConf.getString(STREAM_ID, Utils.DEFAULT_STREAM_ID);
    result = Utils.DEFAULT_STREAM_ID;
    declarer.shuffleGrouping(prevComponent, Utils.DEFAULT_STREAM_ID);
  }};

  topology.configureStreamGrouping(prevComponent, boltConf, declarer);
}
 
Example #13
Source File: RocksDbHdfsState.java    From jstorm with Apache License 2.0
protected void initRocksDb() {
    RocksDbOptionsFactory optionFactory = new RocksDbOptionsFactory.Defaults();
    Options options = optionFactory.createOptions(null);
    DBOptions dbOptions = optionFactory.createDbOptions(null);
    ColumnFamilyOptions cfOptions = optionFactory.createColumnFamilyOptions(null);
    String optionsFactoryClass = (String) conf.get(ConfigExtension.ROCKSDB_OPTIONS_FACTORY_CLASS);
    if (optionsFactoryClass != null) {
        RocksDbOptionsFactory udfOptionFactory = (RocksDbOptionsFactory) Utils.newInstance(optionsFactoryClass);
        options = udfOptionFactory.createOptions(options);
        dbOptions = udfOptionFactory.createDbOptions(dbOptions);
        cfOptions = udfOptionFactory.createColumnFamilyOptions(cfOptions);
    }

    try {
        ttlTimeSec = ConfigExtension.getStateTtlTime(conf);
        if (ttlTimeSec > 0)
            rocksDb = TtlDB.open(options, rocksDbDir, ttlTimeSec, false);
        else
            rocksDb = RocksDB.open(options, rocksDbDir);
        // enable compaction
        rocksDb.compactRange();
        LOG.info("Finish the initialization of RocksDB");
    } catch (RocksDBException e) {
        LOG.error("Failed to open rocksdb located at " + rocksDbDir, e);
        throw new RuntimeException(e); // keep the cause, not just its message
    }

    lastCheckpointFiles = new HashSet<String>();
    lastCleanTime = System.currentTimeMillis();
    lastSuccessBatchId = -1;
}
 
Example #14
Source File: RollingSortTest.java    From storm-benchmark with Apache License 2.0
@Test
public void componentParallelismCouldBeSetThroughConfig() {
  StormBenchmark benchmark = new RollingSort();
  Config config = new Config();
  config.put(RollingSort.SPOUT_NUM, 4);
  config.put(RollingSort.SORT_BOLT_NUM, 5);

  StormTopology topology = benchmark.getTopology(config);
  assertThat(topology).isNotNull();
  TestUtils.verifyParallelism(Utils.getComponentCommon(topology, RollingSort.SPOUT_ID), 4);
  TestUtils.verifyParallelism(Utils.getComponentCommon(topology, RollingSort.SORT_BOLT_ID), 5);
}
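
TestUtils.verifyParallelism is benchmark-specific, but the underlying mechanism is Utils.getComponentCommon, which returns the thrift ComponentCommon struct carrying the parallelism hint. A sketch of the same check using TestWordSpout, a test spout bundled with storm-core (a hedged reconstruction, not the benchmark's own helper):

import backtype.storm.generated.ComponentCommon;
import backtype.storm.generated.StormTopology;
import backtype.storm.testing.TestWordSpout;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.utils.Utils;

public class ParallelismSketch {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("words", new TestWordSpout(), 4);

        StormTopology topology = builder.createTopology();
        ComponentCommon common = Utils.getComponentCommon(topology, "words");
        System.out.println(common.get_parallelism_hint()); // prints: 4
    }
}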
 
Example #15
Source File: DRPCSpout.java    From jstorm with Apache License 2.0
@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    _collector = collector;
    _clients = new ArrayList<>();
    if (_local_drpc_id == null) {
        _backround = new ExtendedThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>());
        _futures = new LinkedList<>();

        int numTasks = context.getComponentTasks(context.getThisComponentId()).size();
        int index = context.getThisTaskIndex();

        int port = Utils.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT));
        List<String> servers = NetWorkUtils.host2Ip((List<String>) conf.get(Config.DRPC_SERVERS));

        if (servers == null || servers.isEmpty()) {
            throw new RuntimeException("No DRPC servers configured for topology");
        }

        if (numTasks < servers.size()) {
            for (String s : servers) {
                _futures.add(_backround.submit(new Adder(s, port, conf)));
            }
        } else {
            int i = index % servers.size();
            _futures.add(_backround.submit(new Adder(servers.get(i), port, conf)));
        }
    }

}
 
Example #16
Source File: ConfigurableIngestTopologyTest.java    From cognition with Apache License 2.0
@Test
public void testConfigureStreamGrouping_localOrShuffleGrouping(
    @Injectable String prevComponent,
    @Injectable Configuration boltConf,
    @Injectable BoltDeclarer declarer) throws Exception {
  new Expectations(topology) {{
    boltConf.getString(STREAM_GROUPING_CONF_TYPE, STREAM_GROUPING_LOCAL_OR_SHUFFLE);
    result = STREAM_GROUPING_LOCAL_OR_SHUFFLE;
    boltConf.getString(STREAM_ID, Utils.DEFAULT_STREAM_ID);
    result = Utils.DEFAULT_STREAM_ID;
    declarer.localOrShuffleGrouping(prevComponent, Utils.DEFAULT_STREAM_ID);
  }};

  topology.configureStreamGrouping(prevComponent, boltConf, declarer);
}
 
Example #17
Source File: TridentBoltExecutor.java    From jstorm with Apache License 2.0
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    _messageTimeoutMs = context.maxTopologyMessageTimeout() * 1000L;
    _lastRotate = System.currentTimeMillis();
    _batches = new RotatingMap<>(2);
    _context = context;
    _collector = collector;
    _coordCollector = new CoordinatedOutputCollector(new OutputCollector(collector));
    _coordOutputCollector = new BatchOutputCollectorImpl(new OutputCollector(_coordCollector));

    _coordConditions = (Map) context.getExecutorData("__coordConditions");
    if (_coordConditions == null) {
        _coordConditions = new HashMap<>();
        for (String batchGroup : _coordSpecs.keySet()) {
            CoordSpec spec = _coordSpecs.get(batchGroup);
            CoordCondition cond = new CoordCondition();
            cond.commitStream = spec.commitStream;
            cond.expectedTaskReports = 0;
            for (String comp : spec.coords.keySet()) {
                CoordType ct = spec.coords.get(comp);
                if (ct.equals(CoordType.single())) {
                    cond.expectedTaskReports += 1;
                } else {
                    cond.expectedTaskReports += context.getComponentTasks(comp).size();
                }
            }
            cond.targetTasks = new HashSet<>();
            for (String component : Utils.get(context.getThisTargets(),
                    COORD_STREAM(batchGroup),
                    new HashMap<String, Grouping>()).keySet()) {
                cond.targetTasks.addAll(context.getComponentTasks(component));
            }
            _coordConditions.put(batchGroup, cond);
        }
        context.setExecutorData("_coordConditions", _coordConditions);
    }
    _bolt.prepare(conf, context, _coordOutputCollector);
}
 
Example #18
Source File: ConfigurableIngestTopology.java    From cognition with Apache License 2.0
void configureStreamGrouping(String prevComponent, Configuration boltConf, BoltDeclarer declarer)
    throws ConfigurationException {

  String streamType = boltConf.getString(STREAM_GROUPING_CONF_TYPE, STREAM_GROUPING_LOCAL_OR_SHUFFLE);
  String streamId = boltConf.getString(STREAM_ID, Utils.DEFAULT_STREAM_ID);

  if (StringUtils.equals(streamType, STREAM_GROUPING_FIELDS)) {
    configureStreamFieldsGrouping(prevComponent, streamId, boltConf, declarer);
  } else if (StringUtils.equals(streamType, STREAM_GROUPING_LOCAL_OR_SHUFFLE)) {
    declarer.localOrShuffleGrouping(prevComponent, streamId);
  } else {
    declarer.shuffleGrouping(prevComponent, streamId);
  }
}
 
Example #19
Source File: ZkState.java    From jstorm with Apache License 2.0
private CuratorFramework newCurator(Map conf, KafkaSpoutConfig config) throws Exception {
    String serverPorts = "";
    List<Host> zkServers = config.zkServers;
    for (Host server : zkServers) {
        serverPorts = serverPorts + server.getHost() + ":" + server.getPort() + ",";
    }
    return CuratorFrameworkFactory.newClient(serverPorts, Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT)), 15000, new RetryNTimes(
            Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES)), Utils.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL))));
}
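
Values pulled from a YAML-loaded config can arrive as Integer or Long depending on the parser, so the code above funnels them through Utils.getInt before handing them to Curator. A small sketch of that normalization:

import backtype.storm.utils.Utils;

public class GetIntSketch {
    public static void main(String[] args) {
        Object fromYaml = Long.valueOf(20000); // YAML often yields Long

        // Utils.getInt normalizes numeric config values to an Integer.
        int sessionTimeout = Utils.getInt(fromYaml);
        System.out.println(sessionTimeout); // prints: 20000
    }
}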
 
Example #20
Source File: StormZkClusterState.java    From jstorm with Apache License 2.0
public void setObject(String path, Object obj) throws Exception {
    if (obj instanceof byte[]) {
        cluster_state.set_data(path, (byte[]) obj);
    } else if (obj instanceof String) {
        cluster_state.set_data(path, ((String) obj).getBytes());
    } else {
        cluster_state.set_data(path, Utils.serialize(obj));
    }
}
 
Example #21
Source File: KafkaUtilsTest.java    From storm-kafka-0.8-plus with Apache License 2.0
@Test
public void fetchMessage() throws Exception {
    String value = "test";
    createTopicAndSendMessage(value);
    long offset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
    String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
    assertThat(message, is(equalTo(value)));
}
 
Example #22
Source File: ProcessLauncher.java    From jstorm with Apache License 2.0
public static int getSleepSeconds() {
    Map<Object, Object> conf;
    try {
        conf = Utils.readStormConfig();
    } catch (Exception e) {
        conf = new HashMap<>();
    }
    return ConfigExtension.getProcessLauncherSleepSeconds(conf);
}
 
Example #23
Source File: FixedTupleSpout.java    From jstorm with Apache License 2.0
public void nextTuple() {
    if (_serveTuples.size() > 0) {
        FixedTuple ft = _serveTuples.remove(0);
        String id = UUID.randomUUID().toString();
        _pending.put(id, ft);
        _collector.emit(ft.stream, ft.values, id);
    } else {
        Utils.sleep(100);
    }
}
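
Utils.sleep is the conventional backoff when a spout has nothing to emit: returning from nextTuple immediately would spin the executor loop, and the helper avoids Thread.sleep's checked InterruptedException. A trivial sketch:

import backtype.storm.utils.Utils;

public class SleepSketch {
    public static void main(String[] args) {
        long start = System.currentTimeMillis();
        Utils.sleep(100); // no checked exception to handle
        System.out.println("slept ~" + (System.currentTimeMillis() - start) + " ms");
    }
}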
 
Example #24
Source File: SampleClient2.java    From eagle with Apache License 2.0
/**
 * @param args
 */
public static void main(String[] args) {
    AtomicLong base1 = new AtomicLong(System.currentTimeMillis());
    AtomicLong base2 = new AtomicLong(System.currentTimeMillis());
    AtomicLong count = new AtomicLong();

    Config config = ConfigFactory.load();

    try (KafkaProducer<String, String> proceduer = SampleClient1.createProceduer(config)) {
        while (true) {
            nextUuid = String.format(instanceUuidTemp, UUID.randomUUID().toString());
            nextReqId = String.format(reqIdTemp, UUID.randomUUID().toString());

            int hostIndex = 6;
            for (int i = 0; i < hostIndex; i++) {
                sendMetric(base1, base2, count, proceduer, i);
            }

            if (count.get() % 600 == 0) {
                System.out.println("send 600 LOG/FAILURE metric!");
            }

            Utils.sleep(3000);

        }
    }
}
 
Example #25
Source File: SerializationFactory.java    From jstorm with Apache License 2.0
public IdDictionary(StormTopology topology) {
    List<String> componentNames = new ArrayList<>(topology.get_spouts().keySet());
    componentNames.addAll(topology.get_bolts().keySet());
    componentNames.addAll(topology.get_state_spouts().keySet());

    for (String name : componentNames) {
        ComponentCommon common = Utils.getComponentCommon(topology, name);
        List<String> streams = new ArrayList<>(common.get_streams().keySet());
        streamNametoId.put(name, idify(streams));
        streamIdToName.put(name, Utils.reverseMap(streamNametoId.get(name)));
    }
}
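
Utils.reverseMap inverts keys and values, which is how the dictionary above answers lookups in both directions (stream name to id and id back to name). In isolation:

import java.util.HashMap;
import java.util.Map;

import backtype.storm.utils.Utils;

public class ReverseMapSketch {
    public static void main(String[] args) {
        Map<String, Integer> nameToId = new HashMap<String, Integer>();
        nameToId.put("default", 1);
        nameToId.put("metrics", 2);

        // Swap keys and values; duplicate values would collapse to one key.
        Map<Integer, String> idToName = Utils.reverseMap(nameToId);
        System.out.println(idToName.get(2)); // prints: metrics
    }
}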
 
Example #26
Source File: AckTransactionSpout.java    From jstorm with Apache License 2.0
@Override
public List<Integer> emit(String streamId, List<Object> tuple, Object messageId) {
    if (messageId != null) {
        addPendingTuple(currBatchId, streamId, messageId, tuple);
        tuple.add(Utils.generateId(random));
    } else {
        // for non-anchor tuples, use 0 as default rootId
        tuple.add(0L);
    }
    return delegate.emit(streamId, tuple, null);
}
 
Example #27
Source File: MockStreamReceiver.java    From eagle with Apache License 2.0
/**
 * This mock does not reproduce the end-to-end logic of the correlation spout;
 * it simply generates sample data for the downstream bolts under test.
 */
@Override
public void nextTuple() {
    PartitionedEvent event = MockSampleMetadataFactory.createRandomOutOfTimeOrderEventGroupedByName("sampleStream_1");
    LOG.info("Receive {}", event);
    collector.emit(outputStreamIds.get(
        // group by the first field in event i.e. name
        (int) (event.getPartitionKey() % outputStreamIds.size())),
        Collections.singletonList(event));
    Utils.sleep(500);
}
 
Example #28
Source File: UnitTopologyRunner.java    From eagle with Apache License 2.0
private void run(String topologyId,
                 int numOfTotalWorkers,
                 int numOfSpoutTasks,
                 int numOfRouterBolts,
                 int numOfAlertBolts,
                 int numOfPublishExecutors,
                 int numOfPublishTasks,
                 Config config,
                 boolean localMode) {

    backtype.storm.Config stormConfig = givenStormConfig == null ? new backtype.storm.Config() : givenStormConfig;
    // TODO: Configurable metric consumer instance number

    int messageTimeoutSecs = config.hasPath(MESSAGE_TIMEOUT_SECS) ? config.getInt(MESSAGE_TIMEOUT_SECS) : DEFAULT_MESSAGE_TIMEOUT_SECS;
    LOG.info("Set topology.message.timeout.secs as {}", messageTimeoutSecs);
    stormConfig.setMessageTimeoutSecs(messageTimeoutSecs);

    if (config.hasPath("metric")) {
        stormConfig.registerMetricsConsumer(StormMetricTaggedConsumer.class, config.root().render(ConfigRenderOptions.concise()), 1);
    }

    stormConfig.setNumWorkers(numOfTotalWorkers);
    StormTopology topology = buildTopology(topologyId, numOfSpoutTasks, numOfRouterBolts, numOfAlertBolts, numOfPublishExecutors, numOfPublishTasks, config).createTopology();

    if (localMode) {
        LOG.info("Submitting as local mode");
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyId, stormConfig, topology);
        Utils.sleep(Long.MAX_VALUE);
    } else {
        LOG.info("Submitting as cluster mode");
        try {
            StormSubmitter.submitTopologyWithProgressBar(topologyId, stormConfig, topology);
        } catch (Exception ex) {
            LOG.error("fail submitting topology {}", topology, ex);
            throw new IllegalStateException(ex);
        }
    }
}
 
Example #29
Source File: UIUtils.java    From jstorm with Apache License 2.0
public static Map<String, Object> getNimbusConf(String clusterName) {
    NimbusClient client = null;
    try {
        client = NimbusClientManager.getNimbusClient(clusterName);

        String jsonConf = client.getClient().getNimbusConf();
        Map<String, Object> nimbusConf =
                (Map<String, Object>) Utils.from_json(jsonConf);
        return nimbusConf;
    } catch (Exception e) {
        NimbusClientManager.removeClient(clusterName);
        LOG.error(e.getMessage(), e);
        return UIUtils.readUiConfig();
    }
}
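
Utils.from_json (present in the jstorm variant of Utils shown here) parses a JSON document into plain Java collections, which is why the nimbus configuration string can be cast to a Map above. A sketch with an illustrative document:

import java.util.Map;

import backtype.storm.utils.Utils;

public class FromJsonSketch {
    public static void main(String[] args) {
        String json = "{\"nimbus.host\": \"127.0.0.1\", \"nimbus.thrift.port\": 6627}";

        Map<String, Object> conf = (Map<String, Object>) Utils.from_json(json);
        System.out.println(conf.get("nimbus.host")); // prints: 127.0.0.1
    }
}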
 
Example #30
Source File: BasicMetricsCollector.java    From storm-benchmark with Apache License 2.0
@Override
public void run() {
  long now = System.currentTimeMillis();
  long endTime = now + config.totalTime;
  MetricsState state = new MetricsState();
  state.startTime = now;
  state.lastTime = now;

  final String path = config.path;
  final String name = config.name;
  final String confFile = String.format(
          MetricsCollectorConfig.CONF_FILE_FORMAT, path, name, now);
  final String dataFile = String.format(
          MetricsCollectorConfig.DATA_FILE_FORMAT, path, name, now);
  PrintWriter confWriter = FileUtils.createFileWriter(path, confFile);
  PrintWriter dataWriter = FileUtils.createFileWriter(path, dataFile);
  config.writeStormConfig(confWriter);
  writeHeader(dataWriter);

  try {
    boolean live = true;
    do {
      Utils.sleep(config.pollInterval);
      now = System.currentTimeMillis();
      live = pollNimbus(getNimbusClient(config.stormConfig), now, state, dataWriter);
    } while (live && now < endTime);
  } catch (Exception e) {
    LOG.error("storm metrics failed! ", e);
  } finally {
    dataWriter.close();
    confWriter.close();
  }
}