backtype.storm.generated.TopologyInfo Java Examples

The following examples show how to use backtype.storm.generated.TopologyInfo. They are taken from open-source projects (jstorm and PoseidonX); the source file, project, and license are listed above each example so you can refer back to the original code.
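Most of the examples below follow the same pattern: read the Storm configuration, obtain a Nimbus client, resolve the topology ID from its name, fetch the TopologyInfo, and then read its fields or serialize it. The sketch below combines the calls used in Examples #1 and #2 into one standalone program; it assumes a jstorm-style NimbusClient, and the class name TopologyInfoSketch plus the command-line argument handling are illustrative only.

import java.util.List;
import java.util.Map;

import backtype.storm.generated.ExecutorSummary;
import backtype.storm.generated.TopologyInfo;
import backtype.storm.utils.NimbusClient;
import backtype.storm.utils.Utils;

public class TopologyInfoSketch {
    public static void main(String[] args) throws Exception {
        Map conf = Utils.readStormConfig();                              // load storm.yaml plus defaults
        NimbusClient client = NimbusClient.getConfiguredClient(conf);
        try {
            String topologyName = args[0];                               // topology name from the command line
            String topologyId = client.getClient().getTopologyId(topologyName);
            TopologyInfo info = client.getClient().getTopologyInfo(topologyId);

            // Dump the whole structure as JSON, as Example #2 does ...
            System.out.println(Utils.toPrettyJsonString(info));

            // ... or walk the executor summaries, as Example #1 does.
            List<ExecutorSummary> executors = info.get_executors();
            System.out.println("executor count: " + executors.size());
        } finally {
            client.close();
        }
    }
}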
Example #1
Source File: ClusterInfoBolt.java    From jstorm with Apache License 2.0
protected long getTopologyTPS(TopologySummary topology, Client client) throws NotAliveException, TException{
    long topologyTps = 0L;
    String topologyId = topology.get_id();
    if(topologyId.startsWith("ClusterMonitor")){
        return topologyTps;
    }
    TopologyInfo topologyInfo = client.getTopologyInfo(topologyId);
    if(topologyInfo == null){
        return topologyTps;
    }
    List<ExecutorSummary> executorSummaryList = topologyInfo.get_executors();
    for(ExecutorSummary executor : executorSummaryList){
        topologyTps += getComponentTPS(executor);
    }
    LOGGER.info("topology = " + topology.get_name() + ", tps = " + topologyTps);
    return topologyTps;
}
 
Example #2
Source File: list.java    From jstorm with Apache License 2.0
public static void main(String[] args) {
    NimbusClient client = null;
    try {
        Map conf = Utils.readStormConfig();
        client = NimbusClient.getConfiguredClient(conf);

        if (args.length > 0 && !StringUtils.isBlank(args[0])) {
            String topologyName = args[0];
            TopologyInfo info = client.getClient().getTopologyInfoByName(topologyName);
            System.out.println("Successfully get topology info \n" + Utils.toPrettyJsonString(info));
        } else {
            ClusterSummary clusterSummary = client.getClient().getClusterInfo();
            System.out.println("Successfully get cluster info \n" + Utils.toPrettyJsonString(clusterSummary));
        }
    } catch (Exception e) {
        System.out.println(e.getMessage());
        e.printStackTrace();
        throw new RuntimeException(e);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
 
Example #3
Source File: JstormEngineCheckPointImpl.java    From PoseidonX with Apache License 2.0
/**
 * Returns the topology's error information as a JSON string.
 * @param nimbusClient Nimbus client used to query the cluster
 * @param topId        topology ID
 * @return a JSON-serialized set of error messages; an empty set if the topology info cannot be fetched
 */
public static String getTopologyErrorInfo(NimbusClient nimbusClient, String topId) {
    TopologyInfo topologyInfo = JStormClusterBusiness.getTopologyInfoWithRetry(nimbusClient,topId);
    if(topologyInfo!=null){
        Set<String> error = JStormTopologyBusiness.buildTopologyErrorInfo(topologyInfo);
        return JSONObject.toJSONString(error);
    }
    return JSONObject.toJSONString(Sets.newHashSet());
}
 
Example #4
Source File: gray_upgrade.java    From jstorm with Apache License 2.0
private static void upgradeTopology(String topologyName, String component, List<String> workers, int workerNum)
        throws Exception {
    Map conf = Utils.readStormConfig();
    NimbusClient client = NimbusClient.getConfiguredClient(conf);
    try {
        String topologyId = client.getClient().getTopologyId(topologyName);
        Map stormConf = (Map) Utils.from_json(client.getClient().getTopologyConf(topologyId));
        // check if TM is a separate worker
        TopologyInfo topologyInfo = client.getClient().getTopologyInfo(topologyId);
        for (TaskSummary taskSummary : topologyInfo.get_tasks()) {
            if (!taskSummary.get_status().equalsIgnoreCase("active")) {
                CommandLineUtil.error("Some of the tasks are not in ACTIVE state, cannot perform the upgrade!");
                return;
            }
        }

        if (!ConfigExtension.isTmSingleWorker(stormConf, topologyInfo.get_topology().get_numWorkers())) {
            CommandLineUtil.error("Gray upgrade requires that topology master to be a single worker, " +
                    "cannot perform the upgrade!");
            return;
        }

        client.getClient().grayUpgrade(topologyName, component, workers, workerNum);
        CommandLineUtil.success("Successfully submit command gray_upgrade " + topologyName);
    } catch (Exception ex) {
        CommandLineUtil.error("Failed to perform gray_upgrade: " + ex.getMessage());
        ex.printStackTrace();
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
 
Example #5
Source File: JstormMetricCollectImpl.java    From PoseidonX with Apache License 2.0
public Void call() throws Exception {
    NimbusClient nimbusClient = null;
    try{
        if(taskPO.getTaskStatus() != TaskStatusEnum.RUNNING.getValue()){
            return null;
        }

        JstormTaskConfigDTO taskConfigDTO = JSONObject.parseObject(jstormProcessPO.getTaskConfig(), JstormTaskConfigDTO.class);
        if(taskConfigDTO == null){
            return null;
        }

        String topId = jstormProcessPO.getTopId();
        taskMetricHbaseRecord = buildTaskMetricHbaseRecord(topId);

        TopologyInfo topologyInfo = null;
        nimbusClient = JStormClusterBusiness.getNimBusClientWithRetry(taskConfigDTO.getJstormZkHost(),taskConfigDTO.getJstormZkPort(),taskConfigDTO.getJstormZkRoot(),2,2);
        if(nimbusClient != null){
            topologyInfo = JStormClusterBusiness.getTopologyInfoWithRetry(nimbusClient,topId);
            if(topologyInfo == null){
                LOGGER.error("JstormMetricCollectExecutor 获取topologyInfo 失败! zkRoot=" + taskConfigDTO.getJstormZkRoot());
            }
        }else{
            LOGGER.error("JstormMetricCollectExecutor 获取nimbus client 失败! zkRoot=" + taskConfigDTO.getJstormZkRoot());
        }

        collectTopologyMetic(topologyInfo, topId);
        collectComponentMetic(topologyInfo, topId);
        collectWorkerMetic(topologyInfo, topId);
        processThreadStatus(topologyInfo, topId);

        if (isZkleader) {
            HBaseRecordUtils.send(taskMetricHbaseRecord);
        }
    }catch (Throwable e){
        LOGGER.error("JstormMetricCollectExecutor is error",e);
    }finally {
        if(nimbusClient!=null){
            nimbusClient.close();
        }
    }
    return null;
}
 
Example #6
Source File: JstormMetricCollectImpl.java    From PoseidonX with Apache License 2.0
/**
 * Collects topology-level metrics.
 * @param topologyInfo topology info fetched from Nimbus (may be null)
 * @param topId        topology ID
 */
private void collectTopologyMetic(TopologyInfo topologyInfo, String topId){

    String SEND_TPS = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String RECV_TPS = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String NETTY_CLI_SEND_SPEED = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String NETTY_SRV_RECV_SPEED = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String EMMITTED_NUM = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String FULL_GC = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String MEMORY_USED = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String HEAP_MEMORY_USED = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String CPU_USED_RATIO = MoniterContant.METRIC_COLLECT_FAILVALUE;

    if(topologyInfo != null){
        MetricInfo topologyMetrics = topologyInfo.get_metrics().get_topologyMetric();
        JstormTopologyMetric jstormTopologyMetric = JstormMetricBusiness.buildSummaryMetrics(topologyMetrics, MoniterContant.METRIC_COLLECT_CYCLE);

        SEND_TPS = jstormTopologyMetric.getMetrics().get(MetricDef.SEND_TPS);
        RECV_TPS = jstormTopologyMetric.getMetrics().get(MetricDef.RECV_TPS);
        NETTY_CLI_SEND_SPEED = jstormTopologyMetric.getMetrics().get(MetricDef.NETTY_CLI_SEND_SPEED);
        NETTY_SRV_RECV_SPEED = jstormTopologyMetric.getMetrics().get(MetricDef.NETTY_SRV_RECV_SPEED);
        EMMITTED_NUM = jstormTopologyMetric.getMetrics().get(MetricDef.EMMITTED_NUM);
        FULL_GC = jstormTopologyMetric.getMetrics().get(MetricDef.FULL_GC);
        MEMORY_USED = JstormMetricBusiness.getMemNumStr(jstormTopologyMetric.getMetrics().get(MetricDef.MEMORY_USED));
        HEAP_MEMORY_USED = JstormMetricBusiness.getMemNumStr(jstormTopologyMetric.getMetrics().get(MetricDef.HEAP_MEMORY_USED));
        CPU_USED_RATIO = jstormTopologyMetric.getMetrics().get(MetricDef.CPU_USED_RATIO);
    }

    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.SEND_TPS, currentTimeStamp, SEND_TPS);
    addTaskMetricHbaseCell(MetricDef.SEND_TPS, SEND_TPS);
    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.RECV_TPS, currentTimeStamp,RECV_TPS);
    addTaskMetricHbaseCell(MetricDef.RECV_TPS, RECV_TPS);
    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.NETTY_CLI_SEND_SPEED,currentTimeStamp, NETTY_CLI_SEND_SPEED);
    addTaskMetricHbaseCell(MetricDef.NETTY_CLI_SEND_SPEED, NETTY_CLI_SEND_SPEED);
    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.NETTY_SRV_RECV_SPEED,currentTimeStamp, NETTY_SRV_RECV_SPEED);
    addTaskMetricHbaseCell(MetricDef.NETTY_SRV_RECV_SPEED, NETTY_SRV_RECV_SPEED);
    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.EMMITTED_NUM,currentTimeStamp, EMMITTED_NUM);
    addTaskMetricHbaseCell(MetricDef.EMMITTED_NUM, EMMITTED_NUM);
    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.FULL_GC,currentTimeStamp, FULL_GC);
    addTaskMetricHbaseCell(MetricDef.FULL_GC, FULL_GC);
    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.MEMORY_USED, currentTimeStamp,MEMORY_USED);
    addTaskMetricHbaseCell(MetricDef.MEMORY_USED, MEMORY_USED);
    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.HEAP_MEMORY_USED, currentTimeStamp,HEAP_MEMORY_USED);
    addTaskMetricHbaseCell(MetricDef.HEAP_MEMORY_USED, HEAP_MEMORY_USED);
    MetricReportContainer.addDefaultGroupMetric( topId, MetricDef.CPU_USED_RATIO,currentTimeStamp, CPU_USED_RATIO);
    addTaskMetricHbaseCell(MetricDef.CPU_USED_RATIO, CPU_USED_RATIO);
}
 
Example #7
Source File: JstormMetricCollectImpl.java    From PoseidonX with Apache License 2.0
/**
 * Collects component-level metrics.
 * @param topologyInfo topology info fetched from Nimbus (may be null)
 * @param topId        topology ID
 */
private void collectComponentMetic(TopologyInfo topologyInfo, String topId) {

    String SEND_TPS = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String RECV_TPS = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String TUPLE_LIEF_CYCLE = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String PROCESS_LATENCY = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String EXECUTE_TIME = MoniterContant.METRIC_COLLECT_FAILVALUE;

    Map<String, String> COMP_SEND_TPS = Maps.newHashMap();
    Map<String, String> COMP_RECV_TPS = Maps.newHashMap();
    Map<String, String> COMP_TUPLE_LIEF_CYCLE = Maps.newHashMap();
    Map<String, String> COMP_PROCESS_LATENCY = Maps.newHashMap();
    Map<String, String> COMP_EXECUTE_TIME = Maps.newHashMap();

    if(topologyInfo != null){

        MetricInfo componentMetrics = topologyInfo.get_metrics().get_componentMetric();
        List<JstormComponentMetric> jstormComponentMetrics = JstormMetricBusiness.buildComponentMetrics(componentMetrics,  MoniterContant.METRIC_COLLECT_CYCLE, topologyInfo.get_components());
        for (JstormComponentMetric component : jstormComponentMetrics) {
            Map<String, String> metricMap = component.getMetrics();
            String componentName = component.getComponentName();
            if (metricMap == null || metricMap.size() == 0 || StringUtils.isBlank(componentName) || componentName.equals("__topology_master") || componentName.equals("__acker")){
                continue;
            }

            SEND_TPS = metricMap.get( MetricDef.SEND_TPS);
            RECV_TPS = metricMap.get( MetricDef.RECV_TPS);
            TUPLE_LIEF_CYCLE = metricMap.get( MetricDef.TUPLE_LIEF_CYCLE);
            PROCESS_LATENCY = metricMap.get( MetricDef.PROCESS_LATENCY);
            EXECUTE_TIME =metricMap.get( MetricDef.EXECUTE_TIME);

            COMP_SEND_TPS.put(componentName,SEND_TPS);
            COMP_RECV_TPS.put(componentName,RECV_TPS);
            COMP_TUPLE_LIEF_CYCLE .put(componentName,TUPLE_LIEF_CYCLE);
            COMP_PROCESS_LATENCY .put(componentName,PROCESS_LATENCY);
            COMP_EXECUTE_TIME.put(componentName,EXECUTE_TIME);
        }
    }

    addTaskMetricHbaseCell("comp_" + MetricDef.SEND_TPS,COMP_SEND_TPS);
    addTaskMetricHbaseCell("comp_" + MetricDef.RECV_TPS,COMP_RECV_TPS);
    addTaskMetricHbaseCell("comp_" + MetricDef.TUPLE_LIEF_CYCLE,COMP_TUPLE_LIEF_CYCLE);
    addTaskMetricHbaseCell("comp_" + MetricDef.PROCESS_LATENCY,COMP_PROCESS_LATENCY);
    addTaskMetricHbaseCell("comp_" + MetricDef.EXECUTE_TIME,COMP_EXECUTE_TIME);
}
 
Example #8
Source File: JstormMetricCollectImpl.java    From PoseidonX with Apache License 2.0
/**
 * Collects worker-level metrics.
 * @param topologyInfo topology info fetched from Nimbus (may be null)
 * @param topId        topology ID
 */
private void collectWorkerMetic(TopologyInfo topologyInfo, String topId) {

    String FULL_GC = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String MEMORY_USED = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String HEAP_MEMORY_USED = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String GCCOUNT = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String GCTIME = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String NETTY_CLI_SEND_SPEED = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String NETTY_SRV_RECV_SPEED = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String RECV_CTRL_QUEUE = MoniterContant.METRIC_COLLECT_FAILVALUE;
    String SEND_QUEUE = MoniterContant.METRIC_COLLECT_FAILVALUE;

    Map<String, String> WORKER_FULL_GC = Maps.newHashMap();
    Map<String, String> WORKER_MEMORY_USED = Maps.newHashMap();
    Map<String, String> WORKER_HEAP_MEMORY_USED = Maps.newHashMap();
    Map<String, String> WORKER_GCCOUNT = Maps.newHashMap();
    Map<String, String> WORKER_GCTIME = Maps.newHashMap();
    Map<String, String> WORKER_NETTY_CLI_SEND_SPEED = Maps.newHashMap();
    Map<String, String> WORKER_NETTY_SRV_RECV_SPEED = Maps.newHashMap();
    Map<String, String> WORKER_RECV_CTRL_QUEUE = Maps.newHashMap();
    Map<String, String> WORKER_SEND_QUEUE = Maps.newHashMap();

    if(topologyInfo != null){
        MetricInfo workerMetrics = topologyInfo.get_metrics().get_workerMetric();
        List<JstormWorkerMetric> jstormWorkerMetrics = JstormMetricBusiness.buildWorkerMetrics(workerMetrics, topId,  MoniterContant.METRIC_COLLECT_CYCLE);

        for (JstormWorkerMetric jstormWorkerMetric : jstormWorkerMetrics) {
            Map<String, String> metricMap = jstormWorkerMetric.getMetrics();
            String host = jstormWorkerMetric.getHost();
            String port = jstormWorkerMetric.getPort();
            if (metricMap == null || metricMap.size() == 0 || StringUtils.isBlank(host) || StringUtils.isBlank(port)) {
                continue;
            }

            String worker = host + ":" + port;

            FULL_GC = metricMap.get( MetricDef.FULL_GC);
            MEMORY_USED = JstormMetricBusiness.getMemNumStr(metricMap.get( MetricDef.MEMORY_USED));
            HEAP_MEMORY_USED = JstormMetricBusiness.getMemNumStr(metricMap.get( MetricDef.HEAP_MEMORY_USED));
            GCCOUNT = metricMap.get(METRIC_WORKER_GC_COUNT);
            GCTIME = metricMap.get(METRIC_WORKER_GC_TIME);
            NETTY_CLI_SEND_SPEED = metricMap.get( MetricDef.NETTY_CLI_SEND_SPEED);
            NETTY_SRV_RECV_SPEED = metricMap.get( MetricDef.NETTY_SRV_RECV_SPEED);
            RECV_CTRL_QUEUE = metricMap.get( MetricDef.RECV_CTRL_QUEUE);
            SEND_QUEUE = metricMap.get( MetricDef.SEND_QUEUE);

            WORKER_FULL_GC.put(worker,FULL_GC);
            WORKER_MEMORY_USED.put(worker,MEMORY_USED);
            WORKER_HEAP_MEMORY_USED .put(worker,HEAP_MEMORY_USED);
            WORKER_GCCOUNT.put(worker,GCCOUNT);
            WORKER_GCTIME.put(worker,GCTIME);
            WORKER_NETTY_CLI_SEND_SPEED.put(worker,NETTY_CLI_SEND_SPEED);
            WORKER_NETTY_SRV_RECV_SPEED .put(worker,NETTY_SRV_RECV_SPEED);
            WORKER_RECV_CTRL_QUEUE.put(worker,RECV_CTRL_QUEUE);
            WORKER_SEND_QUEUE.put(worker,SEND_QUEUE);
        }
    }

    addTaskMetricHbaseCell("worker_" + MetricDef.FULL_GC,WORKER_FULL_GC);
    addTaskMetricHbaseCell("worker_" + MetricDef.MEMORY_USED,WORKER_MEMORY_USED);
    addTaskMetricHbaseCell("worker_" + MetricDef.HEAP_MEMORY_USED,WORKER_HEAP_MEMORY_USED);
    addTaskMetricHbaseCell("worker_" + METRIC_WORKER_GC_COUNT,WORKER_GCCOUNT);
    addTaskMetricHbaseCell("worker_" + METRIC_WORKER_GC_TIME,WORKER_GCTIME);
    addTaskMetricHbaseCell("worker_" + MetricDef.NETTY_CLI_SEND_SPEED,WORKER_NETTY_CLI_SEND_SPEED);
    addTaskMetricHbaseCell("worker_" + MetricDef.NETTY_SRV_RECV_SPEED,WORKER_NETTY_SRV_RECV_SPEED);
    addTaskMetricHbaseCell("worker_" + MetricDef.RECV_CTRL_QUEUE,WORKER_RECV_CTRL_QUEUE);
    addTaskMetricHbaseCell("worker_" + MetricDef.SEND_QUEUE,WORKER_SEND_QUEUE);
}
 
Example #9
Source File: UserDefinedWorkerTopology.java    From jstorm with Apache License 2.0
public void verifyAssignment(String topologyName) {
    Set<ResourceWorkerSlot> spoutResourceWorkerSlots = new HashSet<>();
    Set<ResourceWorkerSlot> bolt1ResourceWorkerSlots = new HashSet<>();
    Set<ResourceWorkerSlot> bolt2ResourceWorkerSlots = new HashSet<>();

    NimbusClientWrapper client = new NimbusClientWrapper();
    try {
        Map nimbusConf = Utils.readStormConfig();
        client.init(nimbusConf);

        String topologyId = client.getClient().getTopologyId(topologyName);

        TopologyInfo topologyInfo = client.getClient().getTopologyInfo(topologyId);

        Assignment assignment = JStormHelper.getAssignment(topologyId, conf);
        Set<ResourceWorkerSlot> workerSet = assignment.getWorkers();

        List<ComponentSummary> componentList = topologyInfo.get_components();
        for (ComponentSummary component : componentList) {
            if (SPOUT_NAME.equals(component.get_name())) {
                spoutResourceWorkerSlots = getComponentWorkers(component, workerSet);
            } else if (BOLT1_NAME.equals(component.get_name())) {
                bolt1ResourceWorkerSlots = getComponentWorkers(component, workerSet);
            } else if (BOLT2_NAME.equals(component.get_name())) {
                bolt2ResourceWorkerSlots = getComponentWorkers(component, workerSet);
            }
        }
    } catch (Exception e) {
        Assert.fail("Fail to get workerSlots");
    } finally {
        client.cleanup();
    }

    verifySpoutAssignment(spoutResourceWorkerSlots);
    verifyBolt1Assignment(spoutResourceWorkerSlots, bolt1ResourceWorkerSlots);
    verifyBolt2Assignment(bolt1ResourceWorkerSlots, bolt2ResourceWorkerSlots);
}
 
Example #10
Source File: LogController.java    From jstorm with Apache License 2.0
@RequestMapping(value = "/deepSearch", method = RequestMethod.GET)
public String deepSearch(@RequestParam(value = "cluster", required = true) String clusterName,
                         @RequestParam(value = "tid", required = true) String topologyId,
                         @RequestParam(value = "key", required = false) String keyword,
                         @RequestParam(value = "caseIgnore", required = false) String caseIgnore,
                         ModelMap model) {
    clusterName = StringEscapeUtils.escapeHtml(clusterName);
    topologyId = StringEscapeUtils.escapeHtml(topologyId);
    boolean _caseIgnore = !StringUtils.isBlank(caseIgnore);
    int port = UIUtils.getSupervisorPort(clusterName);
    model.addAttribute("keyword", keyword);
    List<Future<?>> futures = new ArrayList<>();
    ConcurrentLinkedQueue<Map> result = new ConcurrentLinkedQueue<>();

    if (filterKeyword(model, keyword)) {
        NimbusClient client = null;
        try {
            keyword = URLEncoder.encode(keyword, "UTF-8");      // encode space and url characters
            client = NimbusClientManager.getNimbusClient(clusterName);
            TopologyInfo info = client.getClient().getTopologyInfo(topologyId);
            String topologyName = info.get_topology().get_name();
            List<UIWorkerMetric> workerData = UIMetricUtils.getWorkerMetrics(info.get_metrics().get_workerMetric(), topologyId, 60);
            String dir = "." + File.separator +  topologyName;
            for (UIWorkerMetric metric : workerData){
                String logFile = topologyName + "-worker-" + metric.getPort() + ".log";
                String url = String.format("http://%s:%s/logview?cmd=searchLog&file=%s&key=%s&offset=%s&case_ignore=%s",
                        metric.getHost(), port, getFullFile(dir, logFile), keyword, 0, _caseIgnore);
                futures.add(_backround.submit(new SearchRequest(url, metric.getHost(), metric.getPort(), dir, logFile, result)));
            }

            JStormServerUtils.checkFutures(futures);

            model.addAttribute("result", result);

        } catch (NotAliveException nae) {
            model.addAttribute("tip", String.format("The topology: %s is dead.", topologyId));
        } catch (Exception e) {
            NimbusClientManager.removeClient(clusterName);
            LOG.error(e.getMessage(), e);
            UIUtils.addErrorAttribute(model, e);
        }
    }

    model.addAttribute("clusterName", clusterName);
    model.addAttribute("topologyId", topologyId);
    model.addAttribute("logServerPort", port);
    model.addAttribute("caseIgnore", _caseIgnore);
    UIUtils.addTitleAttribute(model, "DeepSearch");

    return "deepSearch";
}
 
Example #11
Source File: ServiceHandler.java    From jstorm with Apache License 2.0
@Override
public TopologyInfo getTopologyInfoByName(String topologyName) throws TException {
    String topologyId = getTopologyId(topologyName);
    return getTopologyInfo(topologyId);
}
 
Example #12
Source File: ILocalCluster.java    From jstorm with Apache License 2.0
TopologyInfo getTopologyInfo(String id);