org.ansj.app.keyword.Keyword Java Examples

The following examples show how to use org.ansj.app.keyword.Keyword. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: TagContent.java    From deeplearning4j with Apache License 2.0 6 votes vote down vote up
/**
 * Wraps every occurrence of the given keywords inside {@code content} with the
 * configured {@code beginTag}/{@code endTag}. Matching is case-insensitive
 * (both trie and scan text are lower-cased) while the original casing of the
 * content is preserved in the output.
 *
 * @param keyWords keywords to highlight; matched by name, weighted by score
 * @param content  the text to tag
 * @return the content with each matched keyword surrounded by the tags
 */
public String tagContent(List<Keyword> keyWords, String content) {
    // Build a trie of lower-cased keyword names mapped to their scores.
    SmartForest<Double> sf = new SmartForest<>();
    for (Keyword keyWord : keyWords) {
        sf.add(keyWord.getName().toLowerCase(), keyWord.getScore());
    }

    // Scan the lower-cased content; offsets map 1:1 onto the original text.
    SmartGetWord<Double> sgw = new SmartGetWord<>(sf, content.toLowerCase());

    int beginOffe = 0;
    String temp;
    StringBuilder sb = new StringBuilder();
    while ((temp = sgw.getFrontWords()) != null) {
        // append(CharSequence, int, int) avoids the intermediate substring copies.
        sb.append(content, beginOffe, sgw.offe);
        sb.append(beginTag);
        sb.append(content, sgw.offe, sgw.offe + temp.length());
        sb.append(endTag);
        beginOffe = sgw.offe + temp.length();
    }

    // Append the untagged tail, if any remains.
    if (beginOffe < content.length()) {
        sb.append(content, beginOffe, content.length());
    }

    return sb.toString();
}
 
Example #2
Source File: ContentBasedRecommender.java    From NewsRecommendSystem with MIT License 5 votes vote down vote up
/**
 * Computes how strongly a user's preference keyword map matches a news
 * article's keyword list: the sum of {@code keywordScore * preferenceWeight}
 * over every keyword name present in both.
 *
 * @param map  user preference map: keyword name -> preference weight
 * @param list news keywords with their TF-IDF scores
 * @return the accumulated match value; 0 when nothing overlaps
 */
private double getMatchValue(CustomizedHashMap<String, Double> map, List<Keyword> list)
{
	double matchValue = 0;
	for (Keyword keyword : list)
	{
		// Single get() instead of contains()+get(): one lookup, and no NPE
		// from unboxing if a key is ever mapped to a null value.
		Double pref = map.get(keyword.getName());
		if (pref != null)
		{
			matchValue += keyword.getScore() * pref;
		}
	}
	return matchValue;
}
 
Example #3
Source File: TFIDF.java    From NewsRecommendSystem with MIT License 5 votes vote down vote up
/**
 * Extracts the top TF-IDF keywords for an article from its title and content.
 *
 * @param title   the article title
 * @param content the article body
 * @param keyNums how many keywords to return
 * @return the ranked keyword list
 */
public static List<Keyword> getTFIDE(String title, String content, int keyNums)
{
	return new KeyWordComputer(keyNums).computeArticleTfidf(title, content);
}
 
Example #4
Source File: PatentMapperTest.java    From yuzhouwan with Apache License 2.0 5 votes vote down vote up
@Test
public void chineseToken() {
    // Extract the top-5 TF-IDF keywords from a Chinese news article.
    KeyWordComputer computer = new KeyWordComputer(5);
    String title = "维基解密否认斯诺登接受委内瑞拉庇护";
    String content = "有俄罗斯国会议员,9号在社交网站推特表示,美国中情局前雇员斯诺登,已经接受委内瑞拉的庇护,不过推文在发布几分钟后随即删除。俄罗斯当局拒绝发表评论,而一直协助斯诺登的维基解密否认他将投靠委内瑞拉。  俄罗斯国会国际事务委员会主席普什科夫,在个人推特率先披露斯诺登已接受委内瑞拉的庇护建议,令外界以为斯诺登的动向终于有新进展。  不过推文在几分钟内旋即被删除,普什科夫澄清他是看到俄罗斯国营电视台的新闻才这样说,而电视台已经作出否认,称普什科夫是误解了新闻内容。  委内瑞拉驻莫斯科大使馆、俄罗斯总统府发言人、以及外交部都拒绝发表评论。而维基解密就否认斯诺登已正式接受委内瑞拉的庇护,说会在适当时间公布有关决定。  斯诺登相信目前还在莫斯科谢列梅捷沃机场,已滞留两个多星期。他早前向约20个国家提交庇护申请,委内瑞拉、尼加拉瓜和玻利维亚,先后表示答应,不过斯诺登还没作出决定。  而另一场外交风波,玻利维亚总统莫拉莱斯的专机上星期被欧洲多国以怀疑斯诺登在机上为由拒绝过境事件,涉事国家之一的西班牙突然转口风,外长马加略]号表示愿意就任何误解致歉,但强调当时当局没有关闭领空或不许专机降落。";

    Collection<Keyword> keywords = computer.computeArticleTfidf(title, content);

    // Expected ranking (name/score):
    // [斯诺登/211.83897497289786, 维基/163.46869316143392, 委内瑞拉/101.31414008144232, 庇护/46.05172894231714, 俄罗斯/45.70875018647603]
    for (Keyword kw : keywords) {
        System.out.println(kw.getName());
    }
}
 
Example #5
Source File: SummaryComputer.java    From deeplearning4j with Apache License 2.0 5 votes vote down vote up
/**
 * Builds a summary driven by a user query: the query is segmented with
 * {@code NlpAnalysis}, terms whose part-of-speech is in {@code FILTER_SET}
 * are discarded, and the remaining terms become keywords (score = allFreq).
 *
 * @param query the user's query string
 * @return the summary computed from the query's keywords
 */
public Summary toSummary(String query) {

    List<Keyword> keywords = new ArrayList<>();
    for (Term term : NlpAnalysis.parse(query).getTerms()) {
        // Keep only terms whose nature is NOT filtered out.
        if (!FILTER_SET.contains(term.natrue().natureStr)) {
            keywords.add(new Keyword(term.getName(), term.termNatures().allFreq, 1));
        }
    }

    return toSummary(keywords);
}
 
Example #6
Source File: SummaryComputer.java    From deeplearning4j with Apache License 2.0 5 votes vote down vote up
/**
 * Builds a summary from caller-supplied keywords. When the list is null or
 * empty, the top-10 TF-IDF keywords are computed from title/content instead.
 *
 * @param keywords pre-computed keywords; may be null or empty
 * @return the computed summary
 */
public Summary toSummary(List<Keyword> keywords) {

    List<Keyword> effective = (keywords == null) ? new ArrayList<>() : keywords;

    if (effective.isEmpty()) {
        // Nothing supplied: derive keywords automatically via TF-IDF.
        effective = new KeyWordComputer(10).computeArticleTfidf(title, content);
    }

    return explan(effective, content);
}
 
Example #7
Source File: ContentBasedRecommender.java    From NewsRecommendSystem with MIT License 4 votes vote down vote up
/**
 * Content-based (CB) recommendation: for each user, scores every candidate
 * news item by matching the user's per-module preference keywords against
 * the item's TF-IDF keywords, filters out already-recommended / already-read
 * items, caps the result at N per day, and persists the recommendations.
 *
 * @param users ids of the users to generate recommendations for
 */
@Override
public void recommend(List<Long> users)
{
	try
	{
		int count = 0;
		System.out.println("CB start at "+ new Date());
		// First decay-refresh the users' preference keyword lists and
		// update today's browsing history.
		new UserPrefRefresher().refresh(users);
		// newsId -> keyword list of that news item.
		HashMap<Long, List<Keyword>> newsKeyWordsMap = new HashMap<>();
		HashMap<Long, Integer> newsModuleMap = new HashMap<>();
		// userId -> (moduleId -> (keyword -> preference weight)).
		HashMap<Long, CustomizedHashMap<Integer, CustomizedHashMap<String, Double>>> userPrefListMap = RecommendKit
				.getUserPrefListMap(users);
		List<News> newsList = News.dao.find("select id,title,content,module_id from news where news_time>"
						+ RecommendKit.getInRecDate());
		for (News news : newsList)
		{
			newsKeyWordsMap.put(news.getId(), TFIDF.getTFIDE(news.getTitle(), news.getContent(), KEY_WORDS_NUM));
			newsModuleMap.put(news.getId(), news.getModuleId());
		}

		for (Long userId : users)
		{
			Map<Long, Double> tempMatchMap = new HashMap<>();
			// Score only the news whose module the user has preferences for;
			// iterating the entry set avoids a second lookup per news id.
			for (Map.Entry<Long, List<Keyword>> entry : newsKeyWordsMap.entrySet())
			{
				Long newsId = entry.getKey();
				int moduleId = newsModuleMap.get(newsId);
				CustomizedHashMap<String, Double> prefMap = userPrefListMap.get(userId).get(moduleId);
				if (prefMap != null)
				{
					tempMatchMap.put(newsId, getMatchValue(prefMap, entry.getValue()));
				}
			}
			// Drop entries whose match value is 0.
			removeZeroItem(tempMatchMap);
			// isEmpty() instead of comparing toString() with "{}".
			if (!tempMatchMap.isEmpty())
			{
				tempMatchMap = sortMapByValue(tempMatchMap);
				Set<Long> toBeRecommended = tempMatchMap.keySet();
				// Filter out news already recommended to this user.
				RecommendKit.filterReccedNews(toBeRecommended, userId);
				// Filter out news this user has already read.
				RecommendKit.filterBrowsedNews(toBeRecommended, userId);
				// If more candidates remain than the CB daily cap (N),
				// trim the surplus before recommending.
				if (toBeRecommended.size() > N)
				{
					RecommendKit.removeOverNews(toBeRecommended, N);
				}
				RecommendKit.insertRecommend(userId, toBeRecommended.iterator(), RecommendAlgorithm.CB);
				count += toBeRecommended.size();
			}
		}
		// Guard against division by zero when the user list is empty.
		int average = users.isEmpty() ? 0 : count / users.size();
		System.out.println("CB has contributed " + average + " recommending news on average");
		System.out.println("CB finished at "+new Date());
	}
	catch (Exception e)
	{
		e.printStackTrace();
	}
}
 
Example #8
Source File: PatentMapper.java    From yuzhouwan with Apache License 2.0 4 votes vote down vote up
/**
 * Parses one raw BCP dump line into individual patent records and, for each
 * complete record, extracts the top-ranked keyword and writes a
 * (type, company, keyword) tuple to the Hadoop map context.
 *
 * @param context Hadoop mapper context that receives the key/value output
 * @param text    one raw dump line; assumed to start with the 30-character
 *                BCP prefix "INSERT INTO `patent` VALUES ('" — TODO confirm
 *                the prefix length against the actual dump format
 */
private void parsePatents(Context context, String text) {
    // Strip the BCP prefix (INSERT INTO `patent` VALUES (').
    String aim = text.substring(30);
    // Split the line into the individual patent records it contains.
    String[] patent = aim.split(PATENT_SPLIT_TOKEN);
    // Process each patent record in turn.
    for (int i = 0; i < patent.length; i++) {
        if (i == patent.length - 1) {
            // The last record needs special handling (strip the trailing: ');)
            // — dealWithLastPatent mutates the array element in place.
            dealWithLastPatent(patent, i);
        }
        // Split this record into its individual fields.
        String[] fields = patent[i].split(FIELD_SPLIT_TOKEN);
        // Only complete 17-field records are processed, avoiding index errors.
        if (fields.length == 17) {
            // Patent title, e.g. (一种超细铜丝拉拔用润滑剂).
            String title = fields[0];
            // Patent abstract, e.g. (本发明公开一种超细铜丝拉拔用润滑剂,该润滑剂包含的成分...).
            String content = fields[16];

            // Extract the highest-ranked keyword from the title and abstract.
            Collection<Keyword> result = getKeywordsByKWC(title, content);
            // Skip records with missing data (helper returns null on empty input).
            if (result == null) {
                continue;
            }
            // Iterator over the extracted keywords.
            Iterator<Keyword> iterable = result.iterator();
            // Skip when keyword extraction produced nothing.
            if (!iterable.hasNext()) {
                continue;
            }
            // Patent type.
            String type = fields[1];
            // Patent agency, dropping the trailing postcode
            // (e.g. "天津佳盟知识产权代理有限公司 12002").
            String company = fields[11].split(" ")[0];
            // Name of the top-ranked keyword.
            String keywordName = iterable.next().getName();
            // Emit the (type, company, keyword) tuple to the Hadoop context.
            writeKV2MapContext(context, type, company, keywordName);
        }
    }
}
 
Example #9
Source File: Summary.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
/**
 * Creates a Summary holding the keywords it was built from and the
 * summary text itself.
 *
 * @param keyWords the keywords backing this summary (stored as-is, no copy)
 * @param summary  the summary text
 */
public Summary(List<Keyword> keyWords, String summary) {
    this.keyWords = keyWords;
    this.summary = summary;
}
 
Example #10
Source File: Summary.java    From deeplearning4j with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the keyword list this summary was built from.
 *
 * @return the backing keyword list (direct reference, not a copy)
 */
public List<Keyword> getKeyWords() {
    return keyWords;
}
 
Example #11
Source File: PatentMapper.java    From yuzhouwan with Apache License 2.0 3 votes vote down vote up
/**
 * Extracts the ranked TF-IDF keywords for a patent from its title and
 * description.
 * <p>
 * Related API docs: http://demo.nlpcn.org/demo#
 *
 * @param title   the patent title
 * @param content the patent description
 * @return the ranked keywords, or null when title or content is empty
 */
private Collection<Keyword> getKeywordsByKWC(String title, String content) {
    // Guard: both inputs must be non-empty for extraction to make sense.
    if (StringUtils.isEmpty(title) || StringUtils.isEmpty(content)) {
        return null;
    }
    return kwc.computeArticleTfidf(title, content);
}
 
Example #12
Source File: TFIDF.java    From NewsRecommendSystem with MIT License 2 votes vote down vote up
/**
 * Extracts the top TF-IDF keywords from plain content (no title).
 *
 * @param content the text body
 * @param keyNums how many keywords to return
 * @return the ranked keyword list
 */
public static List<Keyword> getTFIDE(String content, int keyNums)
{
	return new KeyWordComputer(keyNums).computeArticleTfidf(content);
}
 
Example #13
Source File: SummaryComputer.java    From deeplearning4j with Apache License 2.0 2 votes vote down vote up
/**
 * Builds a summary with no caller-supplied keywords: delegates to
 * {@link #toSummary(List)} with an empty list, which triggers automatic
 * TF-IDF keyword extraction there.
 *
 * @return the summary computed from automatically extracted keywords
 */
public Summary toSummary() {
    List<Keyword> noKeywords = new ArrayList<>();
    return toSummary(noKeywords);
}