Python nltk.RegexpTokenizer() Examples

The following code examples show how to use nltk.RegexpTokenizer(). They are taken from open-source Python projects.
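For reference, nltk.tokenize.RegexpTokenizer(pattern) tokenizes a string by finding every match of the given regular expression (or, with gaps=True, by splitting on it). A minimal sketch of the most common usage, with an illustrative input string:

from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r'\w+')   # keep runs of word characters, drop punctuation and whitespace
print(tokenizer.tokenize("Hello, world! It's 2020."))
# ['Hello', 'world', 'It', 's', '2020']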

Example 1
Project: question_answering   Author: joswinkj   File: Tokenizers.py    Apache License 2.0    6 votes
def __call__(self, doc, string_tokenize='[a-zA-Z0-9]+'):
        from nltk.tokenize import RegexpTokenizer
        from nltk.corpus import stopwords
        from nltk.corpus import wordnet as wn
        #tokenizer = RegexpTokenizer(r'\w+')
        tokenizer = RegexpTokenizer(string_tokenize)
        #words=[self.wnl.lemmatize(t) for t in word_tokenize(doc)]
        # Tokenize the document and lemmatize each token.
        words = [self.wnl.lemmatize(t) for t in tokenizer.tokenize(doc)]
        # Extend NLTK's English stop-word list with a few custom entries.
        mystops = (u'youtube', u'mine', u'this', u'that')
        stop_words = set(stopwords.words('english'))
        stop_words.update(mystops)
        stop_words = list(stop_words)
        words1 = [i.lower() for i in words if i not in stop_words]
        # Expand the token set with WordNet synonyms (lemma names of every synset).
        words2 = list(set(list({l.name() for word in words1 for s in wn.synsets(word) for l in s.lemmas()}) + words1))

        return [i.lower() for i in words2 if i not in stop_words]
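The same tokenize, lemmatize, and stop-word-filter pipeline can be sketched outside the class; in this hedged standalone version WordNetLemmatizer stands in for the instance's self.wnl, and the sample document is illustrative:

from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

wnl = WordNetLemmatizer()
tokenizer = RegexpTokenizer('[a-zA-Z0-9]+')
doc = "The cats are sitting on the mats"
words = [wnl.lemmatize(t) for t in tokenizer.tokenize(doc)]
stop_words = set(stopwords.words('english'))
print([w.lower() for w in words if w not in stop_words])
# e.g. ['the', 'cat', 'sitting', 'mat']  (the stop-word check runs before lowercasing,
# so the capitalized 'The' survives, just as it does in the method above)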
Example 2
Project: question_answering   Author: joswinkj   File: Tokenizers.py    Apache License 2.0    6 votes
def __call__(self, doc, string_tokenize='[a-zA-Z0-9]+'):
        from nltk.tokenize import RegexpTokenizer
        from nltk.corpus import stopwords
        from nltk.corpus import wordnet as wn
        #tokenizer = RegexpTokenizer(r'\w+')
        tokenizer = RegexpTokenizer(string_tokenize)
        #words=[self.wnl.lemmatize(t) for t in word_tokenize(doc)]
        # Tokenize the document and lemmatize each token.
        words = [self.wnl.lemmatize(t) for t in tokenizer.tokenize(doc)]
        mystops = (u'youtube', u'mine', u'this', u'that')
        stop_words = set(stopwords.words('english'))
        stop_words.update(mystops)
        stop_words = list(stop_words)
        words1 = [i.lower() for i in words if i not in stop_words]
        # Expand with WordNet synonyms, then stem everything with the Snowball stemmer.
        words2 = list(set(list({l.name() for word in words1 for s in wn.synsets(word) for l in s.lemmas()}) + words1))
        words3 = list(set([self.snowball_stemmer.stem(t) for t in words2]))
        return [i.lower() for i in words3 if i not in stop_words]
Example 3
Project: sparv-pipeline   Author: spraakbanken   File: segment.py    MIT License    6 votes
def span_tokenize(self, s):
        result = []
        spans = nltk.RegexpTokenizer.span_tokenize(self, s)
        first = True
        temp = [0, 0]

        for start, _ in spans:
            if not first:
                temp[1] = start
                result.append(tuple(temp))
            temp[0] = start
            first = False

        temp[1] = len(s)
        result.append(tuple(temp))

        return result 
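A hedged illustration of the effect: if the tokenizer was initialized with the sentence-delimiter pattern from Example 18 (r"[\.!\?]\s*" with gaps=True), this override turns the start offsets of the gap tokens into contiguous spans, so each sentence keeps its trailing punctuation and whitespace. The class name below is hypothetical:

segmenter = SentenceSegmenter()   # hypothetical subclass combining this override with Example 18's __init__
print(segmenter.span_tokenize("One. Two! Three"))
# [(0, 5), (5, 10), (10, 15)]  i.e. "One. ", "Two! ", "Three"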
Example 4
Project: ConvNetPy   Author: benglard   File: next_word.py    MIT License    6 votes
def load_data():
    global N, words

    raw = list(word 
            for fileid in corpus.fileids()
            for word in corpus.words(fileid))
    words = list(token for token in RegexpTokenizer(r'\w+').tokenize(' '.join(raw)))[100:1000]
    tokens = set(words)
    tokens_l = list(tokens)
    N = len(tokens)
    print('Corpus size: {} words'.format(N))

    step = 4
    data = []
    for gram in ngrams(words, step):
        w1, w2, w3, pred = gram
        V = Vol(1, 1, N, 0.0)
        V.w[tokens_l.index(w1)] = 1
        V.w[tokens_l.index(w2)] = 1
        V.w[tokens_l.index(w3)] = 1
        label = tokens_l.index(pred)
        data.append((V, label))

    return data 
Example 5
Project: ConvNetPy   Author: benglard   File: similarity.py    MIT License    6 votes
def test(): 
    gt = GetTweets()
    documents = gt.get_hashtag('ferguson', count=20)
    documents += gt.get_hashtag('police', count=21)
    print('Query:', documents[-1])

    tokenizer = RegexpTokenizer(r'\w+')
    vols = []
    for doc in documents:
        samples = []
        for token in tokenizer.tokenize(doc):
            word = token.lower()
            if word not in ENGLISH_STOP_WORDS and word not in punctuation:
                samples.append(word)
        vols.append(volumize(FreqDist(samples)))

    vectors = [ doc_code(v) for v in vols[:-1] ]
    query_vec = doc_code(vols[-1])

    sims = [ cos(v, query_vec) for v in vectors ]
    m = max(sims)
    print(m, documents[sims.index(m)])
Example 6
Project: convai-bot-1337   Author: sld   File: tokenizing.py    GNU General Public License v3.0    6 votes
def convert_to_vw(text):
    tokenizer = nltk.RegexpTokenizer(r'\w+')
    lmtzr = WordNetLemmatizer()
    tokens = [t.lower() for t in tokenizer.tokenize(text)]
    id_ = 13371337
    processed = []
    for t in tokens:
        l = lmtzr.lemmatize(t)
        processed.append(l)
    counted = Counter(processed)
    res_str = str(id_)
    for k, v in counted.items():
        if v != 1:
            res_str = res_str + " {}:{}".format(k, v)
        else:
            res_str = res_str + " {}".format(k)
    return res_str 
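A quick usage check, assuming the module-level imports the snippet relies on (nltk, WordNetLemmatizer from nltk.stem, Counter from collections); the result is a Vowpal Wabbit style line of lemma counts prefixed by the hard-coded id:

print(convert_to_vw("The cat sat on the mat"))
# e.g. '13371337 the:2 cat sat on mat'  (feature order follows Counter insertion order)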
Example 7
Project: ConvLab   Author: ConvLab   File: utils.py    MIT License    5 votes
def get_tokenize():
    return RegexpTokenizer(r'\w+|#\w+|<\w+>|%\w+|[^\w\s]+').tokenize 
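Since the plain \w+ branch cannot start at #, <, or %, the later alternatives still capture hashtags, angle-bracket placeholders, and %-prefixed tokens whole, while [^\w\s]+ groups runs of punctuation. A hedged demo on an illustrative input (RegexpTokenizer is assumed to be imported from nltk.tokenize in this module):

tokenize = get_tokenize()
print(tokenize("see <unk> at #nlp %today ..."))
# ['see', '<unk>', 'at', '#nlp', '%today', '...']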
Example 8
Project: question_answering   Author: joswinkj   File: Tokenizers.py    Apache License 2.0    5 votes
def __call__(self, doc):
        from nltk.tokenize import RegexpTokenizer
        from nltk.corpus import stopwords
        #tokenizer = RegexpTokenizer(r'\w+')
        tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
        #words=[self.wnl.lemmatize(t) for t in word_tokenize(doc)]
        # Tokenize on alphabetic runs and lemmatize each token.
        words = [self.wnl.lemmatize(t) for t in tokenizer.tokenize(doc)]
        # Custom stop words target web/URL noise, on top of NLTK's English list.
        mystops = (u'youtube', u'mine', u'this', u'that', 'facebook', 'com', 'google', 'www', 'http', 'https')
        stop_words = set(stopwords.words('english'))
        stop_words.update(mystops)

        stop_words = list(stop_words)
        return [i.lower() for i in words if i not in stop_words]
Example 9
Project: question_answering   Author: joswinkj   File: Tokenizers.py    Apache License 2.0    5 votes
def __call__(self, doc):
        snowball_stemmer = SnowballStemmer('english')
        #tokenizer = RegexpTokenizer(r'\w+')
        #words=[self.wnl.lemmatize(t) for t in word_tokenize(doc)]
        words=[snowball_stemmer.stem(t) for t in word_tokenize(doc)]
        stop_words=set(stopwords.words('english'))
        stop_words.update(self.mystops)
        stop_words=list(stop_words)
        return [i.lower() for i in words if i not in stop_words] 
Example 10
Project: dialogue-models   Author: siat-nlp   File: field.py    Apache License 2.0    5 votes
def tokenize(s):
    """
    tokenize
    """
    #s = re.sub('\d+', NUM, s).lower()
    # tokens = nltk.RegexpTokenizer(r'\w+|<sil>|[^\w\s]+').tokenize(s)
    tokens = s.split(' ')
    return tokens 
Example 11
Project: dialogue-models   Author: siat-nlp   File: field.py    Apache License 2.0    5 votes
def tokenize(s):
    """
    tokenize
    """
    #s = re.sub('\d+', NUM, s).lower()
    # tokens = nltk.RegexpTokenizer(r'\w+|<sil>|[^\w\s]+').tokenize(s)
    tokens = s.split(' ')
    return tokens 
Example 12
Project: practicalDataAnalysisCookbook   Author: drabastomek   File: nlp_pos_alternative.py    GNU General Public License v2.0    5 votes
def preprocess_data(text):
    global sentences, tokenized
    tokenizer = nltk.RegexpTokenizer(r'\w+')

    sentences = nltk.sent_tokenize(text)
    tokenized = [tokenizer.tokenize(s) for s in sentences]

# import the data 
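As a quick check (nltk imported as the snippet assumes, input string illustrative), the function leaves one global holding the sentences and another holding a token list per sentence:

preprocess_data("NLTK is great. Tokenize me!")
print(tokenized)
# [['NLTK', 'is', 'great'], ['Tokenize', 'me']]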
Example 13
Project: practicalDataAnalysisCookbook   Author: drabastomek   File: nlp_countWords.py    GNU General Public License v2.0    5 votes
def preprocess_data(text):
    global sentences, tokenized
    tokenizer = nltk.RegexpTokenizer(r'\w+')

    sentences = nltk.sent_tokenize(text)
    tokenized = [tokenizer.tokenize(s) for s in sentences]

# import the data 
Example 14
Project: practicalDataAnalysisCookbook   Author: drabastomek   File: nlp_pos.py    GNU General Public License v2.0    5 votes
def preprocess_data(text):
    global sentences, tokenized
    tokenizer = nltk.RegexpTokenizer(r'\w+')

    sentences = nltk.sent_tokenize(text)
    tokenized = [tokenizer.tokenize(s) for s in sentences]

# import the data 
Example 15
Project: TransformerGen   Author: anurag1paul   File: data_loader.py    GNU General Public License v3.0    5 votes
def tokenize(self, caption):
        cap = caption.replace(u"\ufffd\ufffd", u" ")
        tokenizer = RegexpTokenizer(r'\w+')
        tokens = tokenizer.tokenize(cap.lower())

        tokens_new = []
        for t in tokens:
            t = t.encode('ascii', 'ignore').decode('ascii')
            if len(t) > 0:
                if t in self.word_to_idx:
                    tokens_new.append(self.word_to_idx[t])

        return tokens_new 
Example 16
Project: TransformerGen   Author: anurag1paul   File: data_loader.py    GNU General Public License v3.0    5 votes
def tokenize(self, caption):
        cap = caption.replace(u"\ufffd\ufffd", u" ")
        tokenizer = RegexpTokenizer(r'\w+')
        tokens = tokenizer.tokenize(cap.lower())

        tokens_new = []
        for t in tokens:
            t = t.encode('ascii', 'ignore').decode('ascii')
            if len(t) > 0:
                if t in self.word_to_idx:
                    tokens_new.append(self.word_to_idx[t])

        return tokens_new 
Example 17
Project: sparv-pipeline   Author: spraakbanken   File: segment.py    MIT License    5 votes
def __init__(self):
        nltk.RegexpTokenizer.__init__(self, r'\s*\n\s*', gaps=True) 
Example 18
Project: sparv-pipeline   Author: spraakbanken   File: segment.py    MIT License    5 votes
def __init__(self):
        nltk.RegexpTokenizer.__init__(self, r"[\.!\?]\s*", gaps=True) 
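With gaps=True the pattern describes the delimiters rather than the tokens, so tokenize() behaves like re.split() with empty pieces discarded. A minimal demonstration of the sentence-splitting pattern above, on an illustrative string:

import nltk

print(nltk.RegexpTokenizer(r"[\.!\?]\s*", gaps=True).tokenize("One. Two! Three?"))
# ['One', 'Two', 'Three']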
Example 19
Project: Topic_Disc   Author: zengjichuan   File: utils.py    MIT License    5 votes
def get_tokenize():
    return nltk.RegexpTokenizer(r'\w+|#\w+|<\w+>|%\w+|[^\w\s]+').tokenize 
Example 20
Project: Topic_Disc   Author: zengjichuan   File: utils.py    MIT License    5 votes
def get_chat_tokenize():
    return nltk.RegexpTokenizer(u'\w+|:d|:p|<sil>|<men>|<hash>|<url>|'
                                u'[\U0001f600-\U0001f64f\U0001f300-\U0001f5ff\U0001f680-\U0001f6ff]|'
                                u'[^\w\s]+').tokenize 
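Besides words and punctuation, this pattern keeps the lowercase emoticons :d and :p, the placeholders <sil>, <men>, <hash> and <url>, and single characters from the main emoji blocks (the \U escapes resolve to literal emoji because the pattern strings are not raw). A hedged demo on an illustrative chat line:

print(get_chat_tokenize()("lol :p <url> 😄 !!"))
# ['lol', ':p', '<url>', '😄', '!!']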
Example 21
Project: sklearn-doc2vec   Author: fanta-mnix   File: word_embeddings.py    MIT License    5 votes
def __init__(self, corpus, tokenizer=nltk.RegexpTokenizer(r'(?u)\b(?:\d+?(?:[\.\-/_:,]\d+)*|\w\w+)\b')):
        self.corpus = corpus
        self.tokenizer = tokenizer
        self.transformer = custom_transformer
        self.documents = None 
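The default pattern here resembles scikit-learn's default token_pattern (r"(?u)\b\w\w+\b") but additionally keeps digit groups joined by ., -, /, _, : or , (dates, times, version strings) as single tokens, while single-letter words are dropped. A hedged illustration on a made-up sentence:

import nltk

print(nltk.RegexpTokenizer(r'(?u)\b(?:\d+?(?:[\.\-/_:,]\d+)*|\w\w+)\b').tokenize(
    "pi is 3.14, meet at 10:30 on 2021-05-01"))
# ['pi', 'is', '3.14', 'meet', 'at', '10:30', 'on', '2021-05-01']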
Example 22
Project: NeuralDialog-LaRL   Author: snakeztc   File: utils.py    Apache License 2.0    5 votes
def get_tokenize():
    return RegexpTokenizer(r'\w+|#\w+|<\w+>|%\w+|[^\w\s]+').tokenize 
Example 23
Project: ConvNetPy   Author: benglard   File: topics.py    MIT License    5 votes
def test():
    global N, words, network

    print('In testing.')

    gettysburg = """Four score and seven years ago our fathers brought forth on this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure. We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper that we should do this. But, in a larger sense, we can not dedicate -- we can not consecrate -- we can not hallow -- this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here. It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so nobly advanced. It is rather for us to be here dedicated to the great task remaining before us -- that from these honored dead we take increased devotion to that cause for which they gave the last full measure of devotion -- that we here highly resolve that these dead shall not have died in vain -- that this nation, under God, shall have a new birth of freedom -- and that government of the people, by the people, for the people, shall not perish from the earth."""
    tokenizer = RegexpTokenizer(r'\w+')
    gettysburg_tokens = tokenizer.tokenize(gettysburg)

    samples = []
    for token in gettysburg_tokens:
        word = token.lower()
        if word not in ENGLISH_STOP_WORDS and word not in punctuation:
            samples.append(word)

    dist = FreqDist(samples)
    V = Vol(1, 1, N, 0.0)
    for i, word in enumerate(words):
        V.w[i] = dist.freq(word)

    pred = network.forward(V).w
    topics = []
    while len(topics) != 5:
        max_act = max(pred)
        topic_idx = pred.index(max_act)
        topic = words[topic_idx]

        if topic in gettysburg_tokens:
            topics.append(topic)

        # Mask this activation instead of deleting it, so indices stay aligned with words.
        pred[topic_idx] = float('-inf')

    print('Topics of the Gettysburg Address:')
    print(topics)
Example 24
Project: NeuralDialog-ZSDG   Author: snakeztc   File: utils.py    Apache License 2.0    5 votes
def get_tokenize():
    return nltk.RegexpTokenizer(r'\w+|#\w+|<\w+>|%\w+|[^\w\s]+').tokenize 
Example 25
Project: NeuralDialog-ZSDG   Author: snakeztc   File: utils.py    Apache License 2.0    5 votes
def get_chat_tokenize():
    return nltk.RegexpTokenizer(r'\w+|<sil>|[^\w\s]+').tokenize