imaginaryfriend/src/tokenizer.py

import re
from src.utils import random_element
from src.config import config


class Tokenizer:
    def __init__(self):
        # Markov chain length and token settings come from the [grammar]
        # section of the config.
        self.chain_len = config.getint('grammar', 'chain_len')
        self.stop_word = config['grammar']['stop_word']
        self.endsent = config['grammar']['endsent']
        self.garbage = config['grammar']['garbage']
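        # For reference, a plausible shape for the [grammar] config section;
        # these values are illustrative assumptions, not taken from this
        # repository:
        #
        #     [grammar]
        #     chain_len = 2
        #     stop_word = #stop#
        #     endsent = .!?
        #     garbage = .,!?"#$%&'()*+-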

    def split_to_trigrams(self, src_words):
        # Too few words to form even one chain: yield nothing.
        if len(src_words) <= self.chain_len:
            return

        words = [self.stop_word]
        for word in src_words:
            words.append(word)
            # Sentence-final punctuation closes the current sentence.
            if word[-1] in self.endsent:
                words.append(self.stop_word)
        # Make sure the sequence always ends with a stop word.
        if words[-1] != self.stop_word:
            words.append(self.stop_word)

        # Slide a window of chain_len + 1 words over the sequence.
        for i in range(len(words) - self.chain_len):
            j = i + self.chain_len + 1
            yield words[i:j]
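
    # Example for split_to_trigrams (illustrative, assuming chain_len == 2
    # and stop_word == '#stop#'): ['hello', 'world.', 'bye'] yields
    #   ['#stop#', 'hello', 'world.']
    #   ['hello', 'world.', '#stop#']
    #   ['world.', '#stop#', 'bye']
    #   ['#stop#', 'bye', '#stop#']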

    def extract_words(self, message):
        symbols = list(re.sub(r'\s', ' ', message.text))
        for entity in message.entities:
            # Blank out message entities (URLs, mentions, bot commands, ...)
            # so they do not end up in the word list. Note that Telegram
            # reports entity offsets in UTF-16 code units, so the slice can
            # drift on texts containing astral-plane characters (e.g. emoji).
            symbols[entity.offset:entity.offset + entity.length] = ' ' * entity.length
        return list(filter(None, map(self.__prettify, ''.join(symbols).split(' '))))
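
    # Worked example of the entity blanking in extract_words (offsets are
    # illustrative): for text 'see https://t.me/x now' with one URL entity
    # (offset 4, length 14), the URL is replaced by 14 spaces and the final
    # word list is ['see', 'now'].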

    def random_end_sentence_token(self):
        return random_element(list(self.endsent))

    def __prettify(self, word):
        lowercase_word = word.lower().strip()
        # Remember trailing sentence-final punctuation, if any.
        last_symbol = lowercase_word[-1:]
        if last_symbol not in self.endsent:
            last_symbol = ''
        # Trim surrounding garbage characters before the length check.
        pretty_word = lowercase_word.strip(self.garbage)
        if len(pretty_word) > 2:
            return pretty_word + last_symbol
        elif lowercase_word in self.garbage:
            return None
        return lowercase_word
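

# Minimal usage sketch, not part of the original module. It assumes the
# config's [grammar] section is populated as in the illustrative values
# shown in __init__ above; FakeMessage and FakeEntity are hypothetical
# stand-ins for the Telegram message objects this class normally receives.
if __name__ == '__main__':
    class FakeEntity:
        def __init__(self, offset, length):
            self.offset = offset
            self.length = length

    class FakeMessage:
        def __init__(self, text, entities=()):
            self.text = text
            self.entities = list(entities)

    tokenizer = Tokenizer()
    message = FakeMessage('Check https://example.com now. Great stuff!',
                          entities=[FakeEntity(6, 19)])  # blank out the URL
    words = tokenizer.extract_words(message)
    print(words)  # e.g. ['check', 'now.', 'great', 'stuff!']
    for trigram in tokenizer.split_to_trigrams(words):
        print(trigram)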