2014-12-21 20:25:43 +00:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
from os import path
|
2015-01-17 05:21:17 +00:00
|
|
|
import re
|
2014-12-21 20:25:43 +00:00
|
|
|
|
2015-01-11 23:26:22 +00:00
|
|
|
from .. import orth
|
2014-12-21 20:25:43 +00:00
|
|
|
from ..vocab import Vocab
|
|
|
|
from ..tokenizer import Tokenizer
|
|
|
|
from ..syntax.parser import GreedyParser
|
|
|
|
from ..tokens import Tokens
|
|
|
|
from .pos import EnPosTagger
|
2014-12-21 21:54:47 +00:00
|
|
|
from .pos import POS_TAGS
|
2014-12-21 20:25:43 +00:00
|
|
|
from .attrs import get_flags
|
|
|
|
|
|
|
|
|
2015-01-17 05:21:17 +00:00
|
|
|
from ..util import read_lang_data
|
|
|
|
|
|
|
|
|
2014-12-21 20:25:43 +00:00
|
|
|
def get_lex_props(string):
    """Build the dict of lexical properties for a word string.

    Passed to the Vocab as its property getter: when a new string is seen,
    this supplies the initial values for the Lexeme's attributes.

    Args:
        string (unicode): The word string. May be empty.

    Returns:
        dict: Lexical attribute values keyed by attribute name.
    """
    return {
        'flags': get_flags(string),
        'length': len(string),
        'orth': string,
        'lower': string.lower(),
        'norm': string,
        'shape': orth.word_shape(string),
        # Guard the empty string: string[0] would raise IndexError, while
        # the suffix slice below already tolerates it.
        'prefix': string[0] if string else '',
        'suffix': string[-3:],
        # Cluster, probability and sentiment default to 0 here; real values
        # are expected to come from serialized vocab data when present.
        'cluster': 0,
        'prob': 0,
        'sentiment': 0
    }
|
|
|
|
|
2014-12-21 20:25:43 +00:00
|
|
|
|
2015-01-11 23:26:22 +00:00
|
|
|
# Default data directory: a "data/" folder next to this file.
LOCAL_DATA_DIR = path.join(path.dirname(__file__), 'data')

# Sentinel default for English.__call__'s `parse` keyword: -1 means
# "parse only if tagging is enabled and the parser model is installed".
parse_if_model_present = -1
|
|
|
|
|
2015-01-14 13:33:16 +00:00
|
|
|
|
2014-12-21 20:25:43 +00:00
|
|
|
class English(object):
    """The English NLP pipeline.

    Provides a tokenizer, lexicon, part-of-speech tagger and parser.

    Keyword args:
        data_dir (unicode): A path to a directory, from which to load the pipeline.
            If empty string ('') --- the default --- it looks for a directory
            named "data/" in the same directory as the present file, i.e.

            >>> data_dir = path.join(path.dirname(__file__), 'data')

            If path.join(data_dir, 'pos') exists, the tagger is loaded from there.

            If path.join(data_dir, 'deps') exists, the parser is loaded from there.

            To prevent any data files from being loaded, pass data_dir=None. This
            is useful if you want to construct a lexicon, which you'll then save
            for later loading.
    """
    def __init__(self, data_dir=''):
        if data_dir == '':
            data_dir = LOCAL_DATA_DIR
        self._data_dir = data_dir
        self.vocab = Vocab(data_dir=path.join(data_dir, 'vocab') if data_dir else None,
                           get_lex_props=get_lex_props)
        tag_names = list(POS_TAGS.keys())
        tag_names.sort()
        if data_dir is None:
            # No data directory: build an empty tokenizer, and record that
            # neither statistical model is available.
            tok_rules = {}
            prefix_re = None
            suffix_re = None
            infix_re = None
            self.has_parser_model = False
            self.has_tagger_model = False
        else:
            tok_data_dir = path.join(data_dir, 'tokenizer')
            tok_rules, prefix_re, suffix_re, infix_re = read_lang_data(tok_data_dir)
            prefix_re = re.compile(prefix_re)
            suffix_re = re.compile(suffix_re)
            infix_re = re.compile(infix_re)
            # Model availability is decided by directory existence, so the
            # heavy models themselves can still be lazy-loaded below.
            self.has_parser_model = path.exists(path.join(self._data_dir, 'deps'))
            self.has_tagger_model = path.exists(path.join(self._data_dir, 'pos'))
        self.tokenizer = Tokenizer(self.vocab, tok_rules, prefix_re,
                                   suffix_re, infix_re,
                                   POS_TAGS, tag_names)
        # These are lazy-loaded
        self._tagger = None
        self._parser = None

    @property
    def tagger(self):
        # Lazy-load the part-of-speech tagger on first access.
        if self._tagger is None:
            self._tagger = EnPosTagger(self.vocab.strings, self._data_dir)
        return self._tagger

    @property
    def parser(self):
        # Lazy-load the dependency parser on first access (this is the
        # expensive one --- typically several seconds).
        if self._parser is None:
            self._parser = GreedyParser(path.join(self._data_dir, 'deps'))
        return self._parser

    def __call__(self, text, tag=True, parse=parse_if_model_present):
        """Apply the pipeline to some text. The text can span multiple sentences,
        and can contain arbitrary whitespace. Alignment into the original string
        is preserved.

        The tagger and parser are lazy-loaded the first time they are required.
        Loading the parser model usually takes 5-10 seconds.

        Args:
            text (unicode): The text to be processed.

        Keyword args:
            tag (bool): Whether to add part-of-speech tags to the text. Also
                sets morphological analysis and lemmas.

            parse (True, False, -1): Whether to add labelled syntactic dependencies.

                -1 (default) is "guess": It will guess True if tag=True and the
                model has been installed.

        Returns:
            tokens (spacy.tokens.Tokens):

        >>> from spacy.en import English
        >>> nlp = English()
        >>> tokens = nlp('An example sentence. Another example sentence.')
        >>> tokens[0].orth_, tokens[0].head.tag_
        ('An', 'NN')
        """
        if parse == True and tag == False:
            # Fixed: the two string fragments previously concatenated with no
            # separator ("parse=TruePart-of-speech...").
            msg = ("Incompatible arguments: tag=False, parse=True. "
                   "Part-of-speech tags are required for parsing.")
            raise ValueError(msg)
        tokens = self.tokenizer(text)
        # Resolve the -1 "guess" sentinel: only parse when tagging is on and
        # the parser model exists on disk.
        if parse == -1 and tag == False:
            parse = False
        elif parse == -1 and not self.has_parser_model:
            parse = False
        if tag and self.has_tagger_model:
            self.tagger(tokens)
        if parse == True and not self.has_parser_model:
            msg = ("Received parse=True, but parser model not found.\n\n"
                   "Run:\n"
                   "$ python -m spacy.en.download\n"
                   "To install the model.")
            raise IOError(msg)
        if parse and self.has_parser_model:
            self.parser(tokens)
        return tokens

    @property
    def tags(self):
        """List of part-of-speech tag names."""
        return self.tagger.tag_names
|