//- ----------------------------------
//- 💫 DOCS > QUICKSTART > USAGE EXAMPLES
//- ----------------------------------
+section("examples")
+h(2, "examples").
2016-03-31 14:24:48 +00:00
    Usage Examples

+h(3, "examples-resources") Load resources and process text
+code.
    import spacy
    en_nlp = spacy.load('en')
    de_nlp = spacy.load('de')
    en_doc = en_nlp(u'Hello, world. Here are two sentences.')
    de_doc = de_nlp(u'ich bin ein Berliner.')

+h(3, "multi-threaded") Multi-threaded generator (using OpenMP. No GIL!)
+code.
    texts = [u'One document.', u'...', u'Lots of documents']
    # .pipe streams input, and produces streaming output
    iter_texts = (texts[i % 3] for i in xrange(100000000))
    for i, doc in enumerate(nlp.pipe(iter_texts, batch_size=50, n_threads=4)):
        assert doc.is_parsed
        if i == 100:
            break

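p.
    A sketch of doing real work inside the streaming loop; the
    #[code Counter] bookkeeping is ordinary Python, not part of the spaCy
    API, and reuses the #[code texts] list from above.

+code.
    from collections import Counter
    lemma_counts = Counter()
    for doc in nlp.pipe(iter(texts), batch_size=50, n_threads=4):
        lemma_counts.update(token.lemma_ for token in doc)
    print(lemma_counts.most_common(5))
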
+h(3, "examples-tokens-sentences") Get tokens and sentences
+code.
    token = doc[0]
    sentence = next(doc.sents)
    assert token is sentence[0]
    assert sentence.text == 'Hello, world.'

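p.
    Sentences come back as #[code Span] objects, with start and end token
    offsets into the document; a brief sketch:

+code.
    for sentence in doc.sents:
        print(sentence.text)
    span = doc[0:2]
    assert span.text == 'Hello,'
    assert (span.start, span.end) == (0, 2)
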
+h(3, "examples-integer-ids") Use integer IDs for any string
+code.
    hello_id = nlp.vocab.strings['Hello']
    hello_str = nlp.vocab.strings[hello_id]
    # the exact integer (3125 here) depends on the vocabulary data
    assert token.orth == hello_id == 3125
    assert token.orth_ == hello_str == 'Hello'

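p.
    Because every string maps to an integer ID, you can count and compare
    tokens without holding the strings themselves; a minimal sketch:

+code.
    from collections import Counter
    counts = Counter(token.orth for token in doc)
    most_common_id, freq = counts.most_common(1)[0]
    print('%s appears %d times' % (nlp.vocab.strings[most_common_id], freq))
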
+h(3, "examples-string-views-flags") Get and set string views and flags
+code.
    assert token.shape_ == 'Xxxxx'
    for lexeme in nlp.vocab:
        if lexeme.is_alpha:
            lexeme.shape_ = 'W'
        elif lexeme.is_digit:
            lexeme.shape_ = 'D'
        elif lexeme.is_punct:
            lexeme.shape_ = 'P'
        else:
            lexeme.shape_ = 'M'
    assert token.shape_ == 'W'

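p.
    Other string views work the same way; a brief sketch, still using the
    token 'Hello' from above (the prefix and suffix views default to the
    first one and last three characters):

+code.
    assert token.lower_ == 'hello'
    assert token.prefix_ == 'H'
    assert token.suffix_ == 'llo'
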
+h(3, "examples-numpy-arrays") Export to numpy arrays
+code.
    from spacy.attrs import ORTH, LIKE_URL, IS_OOV
    attr_ids = [ORTH, LIKE_URL, IS_OOV]
    doc_array = doc.to_array(attr_ids)
    assert doc_array.shape == (len(doc), len(attr_ids))
    assert doc[0].orth == doc_array[0, 0]
    assert doc[1].orth == doc_array[1, 0]
    assert doc[0].like_url == doc_array[0, 1]
    assert list(doc_array[:, 1]) == [t.like_url for t in doc]

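p.
    The result is a plain numpy array, so the usual vectorised operations
    apply; a minimal sketch reusing #[code doc_array] from above:

+code.
    import numpy
    # column 1 holds the LIKE_URL flag, so summing it counts URL-like tokens
    n_urls = int(numpy.sum(doc_array[:, 1]))
    assert n_urls == sum(t.like_url for t in doc)
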
+h(3, "examples-word-vectors") Word vectors
+code.
doc = nlp("Apples and oranges are similar. Boots and hippos aren't.")
apples = doc[0]
oranges = doc[2]
boots = doc[6]
hippos = doc[8]
assert apples.similarity(oranges) > boots.similarity(hippos)
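p.
    The similarity scores are computed from the word vectors, which you can
    also access directly; a brief sketch (the vector size depends on the
    model's vector data):

+code.
    apples_vector = apples.vector  # a numpy array, e.g. 300 dimensions
    doc2 = nlp(u'Fruit is healthy.')
    print(doc.similarity(doc2))    # similarity of the averaged vectors
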
+h(3, "examples-pos-tags") Part-of-speech tags
+code.
    from spacy.parts_of_speech import ADV

    def is_adverb(token):
        return token.pos == ADV

    # These are data-specific, so no constants are provided. You have to look
    # up the IDs from the StringStore.
    NNS = nlp.vocab.strings['NNS']
    NNPS = nlp.vocab.strings['NNPS']

    def is_plural_noun(token):
        return token.tag == NNS or token.tag == NNPS

    def print_coarse_pos(token):
        print(token.pos_)

    def print_fine_pos(token):
        print(token.tag_)

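p.
    A quick usage sketch for the helpers above; the sentence is made up,
    and the exact tags depend on the statistical model:

+code.
    doc = nlp(u'The cats quickly climbed the tall trees.')
    print([t.orth_ for t in doc if is_plural_noun(t)])  # e.g. ['cats', 'trees']
    print([t.orth_ for t in doc if is_adverb(t)])       # e.g. ['quickly']
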
+h(3, "examples-dependencies") Syntactic dependencies
+code.
    def dependency_labels_to_root(token):
        '''Walk up the syntactic tree, collecting the arc labels.'''
        dep_labels = []
        while token.head is not token:
            dep_labels.append(token.dep)
            token = token.head
        return dep_labels

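p.
    The labels are collected as integers; a sketch of decoding them through
    the StringStore, just like the tag IDs above:

+code.
    doc = nlp(u'He told me a story.')
    labels = dependency_labels_to_root(doc[2])  # the token 'me'
    print([nlp.vocab.strings[label] for label in labels])
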
+h(3, "examples-entities") Named entities
+code.
    from collections import defaultdict
    from spacy.parts_of_speech import VERB

    def iter_products(docs):
        for doc in docs:
            for ent in doc.ents:
                if ent.label_ == 'PRODUCT':
                    yield ent

    def word_is_in_entity(word):
        return word.ent_type != 0

    def count_parent_verb_by_person(docs):
        # a defaultdict instance isn't callable, so nest via a lambda factory
        counts = defaultdict(lambda: defaultdict(int))
        for doc in docs:
            for ent in doc.ents:
                if ent.label_ == 'PERSON' and ent.root.head.pos == VERB:
                    counts[ent.orth_][ent.root.head.lemma_] += 1
        return counts

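p.
    A usage sketch for the counting helper; the entity predictions come
    from the statistical model, so treat the output as illustrative:

+code.
    docs = [nlp(u'Alice praised Bob.'), nlp(u'Bob thanked Alice.')]
    for person, verbs in count_parent_verb_by_person(docs).items():
        print(person + ': ' + ', '.join(verbs))
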
+h(3, "examples-inline") Calculate inline mark-up on original string
+code.
    def put_spans_around_tokens(doc, get_classes):
        '''Given some function to compute class names, put each token in a
        span element, with the appropriate classes computed.

        All whitespace is preserved, outside of the spans. (Yes, I know HTML
        won't display it. But the point is, no information is lost, so you can
        calculate what you need, e.g. <br /> tags, <p> tags, etc.)
        '''
        output = []
        template = '<span class="{classes}">{word}</span>{space}'
        for token in doc:
            if token.is_space:
                output.append(token.orth_)
            else:
                output.append(
                    template.format(
                        classes=' '.join(get_classes(token)),
                        word=token.orth_,
                        space=token.whitespace_))
        string = ''.join(output)
        string = string.replace('\n', '')
        string = string.replace('\t', ' ')
        return string

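p.
    A sketch of a class-computing callback to pass in; the function name
    and class scheme are made up for illustration:

+code.
    def pos_classes(token):
        return ['token', 'pos-' + token.pos_.lower()]

    doc = nlp(u'Hello, world.')
    print(put_spans_around_tokens(doc, pos_classes))
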
+h(3, "examples-binary") Efficient binary serialization
+code.
    import spacy
    from spacy.tokens.doc import Doc

    byte_string = doc.to_bytes()
    with open('moby_dick.bin', 'wb') as file_:
        file_.write(byte_string)

    nlp = spacy.load('en')
    for byte_string in Doc.read_bytes(open('moby_dick.bin', 'rb')):
        doc = Doc(nlp.vocab)
        doc.from_bytes(byte_string)

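p.
    #[code Doc.read_bytes] yields one byte string per document, so several
    docs can share a single file; a minimal sketch:

+code.
    with open('corpus.bin', 'wb') as file_:
        for text in [u'First document.', u'Second document.']:
            file_.write(nlp(text).to_bytes())
    docs = []
    for byte_string in Doc.read_bytes(open('corpus.bin', 'rb')):
        doc = Doc(nlp.vocab)
        doc.from_bytes(byte_string)
        docs.append(doc)
    assert len(docs) == 2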