From d10993f41a2bb306cf7df90194025af3ad091bb3 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Thu, 21 Aug 2014 16:37:13 +0200
Subject: [PATCH] * More docs work

---
 spacy/lexeme.pxd            |  1 +
 spacy/lexeme.pyx            | 86 +++++++++++++++++++------------------
 spacy/orthography/latin.pyx | 16 +++++--
 spacy/util.py               | 11 +++--
 4 files changed, 65 insertions(+), 49 deletions(-)

diff --git a/spacy/lexeme.pxd b/spacy/lexeme.pxd
index 91f5b0884..f6836af56 100644
--- a/spacy/lexeme.pxd
+++ b/spacy/lexeme.pxd
@@ -22,6 +22,7 @@ cdef struct Lexeme:
 
     StringHash* string_views
 
+cpdef StringHash lex_of(LexID lex_id) except 0
 cpdef char first_of(LexID lex_id) except 0
 cpdef size_t length_of(LexID lex_id) except 0
 cpdef double prob_of(LexID lex_id) except 0
diff --git a/spacy/lexeme.pyx b/spacy/lexeme.pyx
index 78c98d045..1aa908043 100644
--- a/spacy/lexeme.pyx
+++ b/spacy/lexeme.pyx
@@ -29,26 +29,21 @@ cpdef StringHash view_of(LexID lex_id, size_t view) except 0:
     return (<Lexeme*>lex_id).string_views[view]
 
 
-cpdef StringHash lex_of(size_t lex_id) except 0:
-    '''Access the `lex' field of the Lexeme pointed to by lex_id.
+cpdef StringHash lex_of(LexID lex_id) except 0:
+    '''Access a hash of the word's string.
 
-    The lex field is the hash of the string you would expect to get back from
-    a standard tokenizer, i.e. the word with punctuation and other non-whitespace
-    delimited tokens split off.  The other fields refer to properties of the
-    string that the lex field stores a hash of, except sic and tail.
-
-    >>> from spacy import en
-    >>> [en.unhash(lex_of(lex_id) for lex_id in en.tokenize(u'Hi! world')]
-    [u'Hi', u'!', u'world']
+    >>> lex_of(lookup(u'Hi')) == hash(u'Hi')
+    True
     '''
     return (<Lexeme*>lex_id).lex
 
 
 cpdef ClusterID cluster_of(LexID lex_id) except 0:
-    '''Access the `cluster' field of the Lexeme pointed to by lex_id, which
-    gives an integer representation of the cluster ID of the word,
-    which should be understood as a binary address:
+    '''Access an integer representation of the word's Brown cluster.
 
+    A Brown cluster is an address into a binary tree, which gives some (noisy)
+    information about the word's distributional context.
+
     >>> strings = (u'pineapple', u'apple', u'dapple', u'scalable')
     >>> token_ids = [lookup(s) for s in strings]
     >>> clusters = [cluster_of(t) for t in token_ids]
@@ -64,29 +59,28 @@ cpdef ClusterID cluster_of(LexID lex_id) except 0:
 
 
 cpdef char first_of(size_t lex_id) except 0:
-    '''Access the `first' field of the Lexeme pointed to by lex_id, which
-    stores the first character of the lex string of the word.
+    '''Access the first byte of a utf8 encoding of the word.
 
     >>> lex_id = lookup(u'Hello')
-    >>> unhash(first_of(lex_id))
-    u'H'
+    >>> chr(first_of(lex_id))
+    'H'
     '''
     return (<Lexeme*>lex_id).string[0]
 
 
 cpdef size_t length_of(size_t lex_id) except 0:
-    '''Access the `length' field of the Lexeme pointed to by lex_id, which stores
-    the length of the string hashed by lex_of.'''
+    '''Access the (unicode) length of the word.
+    '''
     cdef Lexeme* word = <Lexeme*>lex_id
     return word.length
 
 
 cpdef double prob_of(size_t lex_id) except 0:
-    '''Access the `prob' field of the Lexeme pointed to by lex_id, which stores
-    the smoothed unigram log probability of the word, as estimated from a large
-    text corpus.  By default, probabilities are based on counts from Gigaword,
-    smoothed using Knesser-Ney; but any probabilities file can be supplied to
-    load_probs.
+    '''Access an estimate of the word's unigram log probability.
+
+    Probabilities are calculated from a large text corpus, and smoothed using
+    simple Good-Turing.  Estimates are read from data/en/probabilities, and
+    can be replaced using spacy.en.load_probabilities.
 
     >>> prob_of(lookup(u'world'))
     -20.10340371976182
@@ -97,31 +91,39 @@ DEF OFT_UPPER = 1
 DEF OFT_TITLE = 2
 
 
 cpdef bint is_oft_upper(size_t lex_id):
-    '''Access the `oft_upper' field of the Lexeme pointed to by lex_id, which
-    stores whether the lowered version of the string hashed by `lex' is found
-    in all-upper case frequently in a large sample of text.  Users are free
-    to load different data, by default we use a sample from Wikipedia, with
-    a threshold of 0.95, picked to maximize mutual information for POS tagging.
-
-    >>> is_oft_upper(lookup(u'abc'))
-    True
-    >>> is_oft_upper(lookup(u'aBc'))  # This must get the same answer
+    '''Check the OFT_UPPER distributional flag for the word.
+
+    The OFT_UPPER flag records whether a lower-cased version of the word
+    is found in all-upper case frequently in a large sample of text, where
+    "frequently" is defined as P >= 0.95 (chosen for high mutual information for
+    POS tagging).
+
+    Case statistics are estimated from a large text corpus.  Estimates are read
+    from data/en/case_stats, and can be replaced using spacy.en.load_case_stats.
+
+    >>> is_oft_upper(lookup(u'nato'))
     True
+    >>> is_oft_upper(lookup(u'the'))
+    False
     '''
     return (<Lexeme*>lex_id).dist_flags & (1 << OFT_UPPER)
 
 
 cpdef bint is_oft_title(size_t lex_id):
-    '''Access the `oft_upper' field of the Lexeme pointed to by lex_id, which
-    stores whether the lowered version of the string hashed by `lex' is found
-    title-cased frequently in a large sample of text.  Users are free
-    to load different data, by default we use a sample from Wikipedia, with
-    a threshold of 0.3, picked to maximize mutual information for POS tagging.
-
-    >>> is_oft_title(lookup(u'marcus'))
-    True
-    >>> is_oft_title(lookup(u'MARCUS'))  # This must get the same value
+    '''Check the OFT_TITLE distributional flag for the word.
+
+    The OFT_TITLE flag records whether a lower-cased version of the word
+    is found title-cased (see string.istitle) frequently in a large sample of
+    text, where "frequently" is defined as P >= 0.3 (chosen for high mutual
+    information for POS tagging).
+
+    Case statistics are estimated from a large text corpus.  Estimates are read
+    from data/en/case_stats, and can be replaced using spacy.en.load_case_stats.
+
+    >>> is_oft_title(lookup(u'john'))
     True
+    >>> is_oft_title(lookup(u'Bill'))
+    False
     '''
     return (<Lexeme*>lex_id).dist_flags & (1 << OFT_TITLE)
diff --git a/spacy/orthography/latin.pyx b/spacy/orthography/latin.pyx
index 684eb4c2e..328e73d8c 100644
--- a/spacy/orthography/latin.pyx
+++ b/spacy/orthography/latin.pyx
@@ -141,7 +141,11 @@ cpdef bint is_ascii(LexID lex_id) except *:
 
 
 cpdef StringHash norm_of(LexID lex_id) except 0:
-    """Return the hash of a normalized version of the string.
+    """Return the hash of a "normalized" version of the string.
+
+    Normalized strings are intended to be less sparse, while still capturing
+    important lexical information.  See spacy.orthography.latin.normalize_string
+    for details of the normalization function.
 
     >>> unhash(norm_of(lookup(u'Hi')))
     u'hi'
@@ -154,7 +158,11 @@
 cpdef StringHash shape_of(LexID lex_id) except 0:
-    """Return the hash of the string shape.
+    """Return the hash of a string describing the word's "orthographic shape".
+
+    Orthographic shapes are calculated by the
+    spacy.orthography.latin.string_shape function.  Word shape features have
+    been found useful for NER and POS tagging, e.g. Manning (2011).
 
     >>> unhash(shape_of(lookup(u'Hi')))
     u'Xx'
@@ -168,8 +176,8 @@
 cpdef StringHash last3_of(LexID lex_id) except 0:
-    '''Access the `last3' field of the Lexeme pointed to by lex_id, which stores
-    the hash of the last three characters of the word:
+    '''Return the hash of string[-3:], i.e. the last three characters of the word.
+
     >>> lex_ids = [lookup(w) for w in (u'Hello', u'!')]
     >>> [unhash(last3_of(lex_id)) for lex_id in lex_ids]
     [u'llo', u'!']
diff --git a/spacy/util.py b/spacy/util.py
index c9bce0171..0e1d237a6 100644
--- a/spacy/util.py
+++ b/spacy/util.py
@@ -1,6 +1,7 @@
 import os
 from os import path
 import codecs
+import json
 
 DATA_DIR = path.join(path.dirname(__file__), '..', 'data')
 
@@ -19,9 +20,13 @@ def load_case_stats(data_dir):
     return case_stats
 
 
-def load_dist_info(lang):
-    with path.join(DATA_DIR, lang, 'distribution_info.json') as file_:
-        dist_info = json.load(file_)
+def read_dist_info(lang):
+    dist_path = path.join(DATA_DIR, lang, 'distribution_info.json')
+    if path.exists(dist_path):
+        with open(dist_path) as file_:
+            dist_info = json.load(file_)
+    else:
+        dist_info = {}
     return dist_info
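
Note: a rough usage sketch of the accessors documented above, for review
purposes only (not part of the patch). It assumes lookup/unhash are exposed
from spacy.en and that the accessors can be imported from spacy.lexeme and
spacy.orthography.latin -- the doctests hint at this layout, but the patch
itself does not pin the import paths down.

    from spacy.en import lookup, unhash
    from spacy.lexeme import lex_of, cluster_of, prob_of, is_oft_upper
    from spacy.orthography.latin import norm_of, shape_of, last3_of

    word = lookup(u'Hello')                  # LexID handle for the word
    assert lex_of(word) == hash(u'Hello')    # lex is a hash of the string
    print(prob_of(word))                     # smoothed unigram log probability
    print('{0:b}'.format(cluster_of(word)))  # Brown cluster as a binary address
    print(unhash(norm_of(word)))             # u'hello', per the norm_of doctest
    print(unhash(shape_of(word)))            # orthographic shape, e.g. u'Xxxxx'
    print(unhash(last3_of(word)))            # u'llo'
    print(bool(is_oft_upper(word)))          # OFT_UPPER distributional flag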