From 7745b3ae04d013a127eb12ac764066e88a863095 Mon Sep 17 00:00:00 2001
From: Tpt
Date: Mon, 12 Jun 2017 15:29:58 +0200
Subject: [PATCH] Adds noun chunks to French syntax iterators

---
 spacy/lang/fr/__init__.py         |  2 ++
 spacy/lang/fr/syntax_iterators.py | 42 +++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)
 create mode 100644 spacy/lang/fr/syntax_iterators.py

diff --git a/spacy/lang/fr/__init__.py b/spacy/lang/fr/__init__.py
index e8c13777f..a243b6268 100644
--- a/spacy/lang/fr/__init__.py
+++ b/spacy/lang/fr/__init__.py
@@ -5,6 +5,7 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
 from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP
+from .syntax_iterators import SYNTAX_ITERATORS
 
 from ..tokenizer_exceptions import BASE_EXCEPTIONS
 from ..norm_exceptions import BASE_NORMS
@@ -24,6 +25,7 @@ class FrenchDefaults(Language.Defaults):
     infixes = tuple(TOKENIZER_INFIXES)
     suffixes = tuple(TOKENIZER_SUFFIXES)
     token_match = TOKEN_MATCH
+    syntax_iterators = dict(SYNTAX_ITERATORS)
 
     @classmethod
     def create_lemmatizer(cls, nlp=None):
diff --git a/spacy/lang/fr/syntax_iterators.py b/spacy/lang/fr/syntax_iterators.py
new file mode 100644
index 000000000..c9de4f084
--- /dev/null
+++ b/spacy/lang/fr/syntax_iterators.py
@@ -0,0 +1,42 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+from ...symbols import NOUN, PROPN, PRON
+
+
+def noun_chunks(obj):
+    """
+    Detect base noun phrases from a dependency parse. Works on both Doc and Span.
+    """
+    labels = ['nsubj', 'nsubj:pass', 'obj', 'iobj', 'ROOT', 'appos', 'nmod', 'nmod:poss']
+    doc = obj.doc  # Ensure works on both Doc and Span.
+    np_deps = [doc.vocab.strings[label] for label in labels]
+    conj = doc.vocab.strings.add('conj')
+    np_label = doc.vocab.strings.add('NP')
+    seen = set()
+    for i, word in enumerate(obj):
+        if word.pos not in (NOUN, PROPN, PRON):
+            continue
+        # Prevent nested chunks from being produced
+        if word.i in seen:
+            continue
+        if word.dep in np_deps:
+            if any(w.i in seen for w in word.subtree):
+                continue
+            seen.update(j for j in range(word.left_edge.i, word.right_edge.i+1))
+            yield word.left_edge.i, word.right_edge.i+1, np_label
+        elif word.dep == conj:
+            head = word.head
+            while head.dep == conj and head.head.i < head.i:
+                head = head.head
+            # If the head is an NP, and we're coordinated to it, we're an NP
+            if head.dep in np_deps:
+                if any(w.i in seen for w in word.subtree):
+                    continue
+                seen.update(j for j in range(word.left_edge.i, word.right_edge.i+1))
+                yield word.left_edge.i, word.right_edge.i+1, np_label
+
+
+SYNTAX_ITERATORS = {
+    'noun_chunks': noun_chunks
+}
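
For reference, a minimal usage sketch of what this patch enables: once SYNTAX_ITERATORS is registered on FrenchDefaults, doc.noun_chunks yields base noun phrases for French parses. This is not part of the patch; it assumes a French pipeline with a dependency parser and POS tagger is installed, and the model name used here is an assumption that may differ per spaCy release.

    # Usage sketch (assumption: a French model such as 'fr_core_news_sm' is installed).
    import spacy

    nlp = spacy.load('fr_core_news_sm')
    doc = nlp(u"La petite fille mange une pomme rouge.")

    # The 'noun_chunks' entry registered by this patch is what backs doc.noun_chunks
    # for French; each chunk is a Span covering the noun's left edge to right edge.
    for chunk in doc.noun_chunks:
        print(chunk.text, chunk.root.text, chunk.root.dep_)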