Modernise and merge lexeme vocab tests

This commit is contained in:
Ines Montani 2017-01-12 15:51:20 +01:00
parent c3d4516fc2
commit 8e3e58a7e6
2 changed files with 46 additions and 58 deletions

View File

@@ -1,28 +1,58 @@
# coding: utf-8
from __future__ import unicode_literals
from ...attrs import *
import pytest
from spacy.attrs import *
@pytest.mark.parametrize('text1,prob1,text2,prob2', [("NOUN", -1, "opera", -2)])
def test_vocab_lexeme_lt(en_vocab, text1, text2, prob1, prob2):
    """A more probable (more frequent) lexeme compares less-than a rarer one."""
    frequent = en_vocab[text1]
    frequent.prob = prob1
    rare = en_vocab[text2]
    rare.prob = prob2
    # Ordering must hold symmetrically in both directions.
    assert frequent < rare
    assert rare > frequent
@pytest.mark.parametrize('text1,text2', [("phantom", "opera")])
def test_vocab_lexeme_hash(en_vocab, text1, text2):
    """Lexemes must be hashable and usable as dict keys."""
    first = en_vocab[text1]
    second = en_vocab[text2]
    by_lexeme = {first: first, second: second}
    # Round-tripping through the dict must preserve identity of each entry.
    assert by_lexeme[first].orth_ == text1
    assert by_lexeme[second].orth_ == text2
def test_lexeme_lt(en_vocab):
    """A more frequent lexeme compares less-than a less frequent one."""
    frequent, rare = en_vocab['NOUN'], en_vocab['opera']
    # Comparison must be consistent in both directions.
    assert frequent < rare
    assert rare > frequent
def test_vocab_lexeme_is_alpha(en_vocab):
    """Only purely alphabetic strings carry the IS_ALPHA flag bit."""
    alpha_bit = 1 << IS_ALPHA
    assert en_vocab['the'].flags & alpha_bit
    assert not en_vocab['1999'].flags & alpha_bit
    assert not en_vocab['hello1'].flags & alpha_bit
def test_lexeme_hash(en_vocab):
    """Lexemes must be hashable and usable as dict keys."""
    phantom = en_vocab['phantom']
    opera = en_vocab['opera']
    by_lexeme = {phantom: phantom, opera: opera}
    # Lookup by lexeme key must return the same underlying entry.
    assert by_lexeme[phantom].orth_ == 'phantom'
    assert by_lexeme[opera].orth_ == 'opera'


def test_vocab_lexeme_is_digit(en_vocab):
    """Only all-digit strings carry the IS_DIGIT flag bit."""
    digit_bit = 1 << IS_DIGIT
    assert not en_vocab['the'].flags & digit_bit
    assert en_vocab['1999'].flags & digit_bit
    assert not en_vocab['hello1'].flags & digit_bit
def test_vocab_lexeme_add_flag_auto_id(en_vocab):
    """add_flag with no explicit id must allocate a fresh flag bit.

    The auto-assigned flag has to coexist with built-in flags such as
    IS_DIGIT without clobbering them.
    """
    is_len4 = en_vocab.add_flag(lambda string: len(string) == 4)
    # Truthiness asserts instead of `== True` / `== False` (PEP 8 E712):
    # check_flag returns a boolean, so direct assertion is the idiom.
    assert en_vocab['1999'].check_flag(is_len4)
    assert en_vocab['1999'].check_flag(IS_DIGIT)
    assert not en_vocab['199'].check_flag(is_len4)
    assert en_vocab['199'].check_flag(IS_DIGIT)
    assert not en_vocab['the'].check_flag(is_len4)
    assert en_vocab['dogs'].check_flag(is_len4)
def test_vocab_lexeme_add_flag_provided_id(en_vocab):
    """add_flag with an explicit flag_id must overwrite that flag's bit.

    Reusing the IS_DIGIT slot means the built-in digit semantics are
    replaced by the len-4 predicate ('199' no longer checks as IS_DIGIT).
    """
    is_len4 = en_vocab.add_flag(lambda string: len(string) == 4, flag_id=IS_DIGIT)
    # Truthiness asserts instead of `== True` / `== False` (PEP 8 E712).
    assert en_vocab['1999'].check_flag(is_len4)
    assert not en_vocab['199'].check_flag(is_len4)
    assert not en_vocab['199'].check_flag(IS_DIGIT)
    assert not en_vocab['the'].check_flag(is_len4)
    assert en_vocab['dogs'].check_flag(is_len4)

View File

@@ -1,42 +0,0 @@
from __future__ import unicode_literals
import pytest
from spacy.attrs import *
def test_is_alpha(en_vocab):
    """Only purely alphabetic strings carry the IS_ALPHA flag bit."""
    alpha_bit = 1 << IS_ALPHA
    # alphabetic, numeric, and mixed examples respectively
    assert en_vocab['the'].flags & alpha_bit
    assert not en_vocab['1999'].flags & alpha_bit
    assert not en_vocab['hello1'].flags & alpha_bit
def test_is_digit(en_vocab):
    """Only all-digit strings carry the IS_DIGIT flag bit."""
    digit_bit = 1 << IS_DIGIT
    # alphabetic, numeric, and mixed examples respectively
    assert not en_vocab['the'].flags & digit_bit
    assert en_vocab['1999'].flags & digit_bit
    assert not en_vocab['hello1'].flags & digit_bit
def test_add_flag_auto_id(en_vocab):
    """add_flag with no explicit id must allocate a fresh flag bit.

    The auto-assigned flag has to coexist with built-in flags such as
    IS_DIGIT without clobbering them.
    """
    is_len4 = en_vocab.add_flag(lambda string: len(string) == 4)
    # Truthiness asserts instead of `== True` / `== False` (PEP 8 E712):
    # check_flag returns a boolean, so direct assertion is the idiom.
    assert en_vocab['1999'].check_flag(is_len4)
    assert en_vocab['1999'].check_flag(IS_DIGIT)
    assert not en_vocab['199'].check_flag(is_len4)
    assert en_vocab['199'].check_flag(IS_DIGIT)
    assert not en_vocab['the'].check_flag(is_len4)
    assert en_vocab['dogs'].check_flag(is_len4)
def test_add_flag_provided_id(en_vocab):
    """add_flag with an explicit flag_id must overwrite that flag's bit.

    Reusing the IS_DIGIT slot means the built-in digit semantics are
    replaced by the len-4 predicate ('199' no longer checks as IS_DIGIT).
    """
    is_len4 = en_vocab.add_flag(lambda string: len(string) == 4, flag_id=IS_DIGIT)
    # Truthiness asserts instead of `== True` / `== False` (PEP 8 E712).
    assert en_vocab['1999'].check_flag(is_len4)
    assert not en_vocab['199'].check_flag(is_len4)
    assert not en_vocab['199'].check_flag(IS_DIGIT)
    assert not en_vocab['the'].check_flag(is_len4)
    assert en_vocab['dogs'].check_flag(is_len4)