# conftest.py — shared pytest fixtures and hooks for the spaCy test suite.
# coding: utf-8
from __future__ import unicode_literals
from io import StringIO, BytesIO
from pathlib import Path
import os

import pytest

from ..en import English
from ..de import German
from ..es import Spanish
from ..it import Italian
from ..fr import French
from ..pt import Portuguese
from ..nl import Dutch
from ..sv import Swedish
from ..hu import Hungarian
from ..fi import Finnish
from ..bn import Bengali
from ..he import Hebrew
from ..tokens import Doc
from ..strings import StringStore
from ..lemmatizer import Lemmatizer
from ..attrs import ORTH, TAG, HEAD, DEP
# Languages exercised by the parametrized ``tokenizer`` fixture below.
# NOTE(review): Hebrew is imported and has its own fixture but is not
# listed here — confirm whether that omission is intentional.
LANGUAGES = [English, German, Spanish, Italian, French, Portuguese,
             Dutch, Swedish, Hungarian, Finnish, Bengali]
@pytest.fixture(params=LANGUAGES)
def tokenizer(request):
    """Parametrized fixture: a default tokenizer for every language in LANGUAGES."""
    language_cls = request.param
    return language_cls.Defaults.create_tokenizer()
@pytest.fixture
def en_tokenizer():
    """English tokenizer built from the language's default settings."""
    defaults = English.Defaults
    return defaults.create_tokenizer()
@pytest.fixture
def en_vocab():
    """A fresh English vocab built from the language's default settings."""
    defaults = English.Defaults
    return defaults.create_vocab()
@pytest.fixture
def en_parser():
    """An English dependency parser built from the language's default settings."""
    defaults = English.Defaults
    return defaults.create_parser()
@pytest.fixture
def es_tokenizer():
    """Spanish tokenizer built from the language's default settings."""
    defaults = Spanish.Defaults
    return defaults.create_tokenizer()
@pytest.fixture
def de_tokenizer():
    """German tokenizer built from the language's default settings."""
    defaults = German.Defaults
    return defaults.create_tokenizer()
@pytest.fixture(scope='module')
def fr_tokenizer():
    """French tokenizer, shared across the whole module (note scope='module')."""
    defaults = French.Defaults
    return defaults.create_tokenizer()
@pytest.fixture
def hu_tokenizer():
    """Hungarian tokenizer built from the language's default settings."""
    defaults = Hungarian.Defaults
    return defaults.create_tokenizer()
@pytest.fixture
def fi_tokenizer():
    """Finnish tokenizer built from the language's default settings."""
    defaults = Finnish.Defaults
    return defaults.create_tokenizer()
@pytest.fixture
def sv_tokenizer():
    """Swedish tokenizer built from the language's default settings."""
    defaults = Swedish.Defaults
    return defaults.create_tokenizer()
@pytest.fixture
def bn_tokenizer():
    """Bengali tokenizer built from the language's default settings."""
    defaults = Bengali.Defaults
    return defaults.create_tokenizer()
@pytest.fixture
def he_tokenizer():
    """Hebrew tokenizer built from the language's default settings."""
    defaults = Hebrew.Defaults
    return defaults.create_tokenizer()
@pytest.fixture
def stringstore():
    """A fresh, empty StringStore."""
    store = StringStore()
    return store
@pytest.fixture
def en_entityrecognizer():
    """An English named-entity recognizer built from the language defaults."""
    defaults = English.Defaults
    return defaults.create_entity()
@pytest.fixture
def lemmatizer():
    """An English lemmatizer built from the language defaults."""
    defaults = English.Defaults
    return defaults.create_lemmatizer()
@pytest.fixture
def text_file():
    """An empty in-memory unicode text buffer."""
    buffer = StringIO()
    return buffer
@pytest.fixture
def text_file_b():
    """An empty in-memory bytes buffer."""
    buffer = BytesIO()
    return buffer
# only used for tests that require loading the models
# in all other cases, use specific instances
@pytest.fixture(scope="session")
def EN():
    """Session-scoped English Language instance (expensive: loads models)."""
    return English()
@pytest.fixture(scope="session")
def DE():
    """Session-scoped German Language instance (expensive: loads models)."""
    return German()
def pytest_addoption(parser):
    """Register the command-line flags that gate optional test groups."""
    optional_groups = [
        ("--models", "include tests that require full models"),
        ("--vectors", "include word vectors tests"),
        ("--slow", "include slow tests"),
    ]
    for flag, description in optional_groups:
        parser.addoption(flag, action="store_true", help=description)
def pytest_runtest_setup(item):
    """Skip tests marked models/vectors/slow unless the matching flag was given."""
    for opt in ('models', 'vectors', 'slow'):
        flag = "--%s" % opt
        # Only consult the option when the test actually carries the marker.
        if opt in item.keywords and not item.config.getoption(flag):
            pytest.skip("need --%s option to run" % opt)