Fix formatting, tidy up and remove unused imports

ines 2017-10-07 00:26:05 +02:00
parent 212c8f0711
commit 2586b61b15
1 changed file with 8 additions and 15 deletions


@@ -1,12 +1,9 @@
 # coding: utf8
 from __future__ import absolute_import, unicode_literals
 from contextlib import contextmanager
-import dill
-import numpy
 from thinc.neural import Model
-from thinc.neural.ops import NumpyOps, CupyOps
-from thinc.neural.optimizers import Adam, SGD
+from thinc.neural.optimizers import Adam
 import random
 import ujson
 from collections import OrderedDict
@@ -17,24 +14,20 @@ from .vocab import Vocab
 from .tagger import Tagger
 from .lemmatizer import Lemmatizer
 from .syntax.parser import get_templates
-from .syntax import nonproj
-from .pipeline import NeuralDependencyParser, EntityRecognizer
-from .pipeline import TokenVectorEncoder, NeuralTagger, NeuralEntityRecognizer
-from .pipeline import NeuralLabeller
-from .pipeline import SimilarityHook
-from .pipeline import TextCategorizer
-from . import about
+from .pipeline import NeuralDependencyParser, TokenVectorEncoder, NeuralTagger
+from .pipeline import NeuralEntityRecognizer, SimilarityHook, TextCategorizer
 from .compat import json_dumps, izip
+from .scorer import Scorer
+from ._ml import link_vectors_to_models
 from .attrs import IS_STOP
 from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
 from .lang.tokenizer_exceptions import TOKEN_MATCH
 from .lang.tag_map import TAG_MAP
 from .lang.lex_attrs import LEX_ATTRS
 from . import util
-from .scorer import Scorer
-from ._ml import link_vectors_to_models
+from . import about


 class BaseDefaults(object):
@@ -289,7 +282,7 @@ class Language(object):
         return self.pipeline.pop(self.pipe_names.index(name))

     def __call__(self, text, disable=[]):
-        """'Apply the pipeline to some text. The text can span multiple sentences,
+        """Apply the pipeline to some text. The text can span multiple sentences,
         and can contain arbtrary whitespace. Alignment into the original string
         is preserved.
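
For reference, the __call__ API this docstring describes can be exercised along these lines (a minimal sketch, not part of the commit; the model name 'en_core_web_sm' and the disabled component names are assumptions):

    import spacy

    nlp = spacy.load('en_core_web_sm')    # any installed pipeline works here
    # Skip components by name for this call only; the original text and
    # whitespace are preserved in doc.text.
    doc = nlp(u'This is a sentence. This is another one.',
              disable=['parser', 'ner'])
    assert doc.text == u'This is a sentence. This is another one.'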
@@ -387,7 +380,7 @@ class Language(object):
         get_gold_tuples (function): Function returning gold data
         **cfg: Config parameters.
-        returns: An optimizer
+        RETURNS: An optimizer
         """
         # Populate vocab
         if get_gold_tuples is not None:
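
The "RETURNS: An optimizer" line refers to the optimizer that begin_training sets up for the training loop. A rough usage sketch, assuming the v2-era training API; train_data is a hypothetical list of (text, gold) pairs and is not part of this commit:

    import random

    optimizer = nlp.begin_training()          # initialise pipeline models, get the optimizer back
    for epoch in range(10):
        random.shuffle(train_data)            # train_data: placeholder gold-standard examples
        for text, gold in train_data:
            nlp.update([text], [gold], sgd=optimizer)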