diff --git a/spacy/training/converters/conll_ner_to_docs.py b/spacy/training/converters/conll_ner_to_docs.py
index 3b851039c..902db585b 100644
--- a/spacy/training/converters/conll_ner_to_docs.py
+++ b/spacy/training/converters/conll_ner_to_docs.py
@@ -2,9 +2,8 @@ from wasabi import Printer
 
 from .. import tags_to_entities
 from ...training import iob_to_biluo
-from ...lang.xx import MultiLanguage
 from ...tokens import Doc, Span
-from ...util import load_model
+from ...util import load_model, get_lang_class
 
 
 def conll_ner_to_docs(
@@ -86,7 +86,7 @@ def conll_ner_to_docs(
     if model:
         nlp = load_model(model)
     else:
-        nlp = MultiLanguage()
+        nlp = get_lang_class("xx")()
     output_docs = []
     for conll_doc in input_data.strip().split(doc_delimiter):
         conll_doc = conll_doc.strip()
@@ -136,7 +136,7 @@ def segment_sents_and_docs(doc, n_sents, doc_delimiter, model=None, msg=None):
             "Segmenting sentences with sentencizer. (Use `-b model` for "
             "improved parser-based sentence segmentation.)"
         )
-        nlp = MultiLanguage()
+        nlp = get_lang_class("xx")()
         sentencizer = nlp.create_pipe("sentencizer")
     lines = doc.strip().split("\n")
     words = [line.strip().split()[0] for line in lines]