# Training loop settings: early-stopping patience, evaluation frequency,
# dropout, data options and score weighting.
[training]
patience = 10000
eval_frequency = 200
dropout = 0.2
init_tok2vec = null
vectors = null
max_epochs = 100
orth_variant_level = 0.0
gold_preproc = true
max_length = 0
use_gpu = -1
scores = ["tags_acc", "uas", "las"]
score_weights = {"las": 0.8, "tags_acc": 0.2}
limit = 0
seed = 0
accumulate_gradient = 2

# Batch size grows from 100 to 1000 via a compounding schedule.
[training.batch_size]
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001

# Adam optimizer hyperparameters.
[optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001
beta1 = 0.9
beta2 = 0.999

[nlp]
lang = "en"
vectors = ${training:vectors}

# Pipeline components: a shared tok2vec feeding a tagger and a parser.
[nlp.pipeline.tok2vec]
factory = "tok2vec"

[nlp.pipeline.tagger]
factory = "tagger"

[nlp.pipeline.parser]
factory = "parser"

[nlp.pipeline.tagger.model]
@architectures = "spacy.Tagger.v1"

# The tagger and parser models read the tensors produced by the tok2vec
# component, so their width is tied to the tok2vec model's width.
[nlp.pipeline.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${nlp.pipeline.tok2vec.model:width}

[nlp.pipeline.parser.model]
@architectures = "spacy.TransitionBasedParser.v1"
nr_feature_tokens = 8
hidden_width = 64
maxout_pieces = 3

[nlp.pipeline.parser.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${nlp.pipeline.tok2vec.model:width}

# Shared embedding layer used by all components above.
[nlp.pipeline.tok2vec.model]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = ${nlp:vectors}
width = 96
depth = 4
window_size = 1
embed_size = 2000
maxout_pieces = 3
subword_features = true
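The ${...} values are interpolated references: ${training:vectors} copies the vectors setting from the [training] block, and ${nlp.pipeline.tok2vec.model:width} keeps the tagger's and parser's tok2vec width in sync with the shared embedding layer. As a minimal sketch (assuming the block above is saved as "config.cfg", a hypothetical filename, and a Thinc version that still accepts this older ${section:key} interpolation syntax), the file can be loaded and inspected with Thinc's config system, which spaCy's training config builds on:

# Minimal sketch: load and inspect the config with Thinc.
from thinc.api import Config

config = Config().from_disk("config.cfg")

# Config behaves like a nested dict of parsed Python values.
print(config["training"]["dropout"])      # 0.2
print(config["optimizer"]["learn_rate"])  # 0.001
print(config["nlp"]["lang"])              # "en"

Values come back as parsed Python objects, so dropout is the float 0.2 rather than the string "0.2", and null becomes None.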