Remove broken 'self attention' option in pretrain

Matthew Honnibal 2020-07-02 22:32:25 +02:00
parent 7137a16df8
commit 56b820f6af
1 changed file with 0 additions and 2 deletions


@@ -39,7 +39,6 @@ from .train import _load_pretrained_tok2vec
     cnn_window=("Window size for CNN layers", "option", "cW", int),
     cnn_pieces=("Maxout size for CNN layers. 1 for Mish", "option", "cP", int),
     use_chars=("Whether to use character-based embedding", "flag", "chr", bool),
-    sa_depth=("Depth of self-attention layers", "option", "sa", int),
     bilstm_depth=("Depth of BiLSTM layers (requires PyTorch)", "option", "lstm", int),
     embed_rows=("Number of embedding rows", "option", "er", int),
     loss_func=(
@@ -87,7 +86,6 @@ def pretrain(
     width=96,
     conv_depth=4,
     cnn_pieces=3,
-    sa_depth=0,
     cnn_window=1,
     bilstm_depth=0,
     use_chars=False,
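
For context, a minimal sketch of how these plac-style (help, kind, abbreviation, type) tuples become command-line flags; the script name and function body are hypothetical stand-ins, not spaCy's actual implementation. After this change, only annotations like the ones shown above remain, so no -sa flag is generated:

import plac

@plac.annotations(
    # Each tuple is (help text, kind, abbreviation, type); an "option"
    # takes a value, while a "flag" is a boolean switch.
    width=("Width of CNN layers", "option", "cw", int),
    use_chars=("Whether to use character-based embedding", "flag", "chr", bool),
)
def pretrain(width=96, use_chars=False):
    # Stand-in body: the real command trains a tok2vec model.
    print(f"width={width}, use_chars={use_chars}")

if __name__ == "__main__":
    plac.call(pretrain)  # e.g. `python pretrain_demo.py -cw 128 -chr`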