genienlp/tests/test_kfserver.sh

#!/usr/bin/env bash
. ./tests/lib.sh
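# lib.sh presumably provides $SRCDIR, $workdir and $EMBEDDING_DIR used throughout this test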
i=0
# test kfserver
for hparams in \
"--model TransformerSeq2Seq --pretrained_model sshleifer/bart-tiny-random" ;
do
    # train
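    # sshleifer/bart-tiny-random is a tiny randomly-initialized BART checkpoint, so the
    # 6-iteration training run is fast and only needs to produce a loadable model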
    genienlp train --train_tasks almond --train_batch_tokens 100 --val_batch_size 100 --train_iterations 6 --preserve_case --save_every 2 --log_every 2 --val_every 2 --save $workdir/model_$i --data $SRCDIR/dataset/ $hparams --exist_ok --skip_cache --embeddings $EMBEDDING_DIR --no_commit
    # run kfserver in background
    (genienlp kfserver --path $workdir/model_$i)&
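    # $! is the PID of the backgrounded subshell, saved so the server can be killed later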
    SERVER_PID=$!
    # give the server enough time to start up
    sleep 15
    # send predict request via http
    request='{"id":"123", "task": "generic", "instances": [{"context": "", "question": "what is the weather"}]}'
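    # POST to the KFServing v1 predict endpoint; the model is served under the name "nlp".
    # -s silences progress output, -o /dev/stderr forwards the response body to stderr,
    # and -w "%{http_code}" leaves only the HTTP status code in $status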
    status=$(curl -s -o /dev/stderr -w "%{http_code}" http://localhost:8080/v1/models/nlp:predict -d "$request")
    kill $SERVER_PID
    if [[ "$status" -ne 200 ]]; then
        echo "Unexpected http status: $status"
        exit 1
    fi
    rm -rf $workdir/model_$i
    i=$((i+1))
done
rm -rf $workdir
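# clean up any shared-memory files PyTorch may have left behind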
rm -rf $SRCDIR/torch-shm-file-*