spaCy/website/docs/api/language-models.jade

//- 💫 DOCS > API > LANGUAGE MODELS
include ../../_includes/_mixins
p spaCy currently supports the following languages and capabilities:
+aside-code("Download language models", "bash").
2017-03-25 09:22:05 +00:00
python -m spacy download en
python -m spacy download de
2017-04-26 18:50:02 +00:00
python -m spacy download fr
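
p
    | Once downloaded, a model can be loaded via its shortcut link. The
    | snippet below is a minimal sketch and assumes the #[code en] model has
    | been downloaded as shown above.

+aside-code("Loading a model", "python").
    import spacy

    nlp = spacy.load('en')              # load the model via its shortcut link
    doc = nlp(u'This is a sentence.')   # process a text with the pipeline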
+table([ "Language", "Token", "SBD", "Lemma", "POS", "NER", "Dep", "Vector", "Sentiment"])
+row
+cell English #[code en]
each icon in [ "pro", "pro", "pro", "pro", "pro", "pro", "pro", "con" ]
+cell.u-text-center #[+procon(icon)]
+row
+cell German #[code de]
each icon in [ "pro", "pro", "con", "pro", "pro", "pro", "pro", "con" ]
+cell.u-text-center #[+procon(icon)]
2017-04-26 18:50:02 +00:00
+row
+cell French #[code fr]
2017-04-26 18:50:02 +00:00
each icon in [ "pro", "pro", "con", "pro", "con", "pro", "pro", "con" ]
+cell.u-text-center #[+procon(icon)]
+h(2, "available") Available models
include ../usage/_models-list
+h(2, "alpha-support") Alpha support
p
| Work has started on the following languages. You can help by improving
| the existing language data and extending the tokenization patterns.
+aside("Dependencies")
| Some language tokenizers require external dependencies. To use #[strong Chinese],
| you need to have #[+a("https://github.com/fxsjy/jieba") Jieba] installed.
| The #[strong Japanese] tokenizer requires
| #[+a("https://github.com/mocobeta/janome") Janome].
+table([ "Language", "Source" ])
each language, code in { es: "Spanish", it: "Italian", pt: "Portuguese", nl: "Dutch", sv: "Swedish", fi: "Finnish", nb: "Norwegian Bokmål", hu: "Hungarian", bn: "Bengali", he: "Hebrew", zh: "Chinese", ja: "Japanese" }
2016-12-20 23:54:52 +00:00
+row
+cell #{language} #[code=code]
+cell
+src(gh("spaCy", "spacy/" + code)) spacy/#{code}