//- 💫 DOCS > API > LANGUAGE MODELS

include ../../_includes/_mixins

p spaCy currently supports the following languages and capabilities:

+aside-code("Download language models", "bash").
    python -m spacy download en
    python -m spacy download de

+table([ "Language", "Token", "SBD", "Lemma", "POS", "NER", "Dep", "Vector", "Sentiment"])
    +row
        +cell English #[code en]
        each icon in [ "pro", "pro", "pro", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        +cell German #[code de]
        each icon in [ "pro", "pro", "con", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]
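
p
    |  Once a model has been downloaded via the commands above, it can be
    |  loaded back into Python under its shortcut name. The snippet below is a
    |  minimal sketch of that workflow; the example text and printed
    |  attributes are illustrative.

+code("Loading a downloaded model", "python").
    import spacy

    nlp = spacy.load('en')             # load the English model downloaded above
    doc = nlp(u'This is a sentence.')  # process a text into a Doc object
    print([(token.text, token.pos_) for token in doc])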

+h(2, "alpha-support") Alpha support

p
    |  Work has started on the following languages. You can help by improving
    |  the existing language data and extending the tokenization patterns.

+table([ "Language", "Source" ])
    each language, code in { zh: "Chinese", es: "Spanish", it: "Italian", fr: "French", pt: "Portuguese", nl: "Dutch", sv: "Swedish", fi: "Finnish", hu: "Hungarian", bn: "Bengali", he: "Hebrew" }
        +row
            +cell #{language} #[code=code]
            +cell
                +src(gh("spaCy", "spacy/" + code)) spacy/#{code}
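
p
    |  The layout of the language data differs from language to language, but
    |  a tokenizer exception entry in the directories listed above generally
    |  follows the pattern below. This is an illustrative sketch modelled on
    |  the existing English data, not a drop-in file.

+code("Tokenizer exception sketch", "python").
    from spacy.attrs import ORTH, LEMMA

    # map an exceptional string to the tokens it should be split into
    TOKENIZER_EXCEPTIONS = {
        "don't": [
            {ORTH: "do"},
            {ORTH: "n't", LEMMA: "not"}
        ]
    }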

p
    |  Chinese tokenization requires the
    |  #[+a("https://github.com/fxsjy/jieba") Jieba] library. Statistical
    |  models are coming soon.
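
p
    |  The snippet below shows the underlying Jieba segmentation step on its
    |  own, as a rough sketch of what the alpha #[code spacy/zh] support
    |  builds on; the example sentence is illustrative.

+code("Jieba word segmentation", "python").
    # assumes Jieba has been installed: pip install jieba
    import jieba

    # segment a Chinese sentence into a list of word strings
    words = list(jieba.cut(u'这是一个句子。', cut_all=False))
    print(words)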