//- 💫 DOCS > API > LANGUAGE MODELS

include ../../_includes/_mixins

p spaCy currently supports the following languages and capabilities:

+aside-code("Download language models", "bash").
    python -m spacy download en
    python -m spacy download de
    python -m spacy download fr

+table([ "Language", "Token", "SBD", "Lemma", "POS", "NER", "Dep", "Vector", "Sentiment"])
|
|
+row
|
|
+cell English #[code en]
|
|
each icon in [ "pro", "pro", "pro", "pro", "pro", "pro", "pro", "con" ]
|
|
+cell.u-text-center #[+procon(icon)]
|
|
|
|
+row
|
|
+cell German #[code de]
|
|
each icon in [ "pro", "pro", "con", "pro", "pro", "pro", "pro", "con" ]
|
|
+cell.u-text-center #[+procon(icon)]
|
|
|
|
+row
|
|
+cell French #[code fr]
|
|
each icon in [ "pro", "pro", "con", "pro", "con", "pro", "pro", "con" ]
|
|
+cell.u-text-center #[+procon(icon)]
|
|
|
|
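p
    |  For example, a downloaded model can be loaded with #[code spacy.load()]
    |  and used to inspect the capabilities listed above. This is a minimal
    |  sketch and assumes the #[code en] model has been installed with the
    |  command shown in the sidebar.

+code("Using a downloaded model").
    import spacy

    nlp = spacy.load('en')                                 # assumes: python -m spacy download en
    doc = nlp(u'Apple is looking at buying a U.K. startup.')

    print([token.text for token in doc])                   # tokenization
    print([(token.lemma_, token.pos_) for token in doc])   # lemmas and part-of-speech tags
    print([(ent.text, ent.label_) for ent in doc.ents])    # named entities
    print([sent.text for sent in doc.sents])               # sentence boundaries (from the parser)
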
+h(2, "available") Available models
|
|
|
|
include ../usage/_models-list
|
|
|
|
+h(2, "alpha-support") Alpha support
|
|
|
|
p
    |  Work has started on the following languages. You can help by improving
    |  the existing language data and extending the tokenization patterns.

+aside("Dependencies")
|
|
| Some language tokenizers require external dependencies. To use #[strong Chinese],
|
|
| you need to have #[+a("https://github.com/fxsjy/jieba") Jieba] installed.
|
|
| The #[strong Japanese] tokenizer requires
|
|
| #[+a("https://github.com/mocobeta/janome") Janome].
|
|
|
|
+table([ "Language", "Source" ])
|
|
each language, code in { es: "Spanish", it: "Italian", pt: "Portuguese", nl: "Dutch", sv: "Swedish", fi: "Finnish", nb: "Norwegian Bokmål", hu: "Hungarian", bn: "Bengali", he: "Hebrew", zh: "Chinese", ja: "Japanese" }
|
|
+row
|
|
+cell #{language} #[code=code]
|
|
+cell
|
|
+src(gh("spaCy", "spacy/" + code)) spacy/#{code}
|
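
p
    |  These languages don't ship with statistical models yet, but their
    |  tokenizers and language data can already be tried out. As a rough
    |  sketch, assuming the language class is exposed at the top of its
    |  package (for example #[code spacy.es.Spanish]), it can be imported and
    |  instantiated directly. The exact entry point may differ per language
    |  and spaCy version.

+code("Trying an alpha tokenizer").
    # a sketch, assuming the Spanish class is importable as spacy.es.Spanish
    from spacy.es import Spanish

    nlp = Spanish()                       # no statistical model: tokenizer and language data only
    doc = nlp(u'Esto es una frase.')
    print([token.text for token in doc])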