spaCy/website/docs/api/language-models.jade

49 lines
1.6 KiB
Plaintext

//- 💫 DOCS > API > LANGUAGE MODELS

include ../../_includes/_mixins

p spaCy currently supports the following languages and capabilities:

//- The trailing "." makes the aside body literal block text (bash commands),
//- so these lines must be indented under the mixin call.
+aside-code("Download language models", "bash").
    python -m spacy download en
    python -m spacy download de
    python -m spacy download fr
//- Per-language capability matrix; each "pro"/"con" icon maps positionally
//- to the column headers (Token … Sentiment).
+table([ "Language", "Token", "SBD", "Lemma", "POS", "NER", "Dep", "Vector", "Sentiment"])
    +row
        +cell English #[code en]
        each icon in [ "pro", "pro", "pro", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        +cell German #[code de]
        each icon in [ "pro", "pro", "con", "pro", "pro", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]

    +row
        //- Language code fixed: was "de", must be "fr" (matches
        //- "python -m spacy download fr" above).
        +cell French #[code fr]
        each icon in [ "pro", "pro", "con", "pro", "con", "pro", "pro", "con" ]
            +cell.u-text-center #[+procon(icon)]
//- The model list is shared with the usage docs via a partial include.
+h(2, "available") Available models
include ../usage/_models-list
+h(2, "alpha-support") Alpha support

p
    | Work has started on the following languages. You can help by improving
    | the existing language data and extending the tokenization patterns.

//- Iterates the map as (value, key): "language" is the display name,
//- "code" the ISO code used for the spacy/<code> source directory link.
+table([ "Language", "Source" ])
    each language, code in { zh: "Chinese", es: "Spanish", it: "Italian", pt: "Portuguese", nl: "Dutch", sv: "Swedish", fi: "Finnish", hu: "Hungarian", bn: "Bengali", he: "Hebrew" }
        +row
            +cell #{language} #[code=code]
            +cell
                +src(gh("spaCy", "spacy/" + code)) spacy/#{code}
//- Piped text lines must be nested under the "p" tag to render as one paragraph.
p
    | Chinese tokenization requires the
    | #[+a("https://github.com/fxsjy/jieba") Jieba] library. Statistical
    | models are coming soon.