//- 💫 DOCS > USAGE > SPACY 101 > PIPELINES

p
    | When you call #[code nlp] on a text, spaCy first tokenizes the text to
    | produce a #[code Doc] object. The #[code Doc] is then processed in several
    | different steps – this is also referred to as the
    | #[strong processing pipeline]. The pipeline used by the
    | #[+a("/docs/usage/models") default models] consists of a
    | tensorizer, a tagger, a parser and an entity recognizer. Each pipeline
    | component returns the processed #[code Doc], which is then passed on to
    | the next component.

+image
    include ../../../assets/img/docs/pipeline.svg
    .u-text-right
        +button("/assets/img/docs/pipeline.svg", false, "secondary").u-text-tag View large graphic

+aside
    | #[strong Name:] ID of the pipeline component.#[br]
    | #[strong Component:] spaCy's implementation of the component.#[br]
    | #[strong Creates:] Objects, attributes and properties modified and set by
    | the component.

+table(["Name", "Component", "Creates", "Description"])
    +row
        +cell tokenizer
        +cell #[+api("tokenizer") #[code Tokenizer]]
        +cell #[code Doc]
        +cell Segment text into tokens.

    +row("divider")
        +cell tensorizer
        +cell #[code TokenVectorEncoder]
        +cell #[code Doc.tensor]
        +cell Create feature representation tensor for #[code Doc].

    +row
        +cell tagger
        +cell #[+api("tagger") #[code Tagger]]
        +cell #[code Doc[i].tag]
        +cell Assign part-of-speech tags.

    +row
        +cell parser
        +cell #[+api("dependencyparser") #[code DependencyParser]]
        +cell
            | #[code Doc[i].head], #[code Doc[i].dep], #[code Doc.sents],
            | #[code Doc.noun_chunks]
        +cell Assign dependency labels.

    +row
        +cell ner
        +cell #[+api("entityrecognizer") #[code EntityRecognizer]]
        +cell #[code Doc.ents], #[code Doc[i].ent_iob], #[code Doc[i].ent_type]
        +cell Detect and label named entities.

p
    | The processing pipeline always #[strong depends on the statistical model]
    | and its capabilities. For example, a pipeline can only include an entity
    | recognizer component if the model includes data to make predictions of
    | entity labels. This is why each model will specify the pipeline to use
    | in its meta data, as a simple list containing the component names:

+code(false, "json").
    "pipeline": ["tensorizer", "tagger", "parser", "ner"]