From 1d216a7ea6d1cb56dc94f8394914c9be7c4fe0e8 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Mon, 24 Jul 2023 10:41:04 +0200 Subject: [PATCH 01/36] Update README for v3.6 (#12844) * Update most recent release * Switch from azure to GHA CI tests badge * Remove link to survey * Format --- README.md | 72 +++++++++++++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/README.md b/README.md index 59d3ee9ee..9a8f90749 100644 --- a/README.md +++ b/README.md @@ -6,23 +6,20 @@ spaCy is a library for **advanced Natural Language Processing** in Python and Cython. It's built on the very latest research, and was designed from day one to be used in real products. -spaCy comes with -[pretrained pipelines](https://spacy.io/models) and -currently supports tokenization and training for **70+ languages**. It features -state-of-the-art speed and **neural network models** for tagging, -parsing, **named entity recognition**, **text classification** and more, -multi-task learning with pretrained **transformers** like BERT, as well as a +spaCy comes with [pretrained pipelines](https://spacy.io/models) and currently +supports tokenization and training for **70+ languages**. It features +state-of-the-art speed and **neural network models** for tagging, parsing, +**named entity recognition**, **text classification** and more, multi-task +learning with pretrained **transformers** like BERT, as well as a production-ready [**training system**](https://spacy.io/usage/training) and easy model packaging, deployment and workflow management. spaCy is commercial -open-source software, released under the [MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE). +open-source software, released under the +[MIT license](https://github.com/explosion/spaCy/blob/master/LICENSE). -💥 **We'd love to hear more about your experience with spaCy!** -[Fill out our survey here.](https://form.typeform.com/to/aMel9q9f) - -💫 **Version 3.5 out now!** +💫 **Version 3.6 out now!** [Check out the release notes here.](https://github.com/explosion/spaCy/releases) -[![Azure Pipelines](https://img.shields.io/azure-devops/build/explosion-ai/public/8/master.svg?logo=azure-pipelines&style=flat-square&label=build)](https://dev.azure.com/explosion-ai/public/_build?definitionId=8) +[![tests](https://github.com/explosion/spaCy/actions/workflows/tests.yml/badge.svg)](https://github.com/explosion/spaCy/actions/workflows/tests.yml) [![Current Release Version](https://img.shields.io/github/release/explosion/spacy.svg?style=flat-square&logo=github)](https://github.com/explosion/spaCy/releases) [![pypi Version](https://img.shields.io/pypi/v/spacy.svg?style=flat-square&logo=pypi&logoColor=white)](https://pypi.org/project/spacy/) [![conda Version](https://img.shields.io/conda/vn/conda-forge/spacy.svg?style=flat-square&logo=conda-forge&logoColor=white)](https://anaconda.org/conda-forge/spacy) @@ -35,22 +32,22 @@ open-source software, released under the [MIT license](https://github.com/explos ## 📖 Documentation -| Documentation | | -| ----------------------------- | ---------------------------------------------------------------------- | -| ⭐️ **[spaCy 101]** | New to spaCy? Here's everything you need to know! | -| 📚 **[Usage Guides]** | How to use spaCy and its features. | -| 🚀 **[New in v3.0]** | New features, backwards incompatibilities and migration guide. | -| 🪐 **[Project Templates]** | End-to-end workflows you can clone, modify and run. 
| -| 🎛 **[API Reference]** | The detailed reference for spaCy's API. | -| 📦 **[Models]** | Download trained pipelines for spaCy. | -| 🌌 **[Universe]** | Plugins, extensions, demos and books from the spaCy ecosystem. | -| ⚙️ **[spaCy VS Code Extension]** | Additional tooling and features for working with spaCy's config files. | -| 👩‍🏫 **[Online Course]** | Learn spaCy in this free and interactive online course. | -| 📺 **[Videos]** | Our YouTube channel with video tutorials, talks and more. | -| 🛠 **[Changelog]** | Changes and version history. | -| 💝 **[Contribute]** | How to contribute to the spaCy project and code base. | -| spaCy Tailored Pipelines | Get a custom spaCy pipeline, tailor-made for your NLP problem by spaCy's core developers. Streamlined, production-ready, predictable and maintainable. Start by completing our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more →](https://explosion.ai/spacy-tailored-pipelines)** | -| spaCy Tailored Pipelines | Bespoke advice for problem solving, strategy and analysis for applied NLP projects. Services include data strategy, code reviews, pipeline design and annotation coaching. Curious? Fill in our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more →](https://explosion.ai/spacy-tailored-analysis)** | +| Documentation | | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| ⭐️ **[spaCy 101]** | New to spaCy? Here's everything you need to know! | +| 📚 **[Usage Guides]** | How to use spaCy and its features. | +| 🚀 **[New in v3.0]** | New features, backwards incompatibilities and migration guide. | +| 🪐 **[Project Templates]** | End-to-end workflows you can clone, modify and run. | +| 🎛 **[API Reference]** | The detailed reference for spaCy's API. | +| 📦 **[Models]** | Download trained pipelines for spaCy. | +| 🌌 **[Universe]** | Plugins, extensions, demos and books from the spaCy ecosystem. | +| ⚙️ **[spaCy VS Code Extension]** | Additional tooling and features for working with spaCy's config files. | +| 👩‍🏫 **[Online Course]** | Learn spaCy in this free and interactive online course. | +| 📺 **[Videos]** | Our YouTube channel with video tutorials, talks and more. | +| 🛠 **[Changelog]** | Changes and version history. | +| 💝 **[Contribute]** | How to contribute to the spaCy project and code base. | +| spaCy Tailored Pipelines | Get a custom spaCy pipeline, tailor-made for your NLP problem by spaCy's core developers. Streamlined, production-ready, predictable and maintainable. Start by completing our 5-minute questionnaire to tell us what you need and we'll be in touch! **[Learn more →](https://explosion.ai/spacy-tailored-pipelines)** | +| spaCy Tailored Pipelines | Bespoke advice for problem solving, strategy and analysis for applied NLP projects. Services include data strategy, code reviews, pipeline design and annotation coaching. Curious? Fill in our 5-minute questionnaire to tell us what you need and we'll be in touch! 
**[Learn more →](https://explosion.ai/spacy-tailored-analysis)** | [spacy 101]: https://spacy.io/usage/spacy-101 [new in v3.0]: https://spacy.io/usage/v3 @@ -58,7 +55,7 @@ open-source software, released under the [MIT license](https://github.com/explos [api reference]: https://spacy.io/api/ [models]: https://spacy.io/models [universe]: https://spacy.io/universe -[spaCy VS Code Extension]: https://github.com/explosion/spacy-vscode +[spacy vs code extension]: https://github.com/explosion/spacy-vscode [videos]: https://www.youtube.com/c/ExplosionAI [online course]: https://course.spacy.io [project templates]: https://github.com/explosion/projects @@ -92,7 +89,9 @@ more people can benefit from it. - State-of-the-art speed - Production-ready **training system** - Linguistically-motivated **tokenization** -- Components for named **entity recognition**, part-of-speech-tagging, dependency parsing, sentence segmentation, **text classification**, lemmatization, morphological analysis, entity linking and more +- Components for named **entity recognition**, part-of-speech-tagging, + dependency parsing, sentence segmentation, **text classification**, + lemmatization, morphological analysis, entity linking and more - Easily extensible with **custom components** and attributes - Support for custom models in **PyTorch**, **TensorFlow** and other frameworks - Built in **visualizers** for syntax and NER @@ -118,8 +117,8 @@ For detailed installation instructions, see the ### pip Using pip, spaCy releases are available as source packages and binary wheels. -Before you install spaCy and its dependencies, make sure that -your `pip`, `setuptools` and `wheel` are up to date. +Before you install spaCy and its dependencies, make sure that your `pip`, +`setuptools` and `wheel` are up to date. ```bash pip install -U pip setuptools wheel @@ -174,9 +173,9 @@ with the new version. ## 📦 Download model packages -Trained pipelines for spaCy can be installed as **Python packages**. This -means that they're a component of your application, just like any other module. -Models can be installed using spaCy's [`download`](https://spacy.io/api/cli#download) +Trained pipelines for spaCy can be installed as **Python packages**. This means +that they're a component of your application, just like any other module. Models +can be installed using spaCy's [`download`](https://spacy.io/api/cli#download) command, or manually by pointing pip to a path or URL. | Documentation | | @@ -242,8 +241,7 @@ do that depends on your system. | **Mac** | Install a recent version of [XCode](https://developer.apple.com/xcode/), including the so-called "Command Line Tools". macOS and OS X ship with Python and git preinstalled. | | **Windows** | Install a version of the [Visual C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/) or [Visual Studio Express](https://visualstudio.microsoft.com/vs/express/) that matches the version that was used to compile your Python interpreter. | -For more details -and instructions, see the documentation on +For more details and instructions, see the documentation on [compiling spaCy from source](https://spacy.io/usage#source) and the [quickstart widget](https://spacy.io/usage#section-quickstart) to get the right commands for your platform and Python version. 
From e2b89012a2ecb7f778e72b57835232c08a218f22 Mon Sep 17 00:00:00 2001
From: Victoria <80417010+victorialslocum@users.noreply.github.com>
Date: Mon, 24 Jul 2023 14:44:47 +0200
Subject: [PATCH 02/36] Add spacy-llm docs to website (#12782)

* initial commit
* update for v0.4.0
* Apply suggestions from code review
* Fix formatting
* Apply suggestions from code review
* Update website/docs/api/large-language-models.mdx
* Update website/docs/api/large-language-models.mdx
* update usage page
* Apply suggestions from review
* Apply suggestions from review
* fix links
* fix relative links
* Apply suggestions from code review

Co-authored-by: Sofie Van Landeghem

* Apply suggestions from code review

Co-authored-by: Sofie Van Landeghem

* Apply suggestions from review

* Add section on Llama 2. Format.

---------

Co-authored-by: Raphael Mitsch
Co-authored-by: Sofie Van Landeghem
---
 website/docs/api/large-language-models.mdx   | 1488 ++++++++++++++++++
 website/docs/usage/large-language-models.mdx |  512 ++++++
 website/meta/sidebars.json                   |   14 +-
 website/pages/index.tsx                      |  102 +-
 4 files changed, 2060 insertions(+), 56 deletions(-)
 create mode 100644 website/docs/api/large-language-models.mdx
 create mode 100644 website/docs/usage/large-language-models.mdx

diff --git a/website/docs/api/large-language-models.mdx b/website/docs/api/large-language-models.mdx
new file mode 100644
index 000000000..cc8328790
--- /dev/null
+++ b/website/docs/api/large-language-models.mdx
@@ -0,0 +1,1488 @@
---
title: Large Language Models
teaser: Integrating LLMs into structured NLP pipelines
menu:
  - ['Config', 'config']
  - ['Tasks', 'tasks']
  - ['Models', 'models']
  - ['Cache', 'cache']
  - ['Various Functions', 'various-functions']
---

[The spacy-llm package](https://github.com/explosion/spacy-llm) integrates Large
Language Models (LLMs) into spaCy, featuring a modular system for **fast
prototyping** and **prompting**, and turning unstructured responses into
**robust outputs** for various NLP tasks, **no training data** required.

## Config {id="config"}

`spacy-llm` exposes a `llm` factory that accepts the following configuration
options:

| Argument         | Description                                                                                               |
| ---------------- | --------------------------------------------------------------------------------------------------------- |
| `task`           | An LLMTask can generate prompts and parse LLM responses. See [docs](#tasks). ~~Optional[LLMTask]~~         |
| `model`          | Callable querying a specific LLM API. See [docs](#models). ~~Callable[[Iterable[Any]], Iterable[Any]]~~    |
| `cache`          | Cache to use for caching prompts and responses per doc (batch). See [docs](#cache). ~~Cache~~              |
| `save_io`        | Whether to save prompts/responses within `Doc.user_data["llm_io"]`. ~~bool~~                               |
| `validate_types` | Whether to check if signatures of configured model and task are consistent. ~~bool~~                       |

An `llm` component is defined by two main settings:

- A [**task**](#tasks), defining the prompt to send to the LLM as well as the
  functionality to parse the resulting response back into structured fields on
  the [Doc](/api/doc) objects.
- A [**model**](#models) defining the model and how to connect to it. Note that
  `spacy-llm` supports both access to external APIs (such as OpenAI) and access
  to self-hosted open-source LLMs (such as using Dolly through Hugging Face).

Moreover, `spacy-llm` exposes a customizable [**caching**](#cache) functionality
to avoid running the same document through an LLM service (be it local or
through a REST API) more than once.

Finally, you can choose to save a stringified version of LLM prompts/responses
within the `Doc.user_data["llm_io"]` attribute by setting `save_io` to `True`.
`Doc.user_data["llm_io"]` is a dictionary containing one entry for every LLM
component within the `nlp` pipeline. Each entry is itself a dictionary, with two
keys: `prompt` and `response`.

A note on `validate_types`: by default, `spacy-llm` checks whether the
signatures of the `model` and `task` callables are consistent with each other
and emits a warning if they don't match. `validate_types` can be set to `False`
if you want to disable this behavior.
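In code, the options above map directly onto
[`nlp.add_pipe`](/api/language#add_pipe). As a minimal sketch - the exact task
and model settings below are illustrative picks from the catalogs later in this
document, not required values, and an OpenAI key is assumed to be set in the
environment:

```python
import spacy

nlp = spacy.blank("en")
# Configure the component inline; the same settings could equally live in a
# config file. Requires OPENAI_API_KEY to be set in the environment.
nlp.add_pipe(
    "llm",
    config={
        "task": {
            "@llm_tasks": "spacy.NER.v2",
            "labels": "PERSON,ORGANISATION,LOCATION",
        },
        "model": {"@llm_models": "spacy.GPT-3-5.v1"},
    },
)
doc = nlp("Jack and Jill went up the hill in Boston.")
print([(ent.text, ent.label_) for ent in doc.ents])
```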
### Tasks {id="tasks"}

A _task_ defines an NLP problem or question that will be sent to the LLM via a
prompt. Further, the task defines how to parse the LLM's responses back into
structured information. All tasks are registered in the `llm_tasks` registry.

#### task.generate_prompts {id="task-generate-prompts"}

Takes a collection of documents and returns a collection of "prompts", which
can be of type `Any`. Often, prompts are of type `str` - but this is not
enforced to allow for maximum flexibility in the framework.

| Argument    | Description                               |
| ----------- | ----------------------------------------- |
| `docs`      | The input documents. ~~Iterable[Doc]~~    |
| **RETURNS** | The generated prompts. ~~Iterable[Any]~~  |

#### task.parse_responses {id="task-parse-responses"}

Takes a collection of LLM responses and the original documents, parses the
responses into structured information, and sets the annotations on the
documents. The `parse_responses` function is free to set the annotations in any
way, including `Doc` fields like `ents`, `spans` or `cats`, or using custom
defined fields.

The `responses` are of type `Iterable[Any]`, though they will often be `str`
objects. This depends on the return type of the [model](#models).

| Argument    | Description                                 |
| ----------- | ------------------------------------------- |
| `docs`      | The input documents. ~~Iterable[Doc]~~      |
| `responses` | The LLM responses. ~~Iterable[Any]~~        |
| **RETURNS** | The annotated documents. ~~Iterable[Doc]~~  |
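Together, these two methods make up the full task contract. The following is a
minimal sketch of a custom task implementing it - the registry name, the
`user_data` key and the prompt wording are illustrative assumptions, not part
of `spacy-llm`:

```python
from typing import Iterable

from spacy.tokens import Doc
from spacy_llm.registry import registry


class SimpleSummaryTask:
    """Toy task: ask the LLM for a one-sentence summary of each document."""

    def generate_prompts(self, docs: Iterable[Doc]) -> Iterable[str]:
        for doc in docs:
            yield f"Summarize the following text in one sentence:\n{doc.text}"

    def parse_responses(
        self, docs: Iterable[Doc], responses: Iterable[str]
    ) -> Iterable[Doc]:
        for doc, response in zip(docs, responses):
            # Store the raw response; a real task would validate it first.
            doc.user_data["simple_summary"] = response.strip()
            yield doc


@registry.llm_tasks("my.SimpleSummary.v1")
def make_simple_summary_task() -> SimpleSummaryTask:
    return SimpleSummaryTask()
```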
#### spacy.Summarization.v1 {id="summarization-v1"}

The `spacy.Summarization.v1` task supports both zero-shot and few-shot
prompting.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.Summarization.v1"
> examples = null
> max_n_words = null
> ```

| Argument      | Description                                                                                                                                                                                                                                                                              |
| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `template`    | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [summarization.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/summarization.jinja). ~~str~~         |
| `examples`    | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                                                                                                            |
| `max_n_words` | Maximum number of words to be used in summary. Note that this should not be expected to work exactly. Defaults to `None`. ~~Optional[int]~~                                                                                                                                               |
| `field`       | Name of extension attribute to store summary in (i. e. the summary will be available in `doc._.{field}`). Defaults to `summary`. ~~str~~                                                                                                                                                  |

The summarization task prompts the model for a concise summary of the provided
text. It optionally allows limiting the response to a certain number of words -
note that this requirement will be included in the prompt, but the task doesn't
perform a hard cut-off. It's hence possible that your summary exceeds
`max_n_words`.

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```yaml
- text: >
    The United Nations, referred to informally as the UN, is an
    intergovernmental organization whose stated purposes are to maintain
    international peace and security, develop friendly relations among nations,
    achieve international cooperation, and serve as a centre for harmonizing the
    actions of nations. It is the world's largest international organization.
    The UN is headquartered on international territory in New York City, and the
    organization has other offices in Geneva, Nairobi, Vienna, and The Hague,
    where the International Court of Justice is headquartered.\n\n The UN was
    established after World War II with the aim of preventing future world wars,
    and succeeded the League of Nations, which was characterized as
    ineffective.
  summary:
    'The UN is an international organization that promotes global peace,
    cooperation, and harmony. Established after WWII, its purpose is to prevent
    future world wars.'
```

```ini
[components.llm.task]
@llm_tasks = "spacy.Summarization.v1"
max_n_words = 20
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "summarization_examples.yml"
```
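Assembled into a pipeline, the task writes its output to the doc's extension
attribute. A quick sketch - again assuming an OpenAI key in the environment and
treating the model choice as an example:

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe(
    "llm",
    config={
        "task": {"@llm_tasks": "spacy.Summarization.v1", "max_n_words": 20},
        "model": {"@llm_models": "spacy.GPT-3-5.v1"},
    },
)
doc = nlp(
    "The UN is headquartered on international territory in New York City and "
    "has other offices in Geneva, Nairobi, Vienna, and The Hague."
)
print(doc._.summary)  # aims for at most 20 words, but the limit is not hard
```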
#### spacy.NER.v2 {id="ner-v2"}

The built-in NER task supports both zero-shot and few-shot prompting. This
version also supports explicitly defining the provided labels with custom
descriptions.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.NER.v2"
> labels = ["PERSON", "ORGANISATION", "LOCATION"]
> examples = null
> ```

| Argument                  | Description                                                                                                                                                                                                                                                             |
| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `labels`                  | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~                                                                                                                                                                                        |
| `template`                | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [ner.v2.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/ner.v2.jinja). ~~str~~       |
| `label_definitions`       | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~                                                                    |
| `examples`                | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                                                                                            |
| `normalizer`              | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~                                                                                                 |
| `alignment_mode`          | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~                                                                                            |
| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~                                                                                                                                                                                                 |
| `single_match`            | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~                                                                                                                                               |

The NER task implementation doesn't currently ask the LLM for specific offsets,
but simply expects a list of strings that represent the entities in the
document. This means that a form of string matching is required. This can be
configured by the following parameters:

- The `single_match` parameter is typically set to `False` to allow for
  multiple matches. For instance, the response from the LLM might only mention
  the entity "Paris" once, but you'd still want to mark it every time it occurs
  in the document.
- The case-sensitive matching is typically set to `False` to be robust against
  case variances in the LLM's output.
- The `alignment_mode` argument is used to match entities as returned by the
  LLM to the tokens from the original `Doc` - specifically it's used as
  argument in the call to [`doc.char_span()`](/api/doc#char_span). The
  `"strict"` mode will only keep spans that strictly adhere to the given token
  boundaries. `"contract"` will only keep those tokens that are fully within
  the given range, e.g. reducing `"New Y"` to `"New"`. Finally, `"expand"` will
  expand the span to the next token boundaries, e.g. expanding `"New Y"` out to
  `"New York"`. The three modes are demonstrated in the sketch at the end of
  this section.

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```yaml
- text: Jack and Jill went up the hill.
  entities:
    PERSON:
      - Jack
      - Jill
    LOCATION:
      - hill
- text: Jack fell down and broke his crown.
  entities:
    PERSON:
      - Jack
```

```ini
[components.llm.task]
@llm_tasks = "spacy.NER.v2"
labels = PERSON,ORGANISATION,LOCATION
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "ner_examples.yml"
```

> Label descriptions can also be used with explicit examples to give as much
> info to the LLM model as possible.

You can also write definitions for each label and provide them via the
`label_definitions` argument. This lets you tell the LLM exactly what you're
looking for rather than relying on the LLM to interpret its task given just the
label name. Label descriptions are freeform so you can write whatever you want
here, but through some experiments a brief description along with some examples
and counterexamples seems to work quite well.

```ini
[components.llm.task]
@llm_tasks = "spacy.NER.v2"
labels = PERSON,SPORTS_TEAM
[components.llm.task.label_definitions]
PERSON = "Extract any named individual in the text."
SPORTS_TEAM = "Extract the names of any professional sports team. e.g. Golden State Warriors, LA Lakers, Man City, Real Madrid"
```
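To see how the three `alignment_mode` values behave, here is a small standalone
sketch using [`Doc.char_span`](/api/doc#char_span) directly - the sentence and
character offsets are just an example:

```python
import spacy

nlp = spacy.blank("en")
doc = nlp("She lives in New York City.")

# Characters 13-18 cover "New Y", which crosses a token boundary.
print(doc.char_span(13, 18, alignment_mode="strict"))    # None
print(doc.char_span(13, 18, alignment_mode="contract"))  # New
print(doc.char_span(13, 18, alignment_mode="expand"))    # New York
```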
#### spacy.NER.v1 {id="ner-v1"}

The original version of the built-in NER task supports both zero-shot and
few-shot prompting.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.NER.v1"
> labels = PERSON,ORGANISATION,LOCATION
> examples = null
> ```

| Argument                  | Description                                                                                                                                                                      |
| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `labels`                  | Comma-separated list of labels. ~~str~~                                                                                                                                              |
| `examples`                | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                      |
| `normalizer`              | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~                               |
| `alignment_mode`          | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~      |
| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~                                                                                                            |
| `single_match`            | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~                                                         |

The NER task implementation doesn't currently ask the LLM for specific offsets,
but simply expects a list of strings that represent the entities in the
document. This means that a form of string matching is required. This can be
configured by the following parameters:

- The `single_match` parameter is typically set to `False` to allow for
  multiple matches. For instance, the response from the LLM might only mention
  the entity "Paris" once, but you'd still want to mark it every time it occurs
  in the document.
- The case-sensitive matching is typically set to `False` to be robust against
  case variances in the LLM's output.
- The `alignment_mode` argument is used to match entities as returned by the
  LLM to the tokens from the original `Doc` - specifically it's used as
  argument in the call to [`doc.char_span()`](/api/doc#char_span). The
  `"strict"` mode will only keep spans that strictly adhere to the given token
  boundaries. `"contract"` will only keep those tokens that are fully within
  the given range, e.g. reducing `"New Y"` to `"New"`. Finally, `"expand"` will
  expand the span to the next token boundaries, e.g. expanding `"New Y"` out to
  `"New York"`.

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```yaml
- text: Jack and Jill went up the hill.
  entities:
    PERSON:
      - Jack
      - Jill
    LOCATION:
      - hill
- text: Jack fell down and broke his crown.
  entities:
    PERSON:
      - Jack
```

```ini
[components.llm.task]
@llm_tasks = "spacy.NER.v1"
labels = PERSON,ORGANISATION,LOCATION
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "ner_examples.yml"
```

#### spacy.SpanCat.v2 {id="spancat-v2"}

The built-in SpanCat task is a simple adaptation of the NER task to support
overlapping entities and store its annotations in `doc.spans`.
> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.SpanCat.v2"
> labels = ["PERSON", "ORGANISATION", "LOCATION"]
> examples = null
> ```

| Argument                  | Description                                                                                                                                                                                                                                                                     |
| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `labels`                  | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~                                                                                                                                                                                                   |
| `template`                | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [`spancat.v2.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/spancat.v2.jinja). ~~str~~        |
| `label_definitions`       | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~                                                                               |
| `spans_key`               | Key of the `Doc.spans` dict to save the spans under. Defaults to `"sc"`. ~~str~~                                                                                                                                                                                                      |
| `examples`                | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                                                                                                       |
| `normalizer`              | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~                                                                                                                                |
| `alignment_mode`          | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~                                                                                                       |
| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~                                                                                                                                                                                                            |
| `single_match`            | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~                                                                                                                                                          |

Except for the `spans_key` parameter, the SpanCat task reuses the configuration
from the NER task. Refer to [its documentation](#ner-v2) for more insight.

#### spacy.SpanCat.v1 {id="spancat-v1"}

The original version of the built-in SpanCat task is a simple adaptation of the
v1 NER task to support overlapping entities and store its annotations in
`doc.spans`.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.SpanCat.v1"
> labels = PERSON,ORGANISATION,LOCATION
> examples = null
> ```

| Argument                  | Description                                                                                                                                                                      |
| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `labels`                  | Comma-separated list of labels. ~~str~~                                                                                                                                              |
| `spans_key`               | Key of the `Doc.spans` dict to save the spans under. Defaults to `"sc"`. ~~str~~                                                                                                     |
| `examples`                | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                      |
| `normalizer`              | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~                               |
| `alignment_mode`          | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~      |
| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~                                                                                                            |
| `single_match`            | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~                                                         |

Except for the `spans_key` parameter, the SpanCat task reuses the configuration
from the NER task. Refer to [its documentation](#ner-v1) for more insight.

#### spacy.TextCat.v3 {id="textcat-v3"}

Version 3 (the most recent) of the built-in TextCat task supports both
zero-shot and few-shot prompting. It allows setting definitions of labels;
those definitions are included in the prompt.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.TextCat.v3"
> labels = ["COMPLIMENT", "INSULT"]
> label_definitions = {
>     "COMPLIMENT": "a polite expression of praise or admiration.",
>     "INSULT": "a disrespectful or scornfully abusive remark or act."
> }
> examples = null
> ```

| Argument            | Description                                                                                                                                                                                                                                                                 |
| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `labels`            | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~                                                                                                                                                                                              |
| `label_definitions` | Dictionary of label definitions. Included in the prompt, if set. Defaults to `None`. ~~Optional[Dict[str, str]]~~                                                                                                                                                               |
| `template`          | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [`textcat.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/textcat.jinja). ~~str~~         |
| `examples`          | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                                                                                                  |
| `normalizer`        | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~                                                                                                     |
| `exclusive_classes` | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~                                                                                                                          |
| `allow_none`        | When set to `True`, allows the LLM to not return any of the given labels. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~                                                                                                  |
| `verbose`           | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~                                                                                                                                                              |

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```json
[
  {
    "text": "You look great!",
    "answer": "Compliment"
  },
  {
    "text": "You are not very clever at all.",
    "answer": "Insult"
  }
]
```

```ini
[components.llm.task]
@llm_tasks = "spacy.TextCat.v3"
labels = ["COMPLIMENT", "INSULT"]
label_definitions = {
    "COMPLIMENT": "a polite expression of praise or admiration.",
    "INSULT": "a disrespectful or scornfully abusive remark or act."
}
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "textcat_examples.json"
```
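Once assembled into a pipeline, the TextCat task writes its scores to
`doc.cats`. A short sketch - the model choice and the expected output are
illustrative assumptions:

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe(
    "llm",
    config={
        "task": {
            "@llm_tasks": "spacy.TextCat.v3",
            "labels": "COMPLIMENT,INSULT",
            "label_definitions": {
                "COMPLIMENT": "a polite expression of praise or admiration.",
                "INSULT": "a disrespectful or scornfully abusive remark or act.",
            },
        },
        "model": {"@llm_models": "spacy.GPT-3-5.v1"},
    },
)
doc = nlp("You look great!")
print(doc.cats)  # e.g. {"COMPLIMENT": 1.0, "INSULT": 0.0}
```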
#### spacy.TextCat.v2 {id="textcat-v2"}

Version 2 of the built-in TextCat task supports both zero-shot and few-shot
prompting and includes an improved prompt template.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.TextCat.v2"
> labels = ["COMPLIMENT", "INSULT"]
> examples = null
> ```

| Argument            | Description                                                                                                                                                                                                                                                                 |
| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `labels`            | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~                                                                                                                                                                                              |
| `template`          | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [`textcat.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/textcat.jinja). ~~str~~         |
| `examples`          | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                                                                                                  |
| `normalizer`        | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~                                                                                                                         |
| `exclusive_classes` | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~                                                                                                                          |
| `allow_none`        | When set to `True`, allows the LLM to not return any of the given labels. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~                                                                                                  |
| `verbose`           | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~                                                                                                                                                              |

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```json
[
  {
    "text": "You look great!",
    "answer": "Compliment"
  },
  {
    "text": "You are not very clever at all.",
    "answer": "Insult"
  }
]
```

```ini
[components.llm.task]
@llm_tasks = "spacy.TextCat.v2"
labels = ["COMPLIMENT", "INSULT"]
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "textcat_examples.json"
```
#### spacy.TextCat.v1 {id="textcat-v1"}

Version 1 of the built-in TextCat task supports both zero-shot and few-shot
prompting.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.TextCat.v1"
> labels = COMPLIMENT,INSULT
> examples = null
> ```

| Argument            | Description                                                                                                                                                                     |
| ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `labels`            | Comma-separated list of labels. ~~str~~                                                                                                                                               |
| `examples`          | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                       |
| `normalizer`        | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~                              |
| `exclusive_classes` | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~                               |
| `allow_none`        | When set to `True`, allows the LLM to not return any of the given labels. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~       |
| `verbose`           | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~                                                                   |

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```json
[
  {
    "text": "You look great!",
    "answer": "Compliment"
  },
  {
    "text": "You are not very clever at all.",
    "answer": "Insult"
  }
]
```

```ini
[components.llm.task]
@llm_tasks = "spacy.TextCat.v1"
labels = COMPLIMENT,INSULT
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "textcat_examples.json"
```
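All of the `[components.llm.task]` snippets above slot into a full pipeline
config. As a sketch, `spacy-llm` ships an `assemble` helper that builds an
`nlp` object from such a file - here assuming a `config.cfg` that pairs one of
the task sections above with a `[components.llm.model]` section:

```python
from spacy_llm.util import assemble

# Assumes config.cfg combines a [components.llm.task] section like the ones
# above with a [components.llm.model] section.
nlp = assemble("config.cfg")
doc = nlp("You look great!")
print(doc.cats)
```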
#### spacy.REL.v1 {id="rel-v1"}

The built-in REL task supports both zero-shot and few-shot prompting. It relies
on an upstream NER component for entity extraction.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.REL.v1"
> labels = ["LivesIn", "Visits"]
> ```

| Argument            | Description                                                                                                                                                                                                                                                         |
| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `labels`            | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~                                                                                                                                                                                       |
| `template`          | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [`rel.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/rel.jinja). ~~str~~          |
| `label_description` | Dictionary providing a description for each relation label. Defaults to `None`. ~~Optional[Dict[str, str]]~~                                                                                                                                                             |
| `examples`          | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                                                                                           |
| `normalizer`        | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~                                                                                              |
| `verbose`           | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~                                                                                                                                                       |

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```json
{"text": "Laura bought a house in Boston with her husband Mark.", "ents": [{"start_char": 0, "end_char": 5, "label": "PERSON"}, {"start_char": 24, "end_char": 30, "label": "GPE"}, {"start_char": 48, "end_char": 52, "label": "PERSON"}], "relations": [{"dep": 0, "dest": 1, "relation": "LivesIn"}, {"dep": 2, "dest": 1, "relation": "LivesIn"}]}
{"text": "Michael travelled through South America by bike.", "ents": [{"start_char": 0, "end_char": 7, "label": "PERSON"}, {"start_char": 26, "end_char": 39, "label": "LOC"}], "relations": [{"dep": 0, "dest": 1, "relation": "Visits"}]}
```

```ini
[components.llm.task]
@llm_tasks = "spacy.REL.v1"
labels = ["LivesIn", "Visits"]
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "rel_examples.jsonl"
```

Note: the REL task relies on pre-extracted entities to make its prediction.
Hence, you'll need to add a component that populates `doc.ents` with recognized
spans to your spaCy pipeline and put it _before_ the REL component, as sketched
below.
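A minimal sketch of such a pipeline - it assumes the small English pipeline is
installed to supply `doc.ents` upstream, and the extension attribute read at
the end is an assumption about where the task stores its predictions:

```python
import spacy

# Assumes: python -m spacy download en_core_web_sm
nlp = spacy.load("en_core_web_sm")
nlp.add_pipe(
    "llm",
    last=True,  # the REL component must come after the entity recognizer
    config={
        "task": {"@llm_tasks": "spacy.REL.v1", "labels": "LivesIn,Visits"},
        "model": {"@llm_models": "spacy.GPT-3-5.v1"},
    },
)
doc = nlp("Laura bought a house in Boston.")
print(doc._.rel)  # relations predicted by the task (attribute name assumed)
```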
#### spacy.Lemma.v1 {id="lemma-v1"}

The `Lemma.v1` task lemmatizes the provided text and updates the `lemma_`
attribute in the doc's tokens accordingly.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.Lemma.v1"
> examples = null
> ```

| Argument   | Description                                                                                                                                                                                                                                                      |
| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [lemma.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/lemma.jinja). ~~str~~ |
| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                                                                                    |

`Lemma.v1` prompts the LLM to lemmatize the passed text and return the
lemmatized version as a list of tokens and their corresponding lemma. E. g. the
text `I'm buying ice cream for my friends.` should invoke the response

```
I: I
'm: be
buying: buy
ice: ice
cream: cream
for: for
my: my
friends: friend
.: .
```

If for any given text/doc instance the number of lemmas returned by the LLM
doesn't match the number of tokens from the pipeline's tokenizer, no lemmas are
stored in the corresponding doc's tokens. Otherwise each token's `.lemma_`
property is updated with the lemma suggested by the LLM.

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```yaml
- text: I'm buying ice cream.
  lemmas:
    - 'I': 'I'
    - "'m": 'be'
    - 'buying': 'buy'
    - 'ice': 'ice'
    - 'cream': 'cream'
    - '.': '.'

- text: I've watered the plants.
  lemmas:
    - 'I': 'I'
    - "'ve": 'have'
    - 'watered': 'water'
    - 'the': 'the'
    - 'plants': 'plant'
    - '.': '.'
```

```ini
[components.llm.task]
@llm_tasks = "spacy.Lemma.v1"
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "lemma_examples.yml"
```

#### spacy.Sentiment.v1 {id="sentiment-v1"}

Performs sentiment analysis on provided texts. Scores between 0 and 1 are
stored in `Doc._.sentiment` - the higher, the more positive. Note that in case
of parsing issues (e. g. unexpected LLM responses) the value might be `None`.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.Sentiment.v1"
> examples = null
> ```

| Argument   | Description                                                                                                                                                                                                                                                              |
| ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [sentiment.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/sentiment.jinja). ~~str~~ |
| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                                                                                            |
| `field`    | Name of extension attribute to store the sentiment score in (i. e. the score will be available in `doc._.{field}`). Defaults to `sentiment`. ~~str~~                                                                                                                      |

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

```yaml
- text: 'This is horrifying.'
  score: 0
- text: 'This is underwhelming.'
  score: 0.25
- text: 'This is ok.'
  score: 0.5
- text: "I'm looking forward to this!"
  score: 1.0
```

```ini
[components.llm.task]
@llm_tasks = "spacy.Sentiment.v1"
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "sentiment_examples.yml"
```

#### spacy.NoOp.v1 {id="noop-v1"}

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.NoOp.v1"
> ```

This task is only useful for testing - it tells the LLM to do nothing, and does
not set any fields on the `docs`.

### Models {id="models"}

A _model_ defines which LLM to query, and how to query it. It can be a simple
function taking a collection of prompts (consistent with the output type of
`task.generate_prompts()`) and returning a collection of responses (consistent
with the expected input of `parse_responses`). Generally speaking, it's a
function of type `Callable[[Iterable[Any]], Iterable[Any]]`, but specific
implementations can have other signatures, like
`Callable[[Iterable[str]], Iterable[str]]`.

#### API Keys {id="api-keys"}

Note that when using hosted services, you have to ensure that the proper API
keys are set as environment variables as described by the corresponding
provider's documentation.

E. g. when using OpenAI, you have to get an API key from openai.com, and ensure
that the keys are set as environment variables:

```shell
export OPENAI_API_KEY="sk-..."
export OPENAI_API_ORG="org-..."
```

For Cohere it's

```shell
export CO_API_KEY="..."
```

and for Anthropic

```shell
export ANTHROPIC_API_KEY="..."
```

#### spacy.GPT-4.v1 {id="gpt-4"}

OpenAI's `gpt-4` model family.
+ +> #### Example config: +> +> ```ini +> [components.llm.model] +> @llm_models = "spacy.GPT-4.v1" +> name = "gpt-4" +> config = {"temperature": 0.3} +> ``` + +| Argument | Description | +| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"gpt-4"`. ~~Literal["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]~~ | +| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | +| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | +| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | +| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | + +#### spacy.GPT-3-5.v1 {id="gpt-3-5"} + +OpenAI's `gpt-3-5` model family. + +> #### Example config +> +> ```ini +> [components.llm.model] +> @llm_models = "spacy.GPT-3-5.v1" +> name = "gpt-3.5-turbo" +> config = {"temperature": 0.3} +> ``` + +| Argument | Description | +| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"gpt-3.5-turbo"`. ~~Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-0613-16k"]~~ | +| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | +| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | +| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | +| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | + +#### spacy.Text-Davinci.v1 {id="text-davinci"} + +OpenAI's `text-davinci` model family. + +> #### Example config +> +> ```ini +> [components.llm.model] +> @llm_models = "spacy.Text-Davinci.v1" +> name = "text-davinci-003" +> config = {"temperature": 0.3} +> ``` + +| Argument | Description | +| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"text-davinci-003"`. ~~Literal["text-davinci-002", "text-davinci-003"]~~ | +| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | +| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | +| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | +| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | + +#### spacy.Code-Davinci.v1 {id="code-davinci"} + +OpenAI's `code-davinci` model family. 
> #### Example config
>
> ```ini
> [components.llm.model]
> @llm_models = "spacy.Code-Davinci.v1"
> name = "code-davinci-002"
> config = {"temperature": 0.3}
> ```

| Argument    | Description                                                                                                                                         |
| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| `name`      | Model name, i. e. any supported variant for this particular model. Defaults to `"code-davinci-002"`. ~~Literal["code-davinci-002"]~~                   |
| `config`    | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~                                                                     |
| `strict`    | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~      |
| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~                                                                                          |
| `timeout`   | Timeout for API request in seconds. Defaults to `30`. ~~int~~                                                                                           |

#### spacy.Text-Curie.v1 {id="text-curie"}

OpenAI's `text-curie` model family.

> #### Example config
>
> ```ini
> [components.llm.model]
> @llm_models = "spacy.Text-Curie.v1"
> name = "text-curie-001"
> config = {"temperature": 0.3}
> ```

| Argument    | Description                                                                                                                                         |
| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| `name`      | Model name, i. e. any supported variant for this particular model. Defaults to `"text-curie-001"`. ~~Literal["text-curie-001"]~~                       |
| `config`    | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~                                                                     |
| `strict`    | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~      |
| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~                                                                                          |
| `timeout`   | Timeout for API request in seconds. Defaults to `30`. ~~int~~                                                                                           |

#### spacy.Text-Babbage.v1 {id="text-babbage"}

OpenAI's `text-babbage` model family.

> #### Example config
>
> ```ini
> [components.llm.model]
> @llm_models = "spacy.Text-Babbage.v1"
> name = "text-babbage-001"
> config = {"temperature": 0.3}
> ```

| Argument    | Description                                                                                                                                         |
| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| `name`      | Model name, i. e. any supported variant for this particular model. Defaults to `"text-babbage-001"`. ~~Literal["text-babbage-001"]~~                   |
| `config`    | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~                                                                     |
| `strict`    | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~      |
| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~                                                                                          |
| `timeout`   | Timeout for API request in seconds. Defaults to `30`. ~~int~~                                                                                           |

#### spacy.Text-Ada.v1 {id="text-ada"}

OpenAI's `text-ada` model family.

> #### Example config
>
> ```ini
> [components.llm.model]
> @llm_models = "spacy.Text-Ada.v1"
> name = "text-ada-001"
> config = {"temperature": 0.3}
> ```

| Argument    | Description                                                                                                                                         |
| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| `name`      | Model name, i. e. any supported variant for this particular model. Defaults to `"text-ada-001"`. ~~Literal["text-ada-001"]~~                           |
| `config`    | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~                                                                     |
| `strict`    | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~      |
| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~                                                                                          |
| `timeout`   | Timeout for API request in seconds. Defaults to `30`. ~~int~~                                                                                           |

#### spacy.Davinci.v1 {id="davinci"}

OpenAI's `davinci` model family.

> #### Example config
>
> ```ini
> [components.llm.model]
> @llm_models = "spacy.Davinci.v1"
> name = "davinci"
> config = {"temperature": 0.3}
> ```

| Argument    | Description                                                                                                                                         |
| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| `name`      | Model name, i. e. any supported variant for this particular model. Defaults to `"davinci"`. ~~Literal["davinci"]~~                                     |
| `config`    | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~                                                                     |
| `strict`    | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~      |
| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~                                                                                          |
| `timeout`   | Timeout for API request in seconds. Defaults to `30`. ~~int~~                                                                                           |

#### spacy.Curie.v1 {id="curie"}

OpenAI's `curie` model family.

> #### Example config
>
> ```ini
> [components.llm.model]
> @llm_models = "spacy.Curie.v1"
> name = "curie"
> config = {"temperature": 0.3}
> ```

| Argument    | Description                                                                                                                                         |
| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| `name`      | Model name, i. e. any supported variant for this particular model. Defaults to `"curie"`. ~~Literal["curie"]~~                                         |
| `config`    | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~                                                                     |
| `strict`    | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~      |
| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~                                                                                          |
| `timeout`   | Timeout for API request in seconds. Defaults to `30`. ~~int~~                                                                                           |

#### spacy.Babbage.v1 {id="babbage"}

OpenAI's `babbage` model family.

> #### Example config
>
> ```ini
> [components.llm.model]
> @llm_models = "spacy.Babbage.v1"
> name = "babbage"
> config = {"temperature": 0.3}
> ```

| Argument    | Description                                                                                                                                         |
| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
| `name`      | Model name, i. e. any supported variant for this particular model. Defaults to `"babbage"`. ~~Literal["babbage"]~~                                     |
| `config`    | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~                                                                     |
| `strict`    | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~      |
| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~                                                                                          |
| `timeout`   | Timeout for API request in seconds. Defaults to `30`. ~~int~~                                                                                           |

#### spacy.Ada.v1 {id="ada"}

OpenAI's `ada` model family.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Ada.v1"
+> name = "ada"
+> config = {"temperature": 0.3}
+> ```
+
+| Argument | Description |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"ada"`. ~~Literal["ada"]~~ |
+| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ |
+| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ |
+| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ |
+| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ |
+
+#### spacy.Command.v1 {id="command"}
+
+Cohere's `command` model family.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Command.v1"
+> name = "command"
+> config = {"temperature": 0.3}
+> ```
+
+| Argument | Description |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"command"`. ~~Literal["command", "command-light", "command-light-nightly", "command-nightly"]~~ |
+| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ |
+| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ |
+| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ |
+| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ |
+
+#### spacy.Claude-2.v1 {id="claude-2"}
+
+Anthropic's `claude-2` model family.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Claude-2.v1"
+> name = "claude-2"
+> config = {"temperature": 0.3}
+> ```
+
+| Argument | Description |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-2"`. ~~Literal["claude-2", "claude-2-100k"]~~ |
+| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ |
+| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ |
+| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ |
+| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ |
+
+#### spacy.Claude-1.v1 {id="claude-1"}
+
+Anthropic's `claude-1` model family.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Claude-1.v1"
+> name = "claude-1"
+> config = {"temperature": 0.3}
+> ```
+
+| Argument | Description |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-1"`. 
~~Literal["claude-1", "claude-1-100k"]~~ | +| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | +| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | +| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | +| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | + +#### spacy.Claude-instant-1.v1 {id="claude-instant-1"} + +Anthropic's `claude-instant-1` model family. + +> #### Example config +> +> ```ini +> [components.llm.model] +> @llm_models = "spacy.Claude-instant-1.v1 " +> name = "claude-instant-1" +> config = {"temperature": 0.3} +> ``` + +| Argument | Description | +| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-instant-1"`. ~~Literal["claude-instant-1", "claude-instant-1-100k"]~~ | +| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | +| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | +| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | +| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | + +#### spacy.Claude-instant-1-1.v1 {id="claude-instant-1-1"} + +Anthropic's `claude-instant-1.1` model family. + +> #### Example config +> +> ```ini +> [components.llm.model] +> @llm_models = "spacy.Claude-instant-1-1.v1 " +> name = "claude-instant-1.1" +> config = {"temperature": 0.3} +> ``` + +| Argument | Description | +| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-instant-1.1"`. ~~Literal["claude-instant-1.1", "claude-instant-1.1-100k"]~~ | +| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | +| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | +| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | +| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | + +#### spacy.Claude-1-0.v1 {id="claude-1-0"} + +Anthropic's `claude-1.0` model family. + +> #### Example config +> +> ```ini +> [components.llm.model] +> @llm_models = "spacy.Claude-1-0.v1 " +> name = "claude-1.0" +> config = {"temperature": 0.3} +> ``` + +| Argument | Description | +| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-1.0"`. ~~Literal["claude-1.0"]~~ | +| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | +| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | +| `max_tries` | Max. 
number of tries for API request. Defaults to `3`. ~~int~~ |
+| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ |
+
+#### spacy.Claude-1-2.v1 {id="claude-1-2"}
+
+Anthropic's `claude-1.2` model family.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Claude-1-2.v1"
+> name = "claude-1.2"
+> config = {"temperature": 0.3}
+> ```
+
+| Argument | Description |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-1.2"`. ~~Literal["claude-1.2"]~~ |
+| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ |
+| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ |
+| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ |
+| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ |
+
+#### spacy.Claude-1-3.v1 {id="claude-1-3"}
+
+Anthropic's `claude-1.3` model family.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Claude-1-3.v1"
+> name = "claude-1.3"
+> config = {"temperature": 0.3}
+> ```
+
+| Argument | Description |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-1.3"`. ~~Literal["claude-1.3", "claude-1.3-100k"]~~ |
+| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ |
+| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ |
+| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ |
+| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ |
+
+#### spacy.Dolly.v1 {id="dolly"}
+
+To use this model, ideally you have a GPU enabled and have installed
+`transformers`, `torch` and CUDA in your virtual environment. This allows you to
+have the setting `device=cuda:0` in your config, which ensures that the model is
+loaded entirely on the GPU (and fails otherwise).
+
+You can do so with
+
+```shell
+python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]"
+```
+
+If you don't have access to a GPU, you can install `accelerate` and
+set `device_map=auto` instead, but be aware that this may result in some layers
+getting distributed to the CPU or even the hard drive, which may ultimately
+result in extremely slow queries.
+
+```shell
+python -m pip install "accelerate>=0.16.0,<1.0"
+```
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Dolly.v1"
+> name = "dolly-v2-3b"
+> ```
+
+| Argument | Description |
+| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | The name of a Dolly model that is supported (e. g. "dolly-v2-3b" or "dolly-v2-12b"). ~~Literal["dolly-v2-3b", "dolly-v2-7b", "dolly-v2-12b"]~~ |
+| `config_init` | Further configuration passed on to the construction of the model with `transformers.pipeline()`. 
Defaults to `{}`. ~~Dict[str, Any]~~ |
+| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ |
+
+Supported models (see the
+[Databricks models page](https://huggingface.co/databricks) on Hugging Face for
+details):
+
+- `"databricks/dolly-v2-3b"`
+- `"databricks/dolly-v2-7b"`
+- `"databricks/dolly-v2-12b"`
+
+Note that Hugging Face will download this model the first time you use it - you
+can
+[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache)
+by setting the environmental variable `HF_HOME`.
+
+#### spacy.Llama2.v1 {id="llama2"}
+
+To use this model, ideally you have a GPU enabled and have installed
+`transformers`, `torch` and CUDA in your virtual environment. This allows you to
+have the setting `device=cuda:0` in your config, which ensures that the model is
+loaded entirely on the GPU (and fails otherwise).
+
+You can do so with
+
+```shell
+python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]"
+```
+
+If you don't have access to a GPU, you can install `accelerate` and
+set `device_map=auto` instead, but be aware that this may result in some layers
+getting distributed to the CPU or even the hard drive, which may ultimately
+result in extremely slow queries.
+
+```shell
+python -m pip install "accelerate>=0.16.0,<1.0"
+```
+
+Note that the chat model variants of Llama 2 are currently not supported. This
+is because they need a particular prompting setup and don't add any discernible
+benefits in the use case of `spacy-llm` (i. e. no interactive chat) compared to
+the completion model variants.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Llama2.v1"
+> name = "Llama-2-7b-hf"
+> ```
+
+| Argument | Description |
+| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `name` | The name of a Llama 2 model variant that is supported. Defaults to `"Llama-2-7b-hf"`. ~~Literal["Llama-2-7b-hf", "Llama-2-13b-hf", "Llama-2-70b-hf"]~~ |
+| `config_init` | Further configuration passed on to the construction of the model with `transformers.pipeline()`. Defaults to `{}`. ~~Dict[str, Any]~~ |
+| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ |
+
+Note that Hugging Face will download this model the first time you use it - you
+can
+[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache)
+by setting the environmental variable `HF_HOME`.
+
+#### spacy.Falcon.v1 {id="falcon"}
+
+To use this model, ideally you have a GPU enabled and have installed
+`transformers`, `torch` and CUDA in your virtual environment. This allows you to
+have the setting `device=cuda:0` in your config, which ensures that the model is
+loaded entirely on the GPU (and fails otherwise).
+
+You can do so with
+
+```shell
+python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]"
+```
+
+If you don't have access to a GPU, you can install `accelerate` and
+set `device_map=auto` instead, but be aware that this may result in some layers
+getting distributed to the CPU or even the hard drive, which may ultimately
+result in extremely slow queries. 
+
+```shell
+python -m pip install "accelerate>=0.16.0,<1.0"
+```
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.Falcon.v1"
+> name = "falcon-7b"
+> ```
+
+| Argument | Description |
+| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `name` | The name of a Falcon model variant that is supported. Defaults to `"falcon-7b-instruct"`. ~~Literal["falcon-rw-1b", "falcon-7b", "falcon-7b-instruct", "falcon-40b-instruct"]~~ |
+| `config_init` | Further configuration passed on to the construction of the model with `transformers.pipeline()`. Defaults to `{}`. ~~Dict[str, Any]~~ |
+| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ |
+
+Note that Hugging Face will download this model the first time you use it - you
+can
+[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache)
+by setting the environmental variable `HF_HOME`.
+
+#### spacy.StableLM.v1 {id="stablelm"}
+
+To use this model, ideally you have a GPU enabled and have installed
+`transformers`, `torch` and CUDA in your virtual environment.
+
+You can do so with
+
+```shell
+python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]"
+```
+
+If you don't have access to a GPU, you can install `accelerate` and
+set `device_map=auto` instead, but be aware that this may result in some layers
+getting distributed to the CPU or even the hard drive, which may ultimately
+result in extremely slow queries.
+
+```shell
+python -m pip install "accelerate>=0.16.0,<1.0"
+```
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.StableLM.v1"
+> name = "stablelm-tuned-alpha-7b"
+> ```
+
+| Argument | Description |
+| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | The name of a StableLM model that is supported (e. g. "stablelm-tuned-alpha-7b"). ~~Literal["stablelm-base-alpha-3b", "stablelm-base-alpha-7b", "stablelm-tuned-alpha-3b", "stablelm-tuned-alpha-7b"]~~ |
+| `config_init` | Further configuration passed on to the construction of the model with `transformers.AutoModelForCausalLM.from_pretrained()`. Defaults to `{}`. ~~Dict[str, Any]~~ |
+| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ |
+
+See the
+[Stability AI StableLM GitHub repo](https://github.com/Stability-AI/StableLM/#stablelm-alpha)
+for details.
+
+Note that Hugging Face will download this model the first time you use it - you
+can
+[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache)
+by setting the environmental variable `HF_HOME`.
+
+#### spacy.OpenLLaMA.v1 {id="openllama"}
+
+To use this model, ideally you have a GPU enabled and have installed
+`transformers[sentencepiece]`, `torch` and CUDA in your virtual environment. 
+
+You can do so with
+
+```shell
+python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]"
+```
+
+If you don't have access to a GPU, you can install `accelerate` and
+set `device_map=auto` instead, but be aware that this may result in some layers
+getting distributed to the CPU or even the hard drive, which may ultimately
+result in extremely slow queries.
+
+```shell
+python -m pip install "accelerate>=0.16.0,<1.0"
+```
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "spacy.OpenLLaMA.v1"
+> name = "open_llama_3b"
+> ```
+
+| Argument | Description |
+| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | The name of an OpenLLaMA model that is supported. ~~Literal["open_llama_3b", "open_llama_7b", "open_llama_7b_v2", "open_llama_13b"]~~ |
+| `config_init` | Further configuration passed on to the construction of the model with `transformers.AutoModelForCausalLM.from_pretrained()`. Defaults to `{}`. ~~Dict[str, Any]~~ |
+| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ |
+
+See the
+[OpenLM Research OpenLLaMA GitHub repo](https://github.com/openlm-research/open_llama)
+for details.
+
+Note that Hugging Face will download this model the first time you use it - you
+can
+[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache)
+by setting the environmental variable `HF_HOME`.
+
+#### LangChain models {id="langchain-models"}
+
+To use [LangChain](https://github.com/hwchase17/langchain) for the API retrieval
+part, make sure you have installed it first:
+
+```shell
+python -m pip install "langchain==0.0.191"
+# Or install with spacy-llm directly
+python -m pip install "spacy-llm[extras]"
+```
+
+Note that LangChain currently only supports Python 3.9 and beyond.
+
+LangChain models in `spacy-llm` work slightly differently. `langchain`'s models
+are parsed automatically: each LLM class in `langchain` has one entry in
+`spacy-llm`'s registry. As `langchain`'s design has one class per API and not
+per model, this results in registry entries like `langchain.OpenAI.v1` - i. e.
+there is one registry entry per API and not per model (family), unlike the
+REST- and HuggingFace-based entries.
+
+The name of the model to be used has to be passed in via the `name` attribute.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.model]
+> @llm_models = "langchain.OpenAI.v1"
+> name = "gpt-3.5-turbo"
+> query = {"@llm_queries": "spacy.CallLangChain.v1"}
+> config = {"temperature": 0.3}
+> ```
+
+| Argument | Description |
+| -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `name` | The name of a model supported by LangChain for this API. ~~str~~ |
+| `config` | Configuration passed on to the LangChain model. Defaults to `{}`. ~~Dict[Any, Any]~~ |
+| `query` | Function that executes the prompts. If `None`, defaults to `spacy.CallLangChain.v1`. ~~Optional[Callable[["langchain.llms.BaseLLM", Iterable[Any]], Iterable[Any]]]~~ |
+
+The default `query` (`spacy.CallLangChain.v1`) executes the prompts by running
+`model(text)` for each given textual prompt.
+
+### Cache {id="cache"}
+
+Interacting with LLMs, either through an external API or a local instance, is
+costly. 
Since developing an NLP pipeline generally means a lot of exploration
+and prototyping, `spacy-llm` implements a built-in cache that keeps batches of
+documents stored on disk, so that the same documents aren't reprocessed at each
+run.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.cache]
+> @llm_misc = "spacy.BatchCache.v1"
+> path = "path/to/cache"
+> batch_size = 64
+> max_batches_in_mem = 4
+> ```
+
+| Argument | Description |
+| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `path` | Cache directory. If `None`, no caching is performed, and this component will act as a NoOp. Defaults to `None`. ~~Optional[Union[str, Path]]~~ |
+| `batch_size` | Number of docs in one batch (file). Once a batch is full, it will be persisted to disk. Defaults to 64. ~~int~~ |
+| `max_batches_in_mem` | Max. number of batches to hold in memory. Allows you to limit the effect on your memory if you're handling a lot of docs. Defaults to 4. ~~int~~ |
+
+When retrieving a document, the `BatchCache` will first figure out what batch
+the document belongs to. If the batch isn't in memory it will try to load the
+batch from disk and then move it into memory.
+
+Note that since the cache is generated by a registered function, you can also
+provide your own registered function returning your own cache implementation. If
+you wish to do so, ensure that your cache object adheres to the `Protocol`
+defined in `spacy_llm.ty.Cache`.
+
+### Various functions {id="various-functions"}
+
+#### spacy.FewShotReader.v1 {id="fewshotreader-v1"}
+
+This function is registered in spaCy's `misc` registry, and reads in examples
+from a `.yml`, `.yaml`, `.json` or `.jsonl` file. It uses
+[`srsly`](https://github.com/explosion/srsly) to read in these files and parses
+them depending on the file extension.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.task.examples]
+> @misc = "spacy.FewShotReader.v1"
+> path = "ner_examples.yml"
+> ```
+
+| Argument | Description |
+| -------- | ----------------------------------------------------------------------------------------------- |
+| `path` | Path to an examples file with suffix `.yml`, `.yaml`, `.json` or `.jsonl`. ~~Union[str, Path]~~ |
+
+#### spacy.FileReader.v1 {id="filereader-v1"}
+
+This function is registered in spaCy's `misc` registry, and reads the file
+provided via `path`, returning a `str` representation of its contents. This
+function is typically used to read
+[Jinja](https://jinja.palletsprojects.com/en/3.1.x/) files containing the prompt
+template.
+
+> #### Example config
+>
+> ```ini
+> [components.llm.task.template]
+> @misc = "spacy.FileReader.v1"
+> path = "ner_template.jinja2"
+> ```
+
+| Argument | Description |
+| -------- | ------------------------------------------------- |
+| `path` | Path to the file to be read. ~~Union[str, Path]~~ |
+
+#### Normalizer functions {id="normalizer-functions"}
+
+These functions provide simple normalizations for string comparisons, e.g.
+between a list of specified labels and a label given in the raw text of the LLM
+response. They are registered in spaCy's `misc` registry and have the signature
+`Callable[[str], str]`.
+
+- `spacy.StripNormalizer.v1`: only applies `text.strip()`
+- `spacy.LowercaseNormalizer.v1`: applies `text.strip().lower()` to compare
+  strings in a case-insensitive way.
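+
+For illustration, here is a minimal sketch of how a custom normalizer could be
+registered, relying only on the contract described above (a function in spaCy's
+`misc` registry returning a `Callable[[str], str]`). The registry name
+`"my.CollapseWhitespaceNormalizer.v1"` and the normalization itself are
+hypothetical:
+
+```python
+import spacy
+
+
+@spacy.registry.misc("my.CollapseWhitespaceNormalizer.v1")
+def make_collapse_whitespace_normalizer():
+    def normalizer(text: str) -> str:
+        # Lowercase and strip, and additionally collapse internal whitespace.
+        return " ".join(text.strip().lower().split())
+
+    return normalizer
+```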
diff --git a/website/docs/usage/large-language-models.mdx b/website/docs/usage/large-language-models.mdx
new file mode 100644
index 000000000..3c2c52c68
--- /dev/null
+++ b/website/docs/usage/large-language-models.mdx
@@ -0,0 +1,512 @@
+---
+title: Large Language Models
+teaser: Integrating LLMs into structured NLP pipelines
+menu:
+  - ['Motivation', 'motivation']
+  - ['Install', 'install']
+  - ['Usage', 'usage']
+  - ['Logging', 'logging']
+  - ['API', 'api']
+  - ['Tasks', 'tasks']
+  - ['Models', 'models']
+---
+
+[The spacy-llm package](https://github.com/explosion/spacy-llm) integrates Large
+Language Models (LLMs) into spaCy pipelines, featuring a modular system for
+**fast prototyping** and **prompting**, and turning unstructured responses into
+**robust outputs** for various NLP tasks, **no training data** required.
+
+- Serializable `llm` **component** to integrate prompts into your pipeline
+- **Modular functions** to define the [**task**](#tasks) (prompting and parsing)
+  and [**model**](#models) (model to use)
+- Support for **hosted APIs** and self-hosted **open-source models**
+- Integration with [`LangChain`](https://github.com/hwchase17/langchain)
+- Access to
+  **[OpenAI API](https://platform.openai.com/docs/api-reference/introduction)**,
+  including GPT-4 and various GPT-3 models
+- Built-in support for various **open-source** models hosted on
+  [Hugging Face](https://huggingface.co/)
+- Usage examples for standard NLP tasks such as **Named Entity Recognition** and
+  **Text Classification**
+- Easy implementation of **your own functions** via the
+  [registry](/api/top-level#registry) for custom prompting, parsing and model
+  integrations
+
+## Motivation {id="motivation"}
+
+Large Language Models (LLMs) feature powerful natural language understanding
+capabilities. With only a few (and sometimes no) examples, an LLM can be
+prompted to perform custom NLP tasks such as text categorization, named entity
+recognition, coreference resolution, information extraction and more.
+
+Supervised learning is much worse than LLM prompting for prototyping, but for
+many tasks it's much better for production. A transformer model that runs
+comfortably on a single GPU is extremely powerful, and it's likely to be a
+better choice for any task for which you have a well-defined output. You train
+the model with anything from a few hundred to a few thousand labelled examples,
+and it will learn to do exactly that. Efficiency, reliability and control are
+all better with supervised learning, and accuracy will generally be higher than
+LLM prompting as well.
+
+`spacy-llm` lets you have **the best of both worlds**. You can quickly
+initialize a pipeline with components powered by LLM prompts, and freely mix in
+components powered by other approaches. As your project progresses, you can look
+at replacing some or all of the LLM-powered components as you require.
+
+Of course, there can be components in your system for which the power of an LLM
+is fully justified. If you want a system that can synthesize information from
+multiple documents in subtle ways and generate a nuanced summary for you, bigger
+is better. However, even if your production system needs an LLM for part of the
+task, that doesn't mean you need an LLM for all of it. Maybe you want to use a
+cheap text classification model to help you find the texts to summarize, or
+maybe you want to add a rule-based system to sanity check the output of the
+summary. 
These before-and-after tasks are much easier with a mature and
+well-thought-out library, which is exactly what spaCy provides.
+
+## Install {id="install"}
+
+`spacy-llm` will be installed automatically in future spaCy versions. For now,
+you can run the following in the same virtual environment where you already have
+`spacy` [installed](/usage).
+
+> ⚠️ This package is still experimental and it is possible that changes made to
+> the interface will be breaking in minor version updates.
+
+```bash
+python -m pip install spacy-llm
+```
+
+## Usage {id="usage"}
+
+The task and the model have to be supplied to the `llm` pipeline component using
+the [config system](/api/data-formats#config). This package provides various
+built-in functionality, as detailed in the [API](#-api) documentation.
+
+### Example 1: Add a text classifier using a GPT-3 model from OpenAI {id="example-1"}
+
+Create a new API key from openai.com or fetch an existing one, and ensure the
+keys are set as environmental variables. For more background information, see
+the [OpenAI](/api/large-language-models#gpt-3-5) section.
+
+Create a config file `config.cfg` containing at least the following (or see the
+full example
+[here](https://github.com/explosion/spacy-llm/tree/main/usage_examples/textcat_openai)):
+
+```ini
+[nlp]
+lang = "en"
+pipeline = ["llm"]
+
+[components]
+
+[components.llm]
+factory = "llm"
+
+[components.llm.task]
+@llm_tasks = "spacy.TextCat.v2"
+labels = ["COMPLIMENT", "INSULT"]
+
+[components.llm.model]
+@llm_models = "spacy.GPT-3-5.v1"
+config = {"temperature": 0.3}
+```
+
+Now run:
+
+```python
+from spacy_llm.util import assemble
+
+nlp = assemble("config.cfg")
+doc = nlp("You look gorgeous!")
+print(doc.cats)
+```
+
+### Example 2: Add NER using an open-source model through Hugging Face {id="example-2"}
+
+To run this example, ensure that you have a GPU enabled, and `transformers`,
+`torch` and CUDA installed. For more background information, see the
+[DollyHF](/api/large-language-models#dolly) section.
+
+Create a config file `config.cfg` containing at least the following (or see the
+full example
+[here](https://github.com/explosion/spacy-llm/tree/main/usage_examples/ner_dolly)):
+
+```ini
+[nlp]
+lang = "en"
+pipeline = ["llm"]
+
+[components]
+
+[components.llm]
+factory = "llm"
+
+[components.llm.task]
+@llm_tasks = "spacy.NER.v2"
+labels = ["PERSON", "ORGANISATION", "LOCATION"]
+
+[components.llm.model]
+@llm_models = "spacy.Dolly.v1"
+# For better performance, use dolly-v2-12b instead
+name = "dolly-v2-3b"
+```
+
+Now run:
+
+```python
+from spacy_llm.util import assemble
+
+nlp = assemble("config.cfg")
+doc = nlp("Jack and Jill rode up the hill in Les Deux Alpes")
+print([(ent.text, ent.label_) for ent in doc.ents])
+```
+
+Note that Hugging Face will download the `"databricks/dolly-v2-3b"` model the
+first time you use it. You can
+[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache)
+by setting the environmental variable `HF_HOME`. Also, you can upgrade the model
+to `"databricks/dolly-v2-12b"` for better performance.
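+
+For example, to keep the downloaded weights on a larger disk, you could set that
+variable before assembling the pipeline (the path below is purely
+illustrative):
+
+```shell
+export HF_HOME=/data/hf_cache
+```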
+
+### Example 3: Create the component directly in Python {id="example-3"}
+
+The `llm` component behaves as any other component does, so adding it to an
+existing pipeline follows the same pattern:
+
+```python
+import spacy
+
+nlp = spacy.blank("en")
+nlp.add_pipe(
+    "llm",
+    config={
+        "task": {
+            "@llm_tasks": "spacy.NER.v2",
+            "labels": ["PERSON", "ORGANISATION", "LOCATION"]
+        },
+        "model": {
+            "@llm_models": "spacy.GPT-3-5.v1",
+        },
+    },
+)
+nlp.initialize()
+doc = nlp("Jack and Jill rode up the hill in Les Deux Alpes")
+print([(ent.text, ent.label_) for ent in doc.ents])
+```
+
+Note that for efficient usage of resources, typically you would use
+[`nlp.pipe(docs)`](/api/language#pipe) with a batch, instead of calling
+`nlp(doc)` with a single document.
+
+### Example 4: Implement your own custom task {id="example-4"}
+
+To write a [`task`](#tasks), you need to implement two functions:
+`generate_prompts` that takes a list of [`Doc`](/api/doc) objects and transforms
+them into a list of prompts, and `parse_responses` that transforms the LLM
+outputs into annotations on the [`Doc`](/api/doc), e.g. entity spans, text
+categories and more.
+
+To register your custom task, decorate a factory function using the
+`spacy_llm.registry.llm_tasks` decorator with a custom name that you can refer
+to in your config.
+
+> 📖 For more details, see the
+> [**usage example on writing your own task**](https://github.com/explosion/spacy-llm/tree/main/usage_examples#writing-your-own-task)
+
+```python
+from typing import Iterable, List
+from spacy.tokens import Doc
+from spacy_llm.registry import registry
+from spacy_llm.util import split_labels
+
+
+@registry.llm_tasks("my_namespace.MyTask.v1")
+def make_my_task(labels: str, my_other_config_val: float) -> "MyTask":
+    labels_list = split_labels(labels)
+    return MyTask(labels=labels_list, my_other_config_val=my_other_config_val)
+
+
+class MyTask:
+    def __init__(self, labels: List[str], my_other_config_val: float):
+        ...
+
+    def generate_prompts(self, docs: Iterable[Doc]) -> Iterable[str]:
+        ...
+
+    def parse_responses(
+        self, docs: Iterable[Doc], responses: Iterable[str]
+    ) -> Iterable[Doc]:
+        ...
+```
+
+```ini
+# config.cfg (excerpt)
+[components.llm.task]
+@llm_tasks = "my_namespace.MyTask.v1"
+labels = LABEL1,LABEL2,LABEL3
+my_other_config_val = 0.3
+```
+
+## Logging {id="logging"}
+
+spacy-llm has a built-in logger that can log the prompt sent to the LLM as well
+as its raw response. This logger uses the debug level and by default has a
+`logging.NullHandler()` configured.
+
+In order to use this logger, you can set up a simple handler like this:
+
+```python
+import logging
+import spacy_llm
+
+
+spacy_llm.logger.addHandler(logging.StreamHandler())
+spacy_llm.logger.setLevel(logging.DEBUG)
+```
+
+> NOTE: Any `logging` handler will work here, so you probably want to use some
+> sort of rotating `FileHandler` as the generated prompts can be quite long,
+> especially for tasks with few-shot examples.
+
+Then when using the pipeline you'll be able to view the prompt and response.
+
+E.g. with the config and code from [Example 1](#example-1) above:
+
+```python
+from spacy_llm.util import assemble
+
+
+nlp = assemble("config.cfg")
+doc = nlp("You look gorgeous!")
+print(doc.cats)
+```
+
+You will see `logging` output similar to:
+
+```
+Generated prompt for doc: You look gorgeous!
+
+You are an expert Text Classification system. Your task is to accept Text as input
+and provide a category for the text based on the predefined labels. 
+
+Classify the text below to any of the following labels: COMPLIMENT, INSULT
+The task is non-exclusive, so you can provide more than one label as long as
+they're comma-delimited. For example: Label1, Label2, Label3.
+Do not put any other text in your answer, only one or more of the provided labels with nothing before or after.
+If the text cannot be classified into any of the provided labels, answer `==NONE==`.
+
+Here is the text that needs classification
+
+
+Text:
+'''
+You look gorgeous!
+'''
+
+Model response for doc: You look gorgeous!
+COMPLIMENT
+```
+
+`print(doc.cats)` to standard output should look like:
+
+```
+{'COMPLIMENT': 1.0, 'INSULT': 0.0}
+```
+
+## API {id="api"}
+
+`spacy-llm` exposes an `llm` factory with
+[configurable settings](/api/large-language-models#config).
+
+An `llm` component is defined by two main settings:
+
+- A [**task**](#tasks), defining the prompt to send to the LLM as well as the
+  functionality to parse the resulting response back into structured fields on
+  the [Doc](/api/doc) objects.
+- A [**model**](#models) defining the model to use and how to connect to it.
+  Note that `spacy-llm` supports both access to external APIs (such as OpenAI)
+  as well as access to self-hosted open-source LLMs (such as using Dolly through
+  Hugging Face).
+
+Moreover, `spacy-llm` exposes a customizable [**caching**](#cache) functionality
+to avoid running the same document through an LLM service (be it local or
+through a REST API) more than once.
+
+Finally, you can choose to save a stringified version of LLM prompts/responses
+within the `Doc.user_data["llm_io"]` attribute by setting `save_io` to `True`.
+`Doc.user_data["llm_io"]` is a dictionary containing one entry for every LLM
+component within the `nlp` pipeline. Each entry is itself a dictionary, with two
+keys: `prompt` and `response`.
+
+A note on `validate_types`: by default, `spacy-llm` checks whether the
+signatures of the `model` and `task` callables are consistent with each other
+and emits a warning if they are not. `validate_types` can be set to `False` if
+you want to disable this behavior.
+
+### Tasks {id="tasks"}
+
+A _task_ defines an NLP problem or question that will be sent to the LLM via a
+prompt. Further, the task defines how to parse the LLM's responses back into
+structured information. All tasks are registered in the `llm_tasks` registry.
+
+Practically speaking, a task should adhere to the `Protocol` `LLMTask` defined
+in [`ty.py`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/ty.py).
+It needs to define a `generate_prompts` function and a `parse_responses`
+function.
+
+| Task | Description |
+| --------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| [`task.generate_prompts`](/api/large-language-models#task-generate-prompts) | Takes a collection of documents, and returns a collection of "prompts", which can be of type `Any`. |
+| [`task.parse_responses`](/api/large-language-models#task-parse-responses) | Takes a collection of LLM responses and the original documents, parses the responses into structured information, and sets the annotations on the documents. |
+
+Moreover, the task may define an optional [`scorer` method](/api/scorer#score).
+It should accept an iterable of `Example`s as input and return a score
+dictionary. 
If the `scorer` method is defined, `spacy-llm` will call it to
+evaluate the component.
+
+| Component | Description |
+| ------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [`spacy.Summarization.v1`](/api/large-language-models#summarization-v1) | The summarization task prompts the model for a concise summary of the provided text. |
+| [`spacy.NER.v2`](/api/large-language-models#ner-v2) | The built-in NER task supports both zero-shot and few-shot prompting. This version also supports explicitly defining the provided labels with custom descriptions. |
+| [`spacy.NER.v1`](/api/large-language-models#ner-v1) | The original version of the built-in NER task supports both zero-shot and few-shot prompting. |
+| [`spacy.SpanCat.v2`](/api/large-language-models#spancat-v2) | The built-in SpanCat task is a simple adaptation of the NER task to support overlapping entities and store its annotations in `doc.spans`. |
+| [`spacy.SpanCat.v1`](/api/large-language-models#spancat-v1) | The original version of the built-in SpanCat task is a simple adaptation of the v1 NER task to support overlapping entities and store its annotations in `doc.spans`. |
+| [`spacy.TextCat.v3`](/api/large-language-models#textcat-v3) | Version 3 (the most recent) of the built-in TextCat task supports both zero-shot and few-shot prompting. It allows setting definitions of labels. |
+| [`spacy.TextCat.v2`](/api/large-language-models#textcat-v2) | Version 2 of the built-in TextCat task supports both zero-shot and few-shot prompting and includes an improved prompt template. |
+| [`spacy.TextCat.v1`](/api/large-language-models#textcat-v1) | Version 1 of the built-in TextCat task supports both zero-shot and few-shot prompting. |
+| [`spacy.REL.v1`](/api/large-language-models#rel-v1) | The built-in REL task supports both zero-shot and few-shot prompting. It relies on an upstream NER component for entity extraction. |
+| [`spacy.Lemma.v1`](/api/large-language-models#lemma-v1) | The `Lemma.v1` task lemmatizes the provided text and updates the `lemma_` attribute in the doc's tokens accordingly. |
+| [`spacy.Sentiment.v1`](/api/large-language-models#sentiment-v1) | Performs sentiment analysis on provided texts. |
+| [`spacy.NoOp.v1`](/api/large-language-models#noop-v1) | This task is only useful for testing - it tells the LLM to do nothing, and does not set any fields on the `docs`. |
+
+#### Providing examples for few-shot prompts {id="few-shot-prompts"}
+
+All built-in tasks support few-shot prompts, i. e. including examples in a
+prompt. Examples can be supplied in two ways: (1) as a separate file containing
+only examples or (2) by initializing `llm` with a `get_examples()` callback
+(like any other pipeline component).
+
+##### (1) Few-shot example file
+
+A file containing examples for few-shot prompting can be configured like this:
+
+```ini
+[components.llm.task]
+@llm_tasks = "spacy.NER.v2"
+labels = PERSON,ORGANISATION,LOCATION
+[components.llm.task.examples]
+@misc = "spacy.FewShotReader.v1"
+path = "ner_examples.yml"
+```
+
+The supplied file has to conform to the format expected by the required task
+(see the task documentation further down).
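+
+For instance, a few-shot file for the NER config above could look roughly like
+this - the exact schema depends on the task at hand, so treat this as an
+illustrative sketch and check the task's documentation for the expected format:
+
+```yaml
+- text: "Jack and Jill went up the hill."
+  entities:
+    PERSON: ["Jack", "Jill"]
+    LOCATION: ["hill"]
+```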
+
+##### (2) Initializing the `llm` component with a `get_examples()` callback
+
+Alternatively, you can initialize your `nlp` pipeline by providing a
+`get_examples` callback for [`nlp.initialize`](/api/language#initialize) and
+setting `n_prompt_examples` to a positive number to automatically fetch a few
+examples for few-shot learning. Set `n_prompt_examples` to `-1` to use all
+examples as part of the few-shot learning prompt.
+
+```ini
+[initialize.components.llm]
+n_prompt_examples = 3
+```
+
+### Model {id="models"}
+
+A _model_ defines which LLM to query, and how to query it. It can be a
+simple function taking a collection of prompts (consistent with the output type
+of `task.generate_prompts()`) and returning a collection of responses
+(consistent with the expected input of `parse_responses`). Generally speaking,
+it's a function of type `Callable[[Iterable[Any]], Iterable[Any]]`, but specific
+implementations can have other signatures, like
+`Callable[[Iterable[str]], Iterable[str]]`.
+
+All built-in models are registered in `llm_models`. If no model is specified,
+`spacy-llm` currently connects to the `OpenAI` API by default using REST, and
+accesses the `"gpt-3.5-turbo"` model.
+
+Currently three different approaches to use LLMs are supported:
+
+1. `spacy-llm`'s native REST interface. This is the default for all hosted
+   models (e. g. OpenAI, Cohere, Anthropic, ...).
+2. A HuggingFace integration that allows you to run a limited set of HF models
+   locally.
+3. A LangChain integration that allows you to run any model supported by
+   LangChain (hosted or local).
+
+Approaches 1 and 2 are the defaults for hosted models and local models,
+respectively. Alternatively you can use LangChain to access hosted or local
+models by specifying one of the models registered with the `langchain.` prefix.
+
+_Why LangChain if there are also a native REST and a HuggingFace interface? When should I use what?_
+
+Third-party libraries like `langchain` focus on prompt management, integration
+of many different LLM APIs, and other related features such as conversational
+memory or agents. `spacy-llm` on the other hand emphasizes features we consider
+useful in the context of NLP pipelines utilizing LLMs to process documents
+(mostly) independent from each other. It makes sense that the feature sets of
+such third-party libraries and `spacy-llm` aren't identical - and users might
+want to take advantage of features not available in `spacy-llm`.
+
+The advantage of implementing our own REST and HuggingFace integrations is that
+we can ensure a larger degree of stability and robustness, as we can guarantee
+backwards-compatibility and more smoothly integrated error handling.
+
+If, however, there are features or APIs not natively covered by `spacy-llm`,
+it's trivial to utilize LangChain to cover this - and easy to customize the
+prompting mechanism, if so required.
+
+Note that when using hosted services, you have to ensure that the [proper API
+keys](/api/large-language-models#api-keys) are set as environment variables as described by the corresponding
+provider's documentation.
+
+| Component | Description |
+| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ |
+| [`spacy.GPT-4.v1`](/api/large-language-models#gpt-4) | OpenAI’s `gpt-4` model family. |
+| [`spacy.GPT-3-5.v1`](/api/large-language-models#gpt-3-5) | OpenAI’s `gpt-3-5` model family. 
|
+| [`spacy.Text-Davinci.v1`](/api/large-language-models#text-davinci) | OpenAI’s `text-davinci` model family. |
+| [`spacy.Code-Davinci.v1`](/api/large-language-models#code-davinci) | OpenAI’s `code-davinci` model family. |
+| [`spacy.Text-Curie.v1`](/api/large-language-models#text-curie) | OpenAI’s `text-curie` model family. |
+| [`spacy.Text-Babbage.v1`](/api/large-language-models#text-babbage) | OpenAI’s `text-babbage` model family. |
+| [`spacy.Text-Ada.v1`](/api/large-language-models#text-ada) | OpenAI’s `text-ada` model family. |
+| [`spacy.Davinci.v1`](/api/large-language-models#davinci) | OpenAI’s `davinci` model family. |
+| [`spacy.Curie.v1`](/api/large-language-models#curie) | OpenAI’s `curie` model family. |
+| [`spacy.Babbage.v1`](/api/large-language-models#babbage) | OpenAI’s `babbage` model family. |
+| [`spacy.Ada.v1`](/api/large-language-models#ada) | OpenAI’s `ada` model family. |
+| [`spacy.Command.v1`](/api/large-language-models#command) | Cohere’s `command` model family. |
+| [`spacy.Claude-2.v1`](/api/large-language-models#claude-2) | Anthropic’s `claude-2` model family. |
+| [`spacy.Claude-1.v1`](/api/large-language-models#claude-1) | Anthropic’s `claude-1` model family. |
+| [`spacy.Claude-instant-1.v1`](/api/large-language-models#claude-instant-1) | Anthropic’s `claude-instant-1` model family. |
+| [`spacy.Claude-instant-1-1.v1`](/api/large-language-models#claude-instant-1-1) | Anthropic’s `claude-instant-1.1` model family. |
+| [`spacy.Claude-1-0.v1`](/api/large-language-models#claude-1-0) | Anthropic’s `claude-1.0` model family. |
+| [`spacy.Claude-1-2.v1`](/api/large-language-models#claude-1-2) | Anthropic’s `claude-1.2` model family. |
+| [`spacy.Claude-1-3.v1`](/api/large-language-models#claude-1-3) | Anthropic’s `claude-1.3` model family. |
+| [`spacy.Dolly.v1`](/api/large-language-models#dolly) | Dolly models through [Databricks](https://huggingface.co/databricks) on HuggingFace. |
+| [`spacy.Falcon.v1`](/api/large-language-models#falcon) | Falcon models through HuggingFace. |
+| [`spacy.StableLM.v1`](/api/large-language-models#stablelm) | StableLM models through HuggingFace. |
+| [`spacy.OpenLLaMA.v1`](/api/large-language-models#openllama) | OpenLLaMA models through HuggingFace. |
+| [LangChain models](/api/large-language-models#langchain-models) | LangChain models for API retrieval. |
+
+### Cache {id="cache"}
+
+Interacting with LLMs, either through an external API or a local instance, is
+costly. Since developing an NLP pipeline generally means a lot of exploration
+and prototyping, `spacy-llm` implements a built-in
+[cache](/api/large-language-models#cache) that keeps batches of documents
+stored on disk, so that the same documents aren't reprocessed at each run.
+
+### Various functions {id="various-functions"}
+
+| Component | Description |
+| ----------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| [`spacy.FewShotReader.v1`](/api/large-language-models#fewshotreader-v1) | This function is registered in spaCy's `misc` registry, and reads in examples from a `.yml`, `.yaml`, `.json` or `.jsonl` file. It uses [`srsly`](https://github.com/explosion/srsly) to read in these files and parses them depending on the file extension. 
| +| [`spacy.FileReader.v1`](/api/large-language-models#filereader-v1) | This function is registered in spaCy's `misc` registry, and reads a file provided to the `path` to return a `str` representation of its contents. This function is typically used to read [Jinja](https://jinja.palletsprojects.com/en/3.1.x/) files containing the prompt template. | +| [Normalizer functions](/api/large-language-models#normalizer-functions) | These functions provide simple normalizations for string comparisons, e.g. between a list of specified labels and a label given in the raw text of the LLM response. | diff --git a/website/meta/sidebars.json b/website/meta/sidebars.json index 04102095f..033f71b12 100644 --- a/website/meta/sidebars.json +++ b/website/meta/sidebars.json @@ -26,16 +26,19 @@ { "text": "Processing Pipelines", "url": "/usage/processing-pipelines" }, { "text": "Embeddings & Transformers", - "url": "/usage/embeddings-transformers", + "url": "/usage/embeddings-transformers" + }, + { + "text": "Large Language Models", + "url": "/usage/large-language-models", "tag": "new" }, - { "text": "Training Models", "url": "/usage/training", "tag": "new" }, + { "text": "Training Models", "url": "/usage/training" }, { "text": "Layers & Model Architectures", - "url": "/usage/layers-architectures", - "tag": "new" + "url": "/usage/layers-architectures" }, - { "text": "spaCy Projects", "url": "/usage/projects", "tag": "new" }, + { "text": "spaCy Projects", "url": "/usage/projects" }, { "text": "Saving & Loading", "url": "/usage/saving-loading" }, { "text": "Visualizers", "url": "/usage/visualizers" } ] @@ -102,6 +105,7 @@ { "text": "EntityLinker", "url": "/api/entitylinker" }, { "text": "EntityRecognizer", "url": "/api/entityrecognizer" }, { "text": "EntityRuler", "url": "/api/entityruler" }, + { "text": "Large Language Models", "url": "/api/large-language-models" }, { "text": "Lemmatizer", "url": "/api/lemmatizer" }, { "text": "Morphologizer", "url": "/api/morphologizer" }, { "text": "SentenceRecognizer", "url": "/api/sentencerecognizer" }, diff --git a/website/pages/index.tsx b/website/pages/index.tsx index fc0dba378..089d75b52 100644 --- a/website/pages/index.tsx +++ b/website/pages/index.tsx @@ -106,50 +106,21 @@ const Landing = () => {

- - - + + The spacy-llm package + {' '} + integrates Large Language Models (LLMs) into spaCy, featuring a modular + system for fast prototyping and prompting, + and turning unstructured responses into robust outputs for + various NLP tasks, no training data required.

-

- - Get a custom spaCy pipeline, tailor-made for your NLP problem by - spaCy's core developers. - -

-
    -
  • - Streamlined. Nobody knows spaCy better than we do. Send - us your pipeline requirements and we'll be ready to start producing - your solution in no time at all. -
  • -
  • - Production ready. spaCy pipelines are robust and easy - to deploy. You'll get a complete spaCy project folder which is - ready to spacy project run. -
  • -
  • - Predictable. You'll know exactly what you're - going to get and what it's going to cost. We quote fees up-front, - let you try before you buy, and don't charge for over-runs at our - end — all the risk is on us. -
  • -
  • - Maintainable. spaCy is an industry standard, and - we'll deliver your pipeline with full code, data, tests and - documentation, so your team can retrain, update and extend the solution - as your requirements change. -
  • -
{

- spaCy v3.0 features all new transformer-based pipelines{' '} - that bring spaCy's accuracy right up to the current{' '} - state-of-the-art. You can use any pretrained transformer to - train your own pipelines, and even share one transformer between multiple - components with multi-task learning. Training is now fully - configurable and extensible, and you can define your own custom models using{' '} - PyTorch, TensorFlow and other frameworks. + + +

+

+ + Get a custom spaCy pipeline, tailor-made for your NLP problem by + spaCy's core developers. + +

+
    +
  • + Streamlined. Nobody knows spaCy better than we do. Send + us your pipeline requirements and we'll be ready to start producing + your solution in no time at all. +
  • +
  • + Production ready. spaCy pipelines are robust and easy + to deploy. You'll get a complete spaCy project folder which is + ready to spacy project run. +
  • +
  • + Predictable. You'll know exactly what you're + going to get and what it's going to cost. We quote fees up-front, + let you try before you buy, and don't charge for over-runs at our + end — all the risk is on us. +
  • +
  • + Maintainable. spaCy is an industry standard, and + we'll deliver your pipeline with full code, data, tests and + documentation, so your team can retrain, update and extend the solution + as your requirements change. +
  • +
{ small >

- + Date: Mon, 24 Jul 2023 16:58:27 +0200 Subject: [PATCH 03/36] Switch from distutils to setuptools/sysconfig (#12853) Additionally remove outdated `is_new_osx` check and settings. --- setup.py | 31 +++---------------------------- 1 file changed, 3 insertions(+), 28 deletions(-) diff --git a/setup.py b/setup.py index 243554c7a..3b6fae37b 100755 --- a/setup.py +++ b/setup.py @@ -1,10 +1,9 @@ #!/usr/bin/env python from setuptools import Extension, setup, find_packages import sys -import platform import numpy -from distutils.command.build_ext import build_ext -from distutils.sysconfig import get_python_inc +from setuptools.command.build_ext import build_ext +from sysconfig import get_path from pathlib import Path import shutil from Cython.Build import cythonize @@ -88,30 +87,6 @@ COPY_FILES = { } -def is_new_osx(): - """Check whether we're on OSX >= 10.7""" - if sys.platform != "darwin": - return False - mac_ver = platform.mac_ver()[0] - if mac_ver.startswith("10"): - minor_version = int(mac_ver.split(".")[1]) - if minor_version >= 7: - return True - else: - return False - return False - - -if is_new_osx(): - # On Mac, use libc++ because Apple deprecated use of - # libstdc - COMPILE_OPTIONS["other"].append("-stdlib=libc++") - LINK_OPTIONS["other"].append("-lc++") - # g++ (used by unix compiler on mac) links to libstdc++ as a default lib. - # See: https://stackoverflow.com/questions/1653047/avoid-linking-to-libstdc - LINK_OPTIONS["other"].append("-nodefaultlibs") - - # By subclassing build_extensions we have the actual compiler that will be used which is really known only after finalize_options # http://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used class build_ext_options: @@ -204,7 +179,7 @@ def setup_package(): include_dirs = [ numpy.get_include(), - get_python_inc(plat_specific=True), + get_path("include"), ] ext_modules = [] ext_modules.append( From 98799d849e2a78f54797178e45fbe37c5161943d Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Wed, 26 Jul 2023 13:56:31 +0200 Subject: [PATCH 04/36] `SpanCat`: Remove invalid `threshold` config argument (#12860) --- website/docs/api/spancategorizer.mdx | 1 - 1 file changed, 1 deletion(-) diff --git a/website/docs/api/spancategorizer.mdx b/website/docs/api/spancategorizer.mdx index 81a473ac2..2b63d31ce 100644 --- a/website/docs/api/spancategorizer.mdx +++ b/website/docs/api/spancategorizer.mdx @@ -67,7 +67,6 @@ architectures and their arguments and hyperparameters. 
> ```python > from spacy.pipeline.spancat import DEFAULT_SPANCAT_SINGLELABEL_MODEL > config = { -> "threshold": 0.5, > "spans_key": "labeled_spans", > "model": DEFAULT_SPANCAT_SINGLELABEL_MODEL, > "suggester": {"@misc": "spacy.ngram_suggester.v1", "sizes": [1, 2, 3]}, From 51b9655470aca59df3adacc4b05c77cde6e5579b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20Kardos?= Date: Wed, 26 Jul 2023 16:05:53 +0200 Subject: [PATCH 05/36] Added OdyCy to spaCy Universe (#12826) * Added OdyCy to spaCy Universe * Replaced template tags Co-authored-by: Adriane Boyd --------- Co-authored-by: Adriane Boyd --- website/meta/universe.json | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index 75ec5fb5c..041ebbff8 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -67,6 +67,33 @@ "category": ["pipeline", "research"], "tags": ["latin"] }, + { + "id": "odycy", + "title": "OdyCy", + "slogan": "General-purpose language pipelines for premodern Greek.", + "description": "Academically validated modular NLP pipelines for premodern Greek. odyCy achieves state of the art performance on multiple tasks on unseen test data from the Universal Dependencies Perseus treebank, and performs second best on the PROIEL treebank’s test set on even more tasks. In addition performance also seems relatively stable across the two evaluation datasets in comparison with other NLP pipelines. OdyCy is being used at the Center for Humanities Computing for preprocessing and analyzing Ancient Greek corpora for New Testament research, meaning that you can expect consistent maintenance and improvements.", + "github": "centre-for-humanities-computing/odyCy", + "code_example": [ + "# To install the high-accuracy transformer-based pipeline", + "# pip install https://huggingface.co/chcaa/grc_odycy_joint_trf/resolve/main/grc_odycy_joint_trf-any-py3-none-any.whl", + "import spacy", + "", + "nlp = spacy.load('grc_odycy_joint_trf')", + "", + "doc = nlp('τὴν γοῦν Ἀττικὴν ἐκ τοῦ ἐπὶ πλεῖστον διὰ τὸ λεπτόγεων ἀστασίαστον οὖσαν ἄνθρωποι ᾤκουν οἱ αὐτοὶ αἰεί.')" + ], + "code_language": "python", + "url": "https://centre-for-humanities-computing.github.io/odyCy/", + "thumb": "https://raw.githubusercontent.com/centre-for-humanities-computing/odyCy/7b94fec60679d06272dca88a4dcfe0f329779aea/docs/_static/logo.svg", + "image": "https://github.com/centre-for-humanities-computing/odyCy/raw/main/docs/_static/logo_with_text_below.svg", + "author": "Jan Kostkan, Márton Kardos (Center for Humanities Computing, Aarhus University)", + "author_links": { + "github": "centre-for-humanities-computing", + "website": "https://chc.au.dk/" + }, + "category": ["pipeline", "standalone", "research"], + "tags": ["ancient Greek"] + }, { "id": "spacy-wasm", "title": "spacy-wasm", From 49055ed7c825bc5c6ce828ddf0ff0bcbf1527a7b Mon Sep 17 00:00:00 2001 From: Victoria <80417010+victorialslocum@users.noreply.github.com> Date: Mon, 31 Jul 2023 09:39:00 +0200 Subject: [PATCH 06/36] Add cli for finding locations of registered func (#12757) * Add cli for finding locations of registered func * fixes: naming and typing * isort * update naming * remove to find-function * remove file:// bit * use registry name if given and exit gracefully if a registry was not found * clean up failure msg * specify registry_name options * mypy fixes * return location for internal usage * add documentation * more mypy fixes * clean up example * add section to menu * add tests --------- Co-authored-by: 
svlandeg --- spacy/cli/__init__.py | 1 + spacy/cli/find_function.py | 69 +++++++++++++++++++++++++++++++++++++ spacy/tests/test_cli_app.py | 34 ++++++++++++++++++ website/docs/api/cli.mdx | 36 +++++++++++++++---- 4 files changed, 133 insertions(+), 7 deletions(-) create mode 100644 spacy/cli/find_function.py diff --git a/spacy/cli/__init__.py b/spacy/cli/__init__.py index 549a27616..60fe718c7 100644 --- a/spacy/cli/__init__.py +++ b/spacy/cli/__init__.py @@ -14,6 +14,7 @@ from .debug_diff import debug_diff # noqa: F401 from .debug_model import debug_model # noqa: F401 from .download import download # noqa: F401 from .evaluate import evaluate # noqa: F401 +from .find_function import find_function # noqa: F401 from .find_threshold import find_threshold # noqa: F401 from .info import info # noqa: F401 from .init_config import fill_config, init_config # noqa: F401 diff --git a/spacy/cli/find_function.py b/spacy/cli/find_function.py new file mode 100644 index 000000000..f99ce2adc --- /dev/null +++ b/spacy/cli/find_function.py @@ -0,0 +1,69 @@ +from typing import Optional, Tuple + +from catalogue import RegistryError +from wasabi import msg + +from ..util import registry +from ._util import Arg, Opt, app + + +@app.command("find-function") +def find_function_cli( + # fmt: off + func_name: str = Arg(..., help="Name of the registered function."), + registry_name: Optional[str] = Opt(None, "--registry", "-r", help="Name of the catalogue registry."), + # fmt: on +): + """ + Find the module, path and line number to the file the registered + function is defined in, if available. + + func_name (str): Name of the registered function. + registry_name (Optional[str]): Name of the catalogue registry. + + DOCS: https://spacy.io/api/cli#find-function + """ + if not registry_name: + registry_names = registry.get_registry_names() + for name in registry_names: + if registry.has(name, func_name): + registry_name = name + break + + if not registry_name: + msg.fail( + f"Couldn't find registered function: '{func_name}'", + exits=1, + ) + + assert registry_name is not None + find_function(func_name, registry_name) + + +def find_function(func_name: str, registry_name: str) -> Tuple[str, int]: + registry_desc = None + try: + registry_desc = registry.find(registry_name, func_name) + except RegistryError as e: + msg.fail( + f"Couldn't find registered function: '{func_name}' in registry '{registry_name}'", + ) + msg.fail(f"{e}", exits=1) + assert registry_desc is not None + + registry_path = None + line_no = None + if registry_desc["file"]: + registry_path = registry_desc["file"] + line_no = registry_desc["line_no"] + + if not registry_path or not line_no: + msg.fail( + f"Couldn't find path to registered function: '{func_name}' in registry '{registry_name}'", + exits=1, + ) + assert registry_path is not None + assert line_no is not None + + msg.good(f"Found registered function '{func_name}' at {registry_path}:{line_no}") + return str(registry_path), int(line_no) diff --git a/spacy/tests/test_cli_app.py b/spacy/tests/test_cli_app.py index 3a426113b..0e6d8e252 100644 --- a/spacy/tests/test_cli_app.py +++ b/spacy/tests/test_cli_app.py @@ -233,3 +233,37 @@ def test_project_push_pull(project_dir): result = CliRunner().invoke(app, ["project", "pull", remote, str(project_dir)]) assert result.exit_code == 0 assert test_file.is_file() + + +def test_find_function_valid(): + # example of architecture in main code base + function = "spacy.TextCatBOW.v2" + result = CliRunner().invoke(app, ["find-function", function, "-r", 
"architectures"]) + assert f"Found registered function '{function}'" in result.stdout + assert "textcat.py" in result.stdout + + result = CliRunner().invoke(app, ["find-function", function]) + assert f"Found registered function '{function}'" in result.stdout + assert "textcat.py" in result.stdout + + # example of architecture in spacy-legacy + function = "spacy.TextCatBOW.v1" + result = CliRunner().invoke(app, ["find-function", function]) + assert f"Found registered function '{function}'" in result.stdout + assert "spacy_legacy" in result.stdout + assert "textcat.py" in result.stdout + + +def test_find_function_invalid(): + # invalid registry + function = "spacy.TextCatBOW.v2" + registry = "foobar" + result = CliRunner().invoke( + app, ["find-function", function, "--registry", registry] + ) + assert f"Unknown function registry: '{registry}'" in result.stdout + + # invalid function + function = "spacy.TextCatBOW.v666" + result = CliRunner().invoke(app, ["find-function", function]) + assert f"Couldn't find registered function: '{function}'" in result.stdout diff --git a/website/docs/api/cli.mdx b/website/docs/api/cli.mdx index 6a87f78b8..d63ac6e1d 100644 --- a/website/docs/api/cli.mdx +++ b/website/docs/api/cli.mdx @@ -7,6 +7,7 @@ menu: - ['info', 'info'] - ['validate', 'validate'] - ['init', 'init'] + - ['find-function', 'find-function'] - ['convert', 'convert'] - ['debug', 'debug'] - ['train', 'train'] @@ -251,6 +252,27 @@ $ python -m spacy init labels [config_path] [output_path] [--code] [--verbose] [ | overrides | Config parameters to override. Should be options starting with `--` that correspond to the config section and value to override, e.g. `--paths.train ./train.spacy`. ~~Any (option/flag)~~ | | **CREATES** | The label files. | +## find-function {id="find-function",version="3.7",tag="command"} + +Find the module, path and line number to the file for a given registered +function. This functionality is helpful to understand where registered +functions, as used in the config file, are defined. + +```bash +$ python -m spacy find-function [func_name] [--registry] +``` + +> #### Example +> +> ```bash +> $ python -m spacy find-function spacy.TextCatBOW.v1 +> ``` + +| Name | Description | +| ------------------ | ----------------------------------------------------- | +| `func_name` | Name of the registered function. ~~str (positional)~~ | +| `--registry`, `-r` | Name of the catalogue registry. ~~str (option)~~ | + ## convert {id="convert",tag="command"} Convert files into spaCy's @@ -1651,10 +1673,10 @@ $ python -m spacy huggingface-hub push [whl_path] [--org] [--msg] [--verbose] > $ python -m spacy huggingface-hub push en_ner_fashion-0.0.0-py3-none-any.whl > ``` -| Name | Description | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | -| `whl_path` | The path to the `.whl` file packaged with [`spacy package`](https://spacy.io/api/cli#package). ~~Path(positional)~~ | -| `--org`, `-o` | Optional name of organization to which the pipeline should be uploaded. ~~str (option)~~ | -| `--msg`, `-m` | Commit message to use for update. Defaults to `"Update spaCy pipeline"`. ~~str (option)~~ | -| `--verbose`, `-V` | Output additional info for debugging, e.g. the full generated hub metadata. ~~bool (flag)~~ | -| **UPLOADS** | The pipeline to the hub. 
| +| Name | Description | +| ----------------- | ------------------------------------------------------------------------------------------------------------------- | +| `whl_path` | The path to the `.whl` file packaged with [`spacy package`](https://spacy.io/api/cli#package). ~~Path(positional)~~ | +| `--org`, `-o` | Optional name of organization to which the pipeline should be uploaded. ~~str (option)~~ | +| `--msg`, `-m` | Commit message to use for update. Defaults to `"Update spaCy pipeline"`. ~~str (option)~~ | +| `--verbose`, `-V` | Output additional info for debugging, e.g. the full generated hub metadata. ~~bool (flag)~~ | +| **UPLOADS** | The pipeline to the hub. | From c9e9dccf7951bd474c5be8ca46dad7290ae86ee2 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Mon, 31 Jul 2023 10:47:57 +0200 Subject: [PATCH 07/36] Add displaCy data structures to docs (2) (#12875) * Add data structures to docs * Adjusted descriptions for more consistency * Add _optional_ flag to parameters * Add tests and adjust optional title key in doc * Add title to dep visualizations * fix typo --------- Co-authored-by: thomashacker --- spacy/displacy/render.py | 2 + spacy/tests/test_displacy.py | 72 +++++++++++++++++ website/docs/api/top-level.mdx | 124 +++++++++++++++++++++++++++++ website/docs/usage/visualizers.mdx | 3 +- 4 files changed, 200 insertions(+), 1 deletion(-) diff --git a/spacy/displacy/render.py b/spacy/displacy/render.py index 47407bcb7..758dc07d5 100644 --- a/spacy/displacy/render.py +++ b/spacy/displacy/render.py @@ -313,6 +313,8 @@ class DependencyRenderer: self.lang = settings.get("lang", DEFAULT_LANG) render_id = f"{id_prefix}-{i}" svg = self.render_svg(render_id, p["words"], p["arcs"]) + if p.get("title"): + svg = TPL_TITLE.format(title=p.get("title")) + svg rendered.append(svg) if page: content = "".join([TPL_FIGURE.format(content=svg) for svg in rendered]) diff --git a/spacy/tests/test_displacy.py b/spacy/tests/test_displacy.py index 1570f8d09..e9b5a9aba 100644 --- a/spacy/tests/test_displacy.py +++ b/spacy/tests/test_displacy.py @@ -350,6 +350,78 @@ def test_displacy_render_wrapper(en_vocab): displacy.set_render_wrapper(lambda html: html) +def test_displacy_render_manual_dep(): + """Test displacy.render with manual data for dep style""" + parsed_dep = { + "words": [ + {"text": "This", "tag": "DT"}, + {"text": "is", "tag": "VBZ"}, + {"text": "a", "tag": "DT"}, + {"text": "sentence", "tag": "NN"}, + ], + "arcs": [ + {"start": 0, "end": 1, "label": "nsubj", "dir": "left"}, + {"start": 2, "end": 3, "label": "det", "dir": "left"}, + {"start": 1, "end": 3, "label": "attr", "dir": "right"}, + ], + "title": "Title", + } + html = displacy.render([parsed_dep], style="dep", manual=True) + for word in parsed_dep["words"]: + assert word["text"] in html + assert word["tag"] in html + + +def test_displacy_render_manual_ent(): + """Test displacy.render with manual data for ent style""" + parsed_ents = [ + { + "text": "But Google is starting from behind.", + "ents": [{"start": 4, "end": 10, "label": "ORG"}], + }, + { + "text": "But Google is starting from behind.", + "ents": [{"start": -100, "end": 100, "label": "COMPANY"}], + "title": "Title", + }, + ] + + html = displacy.render(parsed_ents, style="ent", manual=True) + for parsed_ent in parsed_ents: + assert parsed_ent["ents"][0]["label"] in html + if "title" in parsed_ent: + assert parsed_ent["title"] in html + + +def test_displacy_render_manual_span(): + """Test displacy.render with manual data for span style""" + parsed_spans = [ + { + 
"text": "Welcome to the Bank of China.", + "spans": [ + {"start_token": 3, "end_token": 6, "label": "ORG"}, + {"start_token": 5, "end_token": 6, "label": "GPE"}, + ], + "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."], + }, + { + "text": "Welcome to the Bank of China.", + "spans": [ + {"start_token": 3, "end_token": 6, "label": "ORG"}, + {"start_token": 5, "end_token": 6, "label": "GPE"}, + ], + "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."], + "title": "Title", + }, + ] + + html = displacy.render(parsed_spans, style="span", manual=True) + for parsed_span in parsed_spans: + assert parsed_span["spans"][0]["label"] in html + if "title" in parsed_span: + assert parsed_span["title"] in html + + def test_displacy_options_case(): ents = ["foo", "BAR"] colors = {"FOO": "red", "bar": "green"} diff --git a/website/docs/api/top-level.mdx b/website/docs/api/top-level.mdx index 64ec342cd..37e86a4bc 100644 --- a/website/docs/api/top-level.mdx +++ b/website/docs/api/top-level.mdx @@ -343,6 +343,130 @@ use with the `manual=True` argument in `displacy.render`. | `options` | Span-specific visualisation options. ~~Dict[str, Any]~~ | | **RETURNS** | Generated entities keyed by text (original text) and ents. ~~dict~~ | +### Visualizer data structures {id="displacy_structures"} + +You can use displaCy's data format to manually render data. This can be useful +if you want to visualize output from other libraries. You can find examples of +displaCy's different data formats below. + +> #### DEP example data structure +> +> ```json +> { +> "words": [ +> { "text": "This", "tag": "DT" }, +> { "text": "is", "tag": "VBZ" }, +> { "text": "a", "tag": "DT" }, +> { "text": "sentence", "tag": "NN" } +> ], +> "arcs": [ +> { "start": 0, "end": 1, "label": "nsubj", "dir": "left" }, +> { "start": 2, "end": 3, "label": "det", "dir": "left" }, +> { "start": 1, "end": 3, "label": "attr", "dir": "right" } +> ] +> } +> ``` + +#### Dependency Visualizer data structure {id="structure-dep"} + +| Dictionary Key | Description | +| -------------- | ----------------------------------------------------------------------------------------------------------- | +| `words` | List of dictionaries describing a word token (see structure below). ~~List[Dict[str, Any]]~~ | +| `arcs` | List of dictionaries describing the relations between words (see structure below). ~~List[Dict[str, Any]]~~ | +| _Optional_ | | +| `title` | Title of the visualization. ~~Optional[str]~~ | +| `settings` | Dependency Visualizer options (see [here](/api/top-level#displacy_options)). ~~Dict[str, Any]~~ | + + + +| Dictionary Key | Description | +| -------------- | ---------------------------------------- | +| `text` | Text content of the word. ~~str~~ | +| `tag` | Fine-grained part-of-speech. ~~str~~ | +| `lemma` | Base form of the word. ~~Optional[str]~~ | + + + + + +| Dictionary Key | Description | +| -------------- | ---------------------------------------------------- | +| `start` | The index of the starting token. ~~int~~ | +| `end` | The index of the ending token. ~~int~~ | +| `label` | The type of dependency relation. ~~str~~ | +| `dir` | Direction of the relation (`left`, `right`). 
~~str~~ |
+
+
+
+> #### ENT example data structure
+>
+> ```json
+> {
+>   "text": "But Google is starting from behind.",
+>   "ents": [{ "start": 4, "end": 10, "label": "ORG" }]
+> }
+> ```
+
+#### Named Entity Recognition data structure {id="structure-ent"}
+
+| Dictionary Key | Description                                                                                 |
+| -------------- | ------------------------------------------------------------------------------------------- |
+| `text`         | String representation of the document text. ~~str~~                                          |
+| `ents`         | List of dictionaries describing entities (see structure below). ~~List[Dict[str, Any]]~~     |
+| _Optional_     |                                                                                               |
+| `title`        | Title of the visualization. ~~Optional[str]~~                                                 |
+| `settings`     | Entity Visualizer options (see [here](/api/top-level#displacy_options)). ~~Dict[str, Any]~~   |
+
+
+
+| Dictionary Key | Description                                                          |
+| -------------- | -------------------------------------------------------------------- |
+| `start`        | The index of the first character of the entity. ~~int~~              |
+| `end`          | The index of the last character of the entity (exclusive). ~~int~~   |
+| `label`        | Label attached to the entity. ~~str~~                                 |
+| _Optional_     |                                                                        |
+| `kb_id`        | `KnowledgeBase` ID. ~~str~~                                            |
+| `kb_url`       | `KnowledgeBase` URL. ~~str~~                                           |
+
+
+
+> #### SPAN example data structure
+>
+> ```json
+> {
+>   "text": "Welcome to the Bank of China.",
+>   "spans": [
+>     { "start_token": 3, "end_token": 6, "label": "ORG" },
+>     { "start_token": 5, "end_token": 6, "label": "GPE" }
+>   ],
+>   "tokens": ["Welcome", "to", "the", "Bank", "of", "China", "."]
+> }
+> ```
+
+#### Span Classification data structure {id="structure-span"}
+
+| Dictionary Key | Description                                                                                |
+| -------------- | ------------------------------------------------------------------------------------------ |
+| `text`         | String representation of the document text. ~~str~~                                         |
+| `spans`        | List of dictionaries describing spans (see structure below). ~~List[Dict[str, Any]]~~       |
+| `tokens`       | List of word tokens. ~~List[str]~~                                                          |
+| _Optional_     |                                                                                              |
+| `title`        | Title of the visualization. ~~Optional[str]~~                                                |
+| `settings`     | Span Visualizer options (see [here](/api/top-level#displacy_options)). ~~Dict[str, Any]~~    |
+
+
+
+| Dictionary Key | Description                                                    |
+| -------------- | -------------------------------------------------------------- |
+| `start_token`  | The index of the first token of the span in `tokens`. ~~int~~  |
+| `end_token`    | The index of the last token of the span in `tokens`. ~~int~~   |
+| `label`        | Label attached to the span. ~~str~~                             |
+| _Optional_     |                                                                 |
+| `kb_id`        | `KnowledgeBase` ID. ~~str~~                                     |
+| `kb_url`       | `KnowledgeBase` URL. ~~str~~                                    |
+
+
+
 ### Visualizer options {id="displacy_options"}
 
 The `options` argument lets you specify additional settings for each visualizer.
diff --git a/website/docs/usage/visualizers.mdx b/website/docs/usage/visualizers.mdx
index 1ac931753..e73c4a16a 100644
--- a/website/docs/usage/visualizers.mdx
+++ b/website/docs/usage/visualizers.mdx
@@ -349,7 +349,8 @@ or
 [SyntaxNet](https://github.com/tensorflow/models/tree/master/research/syntaxnet).
 If you set `manual=True` on either `render()` or `serve()`, you can pass in data
 in displaCy's format as a dictionary (instead of `Doc` objects).
There are -helper functions for converting `Doc` objects to displaCy's format for use with +helper functions for converting `Doc` objects to +[displaCy's format](/api/top-level#displacy_structures) for use with `manual=True`: [`displacy.parse_deps`](/api/top-level#displacy.parse_deps), [`displacy.parse_ents`](/api/top-level#displacy.parse_ents), and [`displacy.parse_spans`](/api/top-level#displacy.parse_spans). From 186889ec9c4c7a5b6b9b88bea0c6c74a763998ee Mon Sep 17 00:00:00 2001 From: Andy Friedman Date: Mon, 31 Jul 2023 04:52:32 -0400 Subject: [PATCH 08/36] added entry for SaysWho (#12828) * Update universe.json added entry for Sayswho * Update universe.json updated sayswho entry * Update universe.json * Update website/meta/universe.json * Update website/meta/universe.json --------- Co-authored-by: Adriane Boyd --- website/meta/universe.json | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index 041ebbff8..2ed8b4b41 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -17,6 +17,31 @@ "category": ["extension"], "tags": [] }, + { + "id": "sayswho", + "title": "SaysWho", + "slogan": "Quote identification, attribution and resolution", + "description": "A Python package for identifying and attributing quotes in text. It uses a combination of spaCy functionality, logic and grammar to find quotes and their speakers, then uses the spaCy coreferencing model to better clarify who is speaking. Currently English only.", + "github": "afriedman412/sayswho", + "pip": "sayswho", + "code_language": "python", + "author": "Andy Friedman", + "author_links": { + "twitter": "@steadynappin", + "github": "afriedman412" + }, + "code_example": [ + "from sayswho import SaysWho", + "text = open(\"path/to/your/text_file.txt\").read()", + "sw = SaysWho()", + "sw.attribute(text)", + + "sw.expand_match() # see quote/cluster matches", + "sw.render_to_html() # output your text, quotes and cluster matches to an html file called \"temp.html\"" + ], + "category": ["standalone"], + "tags": ["attribution", "coref", "text-processing"] + }, { "id": "parsigs", "title": "parsigs", From a0a195688f87ffa6c37d7d4994a37a2acfd7e2ad Mon Sep 17 00:00:00 2001 From: Peter Baumgartner <5107405+pmbaumgartner@users.noreply.github.com> Date: Mon, 31 Jul 2023 08:45:04 -0400 Subject: [PATCH 09/36] Tests for CLI app - `init config` generates `train`-able config (#12173) * remove migration support form * initial test commit * add fixture * add combo test * pull out parameter example data * fix formatting on examples * remove unused import * remove unncessary fmt:off instructions * only set logger level if verbose flag is explicitly set --------- Co-authored-by: svlandeg --- spacy/cli/assemble.py | 3 +- spacy/cli/find_threshold.py | 4 +- spacy/cli/init_pipeline.py | 9 +- spacy/cli/train.py | 3 +- spacy/tests/test_cli_app.py | 161 +++++++++++++++++++++++++++++++++++- 5 files changed, 172 insertions(+), 8 deletions(-) diff --git a/spacy/cli/assemble.py b/spacy/cli/assemble.py index ee2500b27..f74bbacb5 100644 --- a/spacy/cli/assemble.py +++ b/spacy/cli/assemble.py @@ -40,7 +40,8 @@ def assemble_cli( DOCS: https://spacy.io/api/cli#assemble """ - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) # Make sure all files and paths exists if they are needed if not config_path or (str(config_path) != "-" and not config_path.exists()): msg.fail("Config file not found", config_path, 
exits=1) diff --git a/spacy/cli/find_threshold.py b/spacy/cli/find_threshold.py index 7aa32c0c6..48077fa51 100644 --- a/spacy/cli/find_threshold.py +++ b/spacy/cli/find_threshold.py @@ -52,8 +52,8 @@ def find_threshold_cli( DOCS: https://spacy.io/api/cli#find-threshold """ - - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) import_code(code_path) find_threshold( model=model, diff --git a/spacy/cli/init_pipeline.py b/spacy/cli/init_pipeline.py index 13202cb60..21eea8edf 100644 --- a/spacy/cli/init_pipeline.py +++ b/spacy/cli/init_pipeline.py @@ -39,7 +39,8 @@ def init_vectors_cli( you can use in the [initialize] block of your config to initialize a model with vectors. """ - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) msg.info(f"Creating blank nlp object for language '{lang}'") nlp = util.get_lang_class(lang)() if jsonl_loc is not None: @@ -87,7 +88,8 @@ def init_pipeline_cli( use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU") # fmt: on ): - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) overrides = parse_config_overrides(ctx.args) import_code(code_path) setup_gpu(use_gpu) @@ -116,7 +118,8 @@ def init_labels_cli( """Generate JSON files for the labels in the data. This helps speed up the training process, since spaCy won't have to preprocess the data to extract the labels.""" - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) if not output_path.exists(): output_path.mkdir(parents=True) overrides = parse_config_overrides(ctx.args) diff --git a/spacy/cli/train.py b/spacy/cli/train.py index 8bdabd39c..c72e13b26 100644 --- a/spacy/cli/train.py +++ b/spacy/cli/train.py @@ -47,7 +47,8 @@ def train_cli( DOCS: https://spacy.io/api/cli#train """ - util.logger.setLevel(logging.DEBUG if verbose else logging.INFO) + if verbose: + util.logger.setLevel(logging.DEBUG) overrides = parse_config_overrides(ctx.args) import_code(code_path) train(config_path, output_path, use_gpu=use_gpu, overrides=overrides) diff --git a/spacy/tests/test_cli_app.py b/spacy/tests/test_cli_app.py index 0e6d8e252..e6f3b5912 100644 --- a/spacy/tests/test_cli_app.py +++ b/spacy/tests/test_cli_app.py @@ -6,7 +6,7 @@ import srsly from typer.testing import CliRunner from spacy.cli._util import app, get_git_version -from spacy.tokens import Doc, DocBin +from spacy.tokens import Doc, DocBin, Span from .util import make_tempdir, normalize_whitespace @@ -267,3 +267,162 @@ def test_find_function_invalid(): function = "spacy.TextCatBOW.v666" result = CliRunner().invoke(app, ["find-function", function]) assert f"Couldn't find registered function: '{function}'" in result.stdout + + +example_words_1 = ["I", "like", "cats"] +example_words_2 = ["I", "like", "dogs"] +example_lemmas_1 = ["I", "like", "cat"] +example_lemmas_2 = ["I", "like", "dog"] +example_tags = ["PRP", "VBP", "NNS"] +example_morphs = [ + "Case=Nom|Number=Sing|Person=1|PronType=Prs", + "Tense=Pres|VerbForm=Fin", + "Number=Plur", +] +example_deps = ["nsubj", "ROOT", "dobj"] +example_pos = ["PRON", "VERB", "NOUN"] +example_ents = ["O", "O", "I-ANIMAL"] +example_spans = [(2, 3, "ANIMAL")] + +TRAIN_EXAMPLE_1 = dict( + words=example_words_1, + lemmas=example_lemmas_1, + tags=example_tags, + morphs=example_morphs, + deps=example_deps, + heads=[1, 1, 1], + pos=example_pos, + ents=example_ents, + 
spans=example_spans,
+    cats={"CAT": 1.0, "DOG": 0.0},
+)
+TRAIN_EXAMPLE_2 = dict(
+    words=example_words_2,
+    lemmas=example_lemmas_2,
+    tags=example_tags,
+    morphs=example_morphs,
+    deps=example_deps,
+    heads=[1, 1, 1],
+    pos=example_pos,
+    ents=example_ents,
+    spans=example_spans,
+    cats={"CAT": 0.0, "DOG": 1.0},
+)
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize(
+    "component,examples",
+    [
+        ("tagger", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]),
+        ("morphologizer", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]),
+        ("trainable_lemmatizer", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]),
+        ("parser", [TRAIN_EXAMPLE_1] * 30),
+        ("ner", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]),
+        ("spancat", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]),
+        ("textcat", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2]),
+    ],
+)
+def test_init_config_trainable(component, examples, en_vocab):
+    if component == "textcat":
+        train_docs = []
+        for example in examples:
+            doc = Doc(en_vocab, words=example["words"])
+            doc.cats = example["cats"]
+            train_docs.append(doc)
+    elif component == "spancat":
+        train_docs = []
+        for example in examples:
+            doc = Doc(en_vocab, words=example["words"])
+            doc.spans["sc"] = [
+                Span(doc, start, end, label) for start, end, label in example["spans"]
+            ]
+            train_docs.append(doc)
+    else:
+        train_docs = []
+        for example in examples:
+            # cats, spans are not valid kwargs for instantiating a Doc
+            example = {k: v for k, v in example.items() if k not in ("cats", "spans")}
+            doc = Doc(en_vocab, **example)
+            train_docs.append(doc)
+
+    with make_tempdir() as d_in:
+        train_bin = DocBin(docs=train_docs)
+        train_bin.to_disk(d_in / "train.spacy")
+        dev_bin = DocBin(docs=train_docs)
+        dev_bin.to_disk(d_in / "dev.spacy")
+        init_config_result = CliRunner().invoke(
+            app,
+            [
+                "init",
+                "config",
+                f"{d_in}/config.cfg",
+                "--lang",
+                "en",
+                "--pipeline",
+                component,
+            ],
+        )
+        assert init_config_result.exit_code == 0
+        train_result = CliRunner().invoke(
+            app,
+            [
+                "train",
+                f"{d_in}/config.cfg",
+                "--paths.train",
+                f"{d_in}/train.spacy",
+                "--paths.dev",
+                f"{d_in}/dev.spacy",
+                "--output",
+                f"{d_in}/model",
+            ],
+        )
+        assert train_result.exit_code == 0
+        assert Path(d_in / "model" / "model-last").exists()
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize(
+    "component,examples",
+    [("tagger,parser,morphologizer", [TRAIN_EXAMPLE_1, TRAIN_EXAMPLE_2] * 15)],
+)
+def test_init_config_trainable_multiple(component, examples, en_vocab):
+    train_docs = []
+    for example in examples:
+        example = {k: v for k, v in example.items() if k not in ("cats", "spans")}
+        doc = Doc(en_vocab, **example)
+        train_docs.append(doc)
+
+    with make_tempdir() as d_in:
+        train_bin = DocBin(docs=train_docs)
+        train_bin.to_disk(d_in / "train.spacy")
+        dev_bin = DocBin(docs=train_docs)
+        dev_bin.to_disk(d_in / "dev.spacy")
+        init_config_result = CliRunner().invoke(
+            app,
+            [
+                "init",
+                "config",
+                f"{d_in}/config.cfg",
+                "--lang",
+                "en",
+                "--pipeline",
+                component,
+            ],
+        )
+        assert init_config_result.exit_code == 0
+        train_result = CliRunner().invoke(
+            app,
+            [
+                "train",
+                f"{d_in}/config.cfg",
+                "--paths.train",
+                f"{d_in}/train.spacy",
+                "--paths.dev",
+                f"{d_in}/dev.spacy",
+                "--output",
+                f"{d_in}/model",
+            ],
+        )
+        assert train_result.exit_code == 0
+        assert Path(d_in / "model" / "model-last").exists()

From 222bd3c5b16968d7b6743b010d1bbbebb9b6df47 Mon Sep 17 00:00:00 2001
From: Madeesh Kannan
Date: Wed, 2 Aug 2023 08:06:41 +0200
Subject: [PATCH 10/36] Display model's full base version string in
 incompatibility warning (#12857)

---
 spacy/util.py | 2 +-
 1 file changed, 1
insertion(+), 1 deletion(-)

diff --git a/spacy/util.py b/spacy/util.py
index 762699a97..79fcb8b8d 100644
--- a/spacy/util.py
+++ b/spacy/util.py
@@ -894,7 +894,7 @@ def load_meta(path: Union[str, Path]) -> Dict[str, Any]:
         if "spacy_version" in meta:
             if not is_compatible_version(about.__version__, meta["spacy_version"]):
                 lower_version = get_model_lower_version(meta["spacy_version"])
-                lower_version = get_minor_version(lower_version)  # type: ignore[arg-type]
+                lower_version = get_base_version(lower_version)  # type: ignore[arg-type]
                 if lower_version is not None:
                     lower_version = "v" + lower_version
                 elif "spacy_git_version" in meta:

From 07407e07ab8de5864bb61c5fa6c857b3373922b6 Mon Sep 17 00:00:00 2001
From: Arman Mohammadi <45389988+arplusman@users.noreply.github.com>
Date: Wed, 2 Aug 2023 18:22:26 +0330
Subject: [PATCH 11/36] fix the regular expression matching on the full text
 (#12883)

There was a mistake in the regex pattern which caused it not to match all the
desired tokens. The problem was that when we use the r string literal prefix
to denote a raw string, we should not use two backslashes to represent a
backslash.
---
 website/docs/usage/rule-based-matching.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/usage/rule-based-matching.mdx b/website/docs/usage/rule-based-matching.mdx
index 39be5f47b..4f54415cb 100644
--- a/website/docs/usage/rule-based-matching.mdx
+++ b/website/docs/usage/rule-based-matching.mdx
@@ -311,7 +311,7 @@ import re
 nlp = spacy.load("en_core_web_sm")
 doc = nlp("The United States of America (USA) are commonly known as the United States (U.S. or US) or America.")
 
-expression = r"[Uu](nited|\\.?) ?[Ss](tates|\\.?)"
+expression = r"[Uu](nited|\.?) ?[Ss](tates|\.?)"
 for match in re.finditer(expression, doc.text):
     start, end = match.span()
     span = doc.char_span(start, end)

From 3b7faf4f5ea2346a1d71ba1d4583119d1508ba72 Mon Sep 17 00:00:00 2001
From: Sofie Van Landeghem
Date: Thu, 3 Aug 2023 08:37:43 +0200
Subject: [PATCH 12/36] fix (#12881)

---
 website/docs/api/spanruler.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/api/spanruler.mdx b/website/docs/api/spanruler.mdx
index d2d41f620..5889b1906 100644
--- a/website/docs/api/spanruler.mdx
+++ b/website/docs/api/spanruler.mdx
@@ -117,7 +117,7 @@ config. Any existing patterns are removed on initialization.
 >
 > [initialize.components.span_ruler.patterns]
 > @readers = "srsly.read_jsonl.v1"
-> path = "corpus/span_ruler_patterns.jsonl
+> path = "corpus/span_ruler_patterns.jsonl"
 > ```
 
 | Name | Description |

From 45af8a5dcfe8134ec7b7320393cbfc57b7c2b967 Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Fri, 4 Aug 2023 10:52:41 +0200
Subject: [PATCH 13/36] Update br tags (#12882)

* Fix displacy br tag

* Prefer <br>, also update package CLI

---
 spacy/cli/package.py         | 2 +-
 spacy/displacy/render.py     | 4 ++--
 spacy/tests/test_displacy.py | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/spacy/cli/package.py b/spacy/cli/package.py
index 4545578e6..12f195be1 100644
--- a/spacy/cli/package.py
+++ b/spacy/cli/package.py
@@ -403,7 +403,7 @@ def _format_sources(data: Any) -> str:
         if author:
             result += " ({})".format(author)
         sources.append(result)
-    return "<br />".join(sources)
+    return "<br>".join(sources)
 
 
 def _format_accuracy(data: Dict[str, Any], exclude: List[str] = ["speed"]) -> str:
diff --git a/spacy/displacy/render.py b/spacy/displacy/render.py
index 758dc07d5..2ab41ccc2 100644
--- a/spacy/displacy/render.py
+++ b/spacy/displacy/render.py
@@ -567,7 +567,7 @@ class EntityRenderer:
         for i, fragment in enumerate(fragments):
             markup += escape_html(fragment)
             if len(fragments) > 1 and i != len(fragments) - 1:
-                markup += "</br>"
+                markup += "<br>"
         if self.ents is None or label.upper() in self.ents:
             color = self.colors.get(label.upper(), self.default_color)
             ent_settings = {
@@ -585,7 +585,7 @@ class EntityRenderer:
         for i, fragment in enumerate(fragments):
             markup += escape_html(fragment)
             if len(fragments) > 1 and i != len(fragments) - 1:
-                markup += "</br>"
+                markup += "<br>"
         markup = TPL_ENTS.format(content=markup, dir=self.direction)
         if title:
             markup = TPL_TITLE.format(title=title) + markup
diff --git a/spacy/tests/test_displacy.py b/spacy/tests/test_displacy.py
index e9b5a9aba..12d903dca 100644
--- a/spacy/tests/test_displacy.py
+++ b/spacy/tests/test_displacy.py
@@ -113,7 +113,7 @@ def test_issue5838():
     doc = nlp(sample_text)
     doc.ents = [Span(doc, 7, 8, label="test")]
     html = displacy.render(doc, style="ent")
-    found = html.count("</br>")
+    found = html.count("<br>")
     assert found == 4

From 245e2ddc2584ff14d0c4a2725e078accd26bd310 Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Tue, 8 Aug 2023 11:27:28 +0200
Subject: [PATCH 14/36] Allow pydantic v2 using transitional v1 support
 (#12888)

---
 requirements.txt                              |  2 +-
 setup.cfg                                     |  2 +-
 .../pipeline/_edit_tree_internals/schemas.py  |  8 +++-
 spacy/schemas.py                              | 41 +++++++++++++------
 spacy/tests/pipeline/test_initialize.py       |  7 +++-
 spacy/tests/pipeline/test_pipe_factories.py   |  7 +++-
 spacy/tests/test_misc.py                      |  7 +++-
 7 files changed, 54 insertions(+), 20 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 4a131d18c..4bc7d3820 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -16,7 +16,7 @@ smart-open>=5.2.1,<7.0.0
 numpy>=1.15.0
 requests>=2.13.0,<3.0.0
 tqdm>=4.38.0,<5.0.0
-pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
+pydantic>=1.7.4,!=1.8,!=1.8.1,<3.0.0
 jinja2
 langcodes>=3.2.0,<4.0.0
 # Official Python utilities
diff --git a/setup.cfg b/setup.cfg
index 45734888f..05044ef5c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -58,7 +58,7 @@ install_requires =
     tqdm>=4.38.0,<5.0.0
     numpy>=1.15.0
     requests>=2.13.0,<3.0.0
-    pydantic>=1.7.4,!=1.8,!=1.8.1,<1.11.0
+    pydantic>=1.7.4,!=1.8,!=1.8.1,<3.0.0
     jinja2
     # Official Python utilities
     setuptools
diff --git a/spacy/pipeline/_edit_tree_internals/schemas.py b/spacy/pipeline/_edit_tree_internals/schemas.py
index 1e307b66c..89f2861ce 100644
--- a/spacy/pipeline/_edit_tree_internals/schemas.py
+++ b/spacy/pipeline/_edit_tree_internals/schemas.py
@@ -1,8 +1,12 @@
 from collections import defaultdict
 from typing import Any, Dict, List, Union
 
-from pydantic import BaseModel, Field, ValidationError
-from pydantic.types import StrictBool, StrictInt, StrictStr
+try:
+    from pydantic.v1 import BaseModel, Field, ValidationError
+    from pydantic.v1.types import StrictBool, StrictInt, StrictStr
+except ImportError:
+    from pydantic import BaseModel, Field, ValidationError  # type: ignore
+    from pydantic.types import StrictBool, StrictInt, StrictStr  # type: ignore
 
 
 class MatchNodeSchema(BaseModel):
diff --git a/spacy/schemas.py b/spacy/schemas.py
index 22f45372c..525885456 100644
--- a/spacy/schemas.py
+++ b/spacy/schemas.py
@@ -16,19 +16,34 @@ from typing import (
     Union,
 )
 
-from pydantic import (
-    BaseModel,
-    ConstrainedStr,
-    Field,
-    StrictBool,
-    StrictFloat,
-    StrictInt,
-    StrictStr,
-    ValidationError,
-    create_model,
-    validator,
-)
-from pydantic.main import ModelMetaclass
+try:
+    from pydantic.v1 import (
+        BaseModel,
+        ConstrainedStr,
+        Field,
+        StrictBool,
+        StrictFloat,
+        StrictInt,
+        StrictStr,
+        ValidationError,
+        create_model,
+        validator,
+    )
+    from pydantic.v1.main import ModelMetaclass
+except ImportError:
+    from pydantic import (  # type: ignore
+        BaseModel,
+        ConstrainedStr,
+        Field,
+        StrictBool,
+        StrictFloat,
+        StrictInt,
+        StrictStr,
+        ValidationError,
+        create_model,
+        validator,
+    )
+    from pydantic.main import ModelMetaclass  # type: ignore
 from thinc.api import ConfigValidationError, Model, Optimizer
 from thinc.config import Promise
diff --git a/spacy/tests/pipeline/test_initialize.py b/spacy/tests/pipeline/test_initialize.py
index 6dd4114f1..9854b391e 100644
--- a/spacy/tests/pipeline/test_initialize.py
+++ b/spacy/tests/pipeline/test_initialize.py
@@ -1,5 +1,10 @@
 import pytest
-from pydantic import StrictBool
+
+try:
+    from pydantic.v1 import StrictBool
+except ImportError:
+    from pydantic import StrictBool  # type: ignore
+
 from thinc.api import ConfigValidationError
 
 from spacy.lang.en import English
diff --git a/spacy/tests/pipeline/test_pipe_factories.py
b/spacy/tests/pipeline/test_pipe_factories.py index 0f1454b55..83b986784 100644 --- a/spacy/tests/pipeline/test_pipe_factories.py +++ b/spacy/tests/pipeline/test_pipe_factories.py @@ -1,5 +1,10 @@ import pytest -from pydantic import StrictInt, StrictStr + +try: + from pydantic.v1 import StrictInt, StrictStr +except ImportError: + from pydantic import StrictInt, StrictStr # type: ignore + from thinc.api import ConfigValidationError, Linear, Model import spacy diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py index 438f458ec..704a40485 100644 --- a/spacy/tests/test_misc.py +++ b/spacy/tests/test_misc.py @@ -3,7 +3,12 @@ import os from pathlib import Path import pytest -from pydantic import ValidationError + +try: + from pydantic.v1 import ValidationError +except ImportError: + from pydantic import ValidationError # type: ignore + from thinc.api import ( Config, ConfigValidationError, From c4e378df97bb3d966228b637184b6d81a50d5441 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 8 Aug 2023 12:58:28 +0200 Subject: [PATCH 15/36] Update CuPy extras (#12890) * Add `cuda12x` for `cupy-cuda12x`. * Drop `cuda-autodetect` from quickstart, set default to `cuda11x` instead. --- setup.cfg | 2 ++ website/src/widgets/quickstart-install.js | 14 +++++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/setup.cfg b/setup.cfg index 05044ef5c..4aaf0271b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -111,6 +111,8 @@ cuda117 = cupy-cuda117>=5.0.0b4,<13.0.0 cuda11x = cupy-cuda11x>=11.0.0,<13.0.0 +cuda12x = + cupy-cuda12x>=11.5.0,<13.0.0 cuda-autodetect = cupy-wheel>=11.0.0,<13.0.0 apple = diff --git a/website/src/widgets/quickstart-install.js b/website/src/widgets/quickstart-install.js index b6c8b9b4c..43e3a0eeb 100644 --- a/website/src/widgets/quickstart-install.js +++ b/website/src/widgets/quickstart-install.js @@ -10,15 +10,19 @@ const DEFAULT_PLATFORM = 'x86' const DEFAULT_MODELS = ['en'] const DEFAULT_OPT = 'efficiency' const DEFAULT_HARDWARE = 'cpu' -const DEFAULT_CUDA = 'cuda-autodetect' +const DEFAULT_CUDA = 'cuda11x' const CUDA = { '8.0': 'cuda80', '9.0': 'cuda90', - 9.1: 'cuda91', - 9.2: 'cuda92', + '9.1': 'cuda91', + '9.2': 'cuda92', '10.0': 'cuda100', - 10.1: 'cuda101', - '10.2, 11.0+': 'cuda-autodetect', + '10.1': 'cuda101', + '10.2': 'cuda102', + '11.0': 'cuda110', + '11.1': 'cuda111', + '11.2-11.x': 'cuda11x', + '12.x': 'cuda12x', } const LANG_EXTRAS = ['ja'] // only for languages with models From 458bc5f45c5e371bdbef43d58d078436ee496e43 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Tue, 8 Aug 2023 15:04:13 +0200 Subject: [PATCH 16/36] Set version to v3.6.1 (#12892) --- spacy/about.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacy/about.py b/spacy/about.py index cad6158da..0f8eee0ff 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -1,6 +1,6 @@ # fmt: off __title__ = "spacy" -__version__ = "3.6.0" +__version__ = "3.6.1" __download_url__ = "https://github.com/explosion/spacy-models/releases/download" __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json" __projects__ = "https://github.com/explosion/projects" From d50b8d51e20f4c66ac111e94fdc589e98769c03d Mon Sep 17 00:00:00 2001 From: denizcodeyaa <141595121+denizcodeyaa@users.noreply.github.com> Date: Fri, 11 Aug 2023 09:38:06 -0400 Subject: [PATCH 17/36] Update examples.py (#12895) Add: example sentences to improve the Turkish model. 
Let's get the tr_web_core_sm out in the world yaa
---
 spacy/lang/tr/examples.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/spacy/lang/tr/examples.py b/spacy/lang/tr/examples.py
index dfb324a4e..c912c950d 100644
--- a/spacy/lang/tr/examples.py
+++ b/spacy/lang/tr/examples.py
@@ -15,4 +15,7 @@ sentences = [
     "Türkiye'nin başkenti neresi?",
     "Bakanlar Kurulu 180 günlük eylem planını açıkladı.",
     "Merkez Bankası, beklentiler doğrultusunda faizlerde değişikliğe gitmedi.",
+    "Cemal Sureya kimdir?",
+    "Bunlari Biliyor muydunuz?",
+    "Altinoluk Turkiye haritasinin neresinde yer alir?",
 ]

From 64b8ee2dbe07ad70321a87cc55b653ef335f5c66 Mon Sep 17 00:00:00 2001
From: William Mattingly <62964060+wjbmattingly@users.noreply.github.com>
Date: Mon, 14 Aug 2023 17:44:14 +0300
Subject: [PATCH 18/36] Update universe.json (#12904)

* Update universe.json

added hobbit-spacy to the universe json

* Update universe.json

removed displacy from hobbit-spacy and added a default text.
---
 website/meta/universe.json | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/website/meta/universe.json b/website/meta/universe.json
index 2ed8b4b41..ec380f847 100644
--- a/website/meta/universe.json
+++ b/website/meta/universe.json
@@ -4444,6 +4444,31 @@
     },
     "category": ["pipeline", "standalone", "scientific"],
     "tags": ["ner"]
+  },
+  {
+    "id": "hobbit-spacy",
+    "title": "Hobbit spaCy",
+    "slogan": "NLP for Middle Earth",
+    "description": "Hobbit spaCy is a custom spaCy pipeline designed specifically for working with Middle Earth and texts from the world of J.R.R. Tolkien.",
+    "github": "wjbmattingly/hobbit-spacy",
+    "pip": "en-hobbit",
+    "code_example": [
+      "import spacy",
+      "",
+      "nlp = spacy.load('en_hobbit')",
+      "doc = nlp('Frodo saw Glorfindel and Glóin; and in a corner alone Strider was sitting, clad in his old travel - worn clothes again')"
+    ],
+    "code_language": "python",
+    "thumb": "https://github.com/wjbmattingly/hobbit-spacy/blob/main/images/hobbit-thumbnail.png?raw=true",
+    "image": "https://github.com/wjbmattingly/hobbit-spacy/raw/main/images/hobbitspacy.png",
+    "author": "W.J.B. Mattingly",
+    "author_links": {
+      "twitter": "wjb_mattingly",
+      "github": "wjbmattingly",
+      "website": "https://wjbmattingly.com"
+    },
+    "category": ["pipeline", "standalone"],
+    "tags": ["spans", "rules", "ner"]
   }
 ],

From 76a9f9c6c6546ec50cb00fab70dbf5f8ac6e0929 Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Wed, 16 Aug 2023 17:28:34 +0200
Subject: [PATCH 19/36] Docs: clarify abstract spacy.load examples (#12889)

---
 website/docs/api/top-level.mdx              | 2 +-
 website/docs/usage/processing-pipelines.mdx | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/website/docs/api/top-level.mdx b/website/docs/api/top-level.mdx
index 37e86a4bc..9cdc0c8ab 100644
--- a/website/docs/api/top-level.mdx
+++ b/website/docs/api/top-level.mdx
@@ -68,7 +68,7 @@ weights, and returns it.
 cls = spacy.util.get_lang_class(lang) # 1. Get Language class, e.g. English
 nlp = cls() # 2. Initialize it
 for name in pipeline:
-    nlp.add_pipe(name) # 3. Add the component to the pipeline
+    nlp.add_pipe(name, config={...}) # 3. Add the component to the pipeline
 nlp.from_disk(data_path) # 4. Load in the binary data
 ```
 
diff --git a/website/docs/usage/processing-pipelines.mdx b/website/docs/usage/processing-pipelines.mdx
index 307cb9dcb..6ec8a0513 100644
--- a/website/docs/usage/processing-pipelines.mdx
+++ b/website/docs/usage/processing-pipelines.mdx
@@ -244,7 +244,7 @@ tagging pipeline.
This is also why the pipeline state is always held by the
 together and returns an instance of `Language` with a pipeline set and access
 to the binary data:
 
-```python {title="spacy.load under the hood"}
+```python {title="spacy.load under the hood (abstract example)"}
 lang = "en"
 pipeline = ["tok2vec", "tagger", "parser", "ner", "attribute_ruler", "lemmatizer"]
 data_path = "path/to/en_core_web_sm/en_core_web_sm-3.0.0"
@@ -252,7 +252,7 @@ data_path = "path/to/en_core_web_sm/en_core_web_sm-3.0.0"
 cls = spacy.util.get_lang_class(lang) # 1. Get Language class, e.g. English
 nlp = cls() # 2. Initialize it
 for name in pipeline:
-    nlp.add_pipe(name) # 3. Add the component to the pipeline
+    nlp.add_pipe(name, config={...}) # 3. Add the component to the pipeline
 nlp.from_disk(data_path) # 4. Load in the binary data
 ```
 

From 6dd56868de3c5e8308ef2ad31d7b63e40a87fe01 Mon Sep 17 00:00:00 2001
From: Connor Brinton
Date: Mon, 21 Aug 2023 04:52:32 -0400
Subject: =?UTF-8?q?=F0=9F=93=9D=20Fix=20formula=20for=20rece?=
 =?UTF-8?q?ptive=20field=20in=20docs=20(#12918)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

spaCy's HashEmbedCNN layer performs convolutions over tokens to produce
contextualized embeddings using a `MaxoutWindowEncoder` layer. These
convolutions are implemented using Thinc's `expand_window` layer, which
concatenates `window_size` neighboring sequence items on either side of the
sequence item being processed. This is repeated across `depth` convolutional
layers.

For example, consider the sequence "ABCDE" and a `MaxoutWindowEncoder` layer
with a context window of 1 and a depth of 2. We'll focus on the token "C". We
can visually represent the contextual embedding produced for "C" as:

```mermaid
flowchart LR
A0(A0)
B0(B0)
C0(C0)
D0(D0)
E0(E0)
B1(B1)
C1(C1)
D1(D1)
C2(C2)
A0 --> B1
B0 --> B1
C0 --> B1
B0 --> C1
C0 --> C1
D0 --> C1
C0 --> D1
D0 --> D1
E0 --> D1
B1 --> C2
C1 --> C2
D1 --> C2
```

Described in words, this graph shows that before the first layer of the
convolution, the "receptive field" centered at each token consists only of
that same token. That is to say, we have a receptive field of 1. The first
layer of the convolution adds one neighboring token on either side to the
receptive field. Since this is done on both sides, the receptive field
increases by 2, giving the first layer a receptive field of 3. The second
layer of the convolution adds an _additional_ neighboring token on either
side to the receptive field, giving a final receptive field of 5.

However, this doesn't match the formula currently given in the docs, which
read:

> The receptive field of the CNN will be
> `depth * (window_size * 2 + 1)`, so a 4-layer network with a window
> size of `2` will be sensitive to 20 words at a time.

Substituting in our depth of 2 and window size of 1, this formula gives us a
receptive field of:

```
depth * (window_size * 2 + 1)
= 2 * (1 * 2 + 1)
= 2 * (2 + 1)
= 2 * 3
= 6
```

This not only doesn't match our computations from above, it's also an even
number! This is suspicious, since the receptive field is supposed to be
centered on a token, and not between tokens. Generally, this formula results
in an even number for any even value of `depth`.

The error in this formula is that the adjustment for the center token is
multiplied by the depth, when it should occur only once.
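We can check this empirically. The snippet below is an illustrative sketch,
not part of the patch itself: it tracks which input positions can reach the
output at the center position after `depth` rounds of window concatenation,
then compares the measured receptive field against both formulas.

```python
# Illustrative sketch: measure the receptive field by simulating how the
# set of visible input positions grows with each expand_window layer.

def receptive_field(depth: int, window_size: int, n_tokens: int = 99) -> int:
    center = n_tokens // 2
    visible = {center}  # before any layer, a position only sees itself
    for _ in range(depth):
        # each layer concatenates window_size neighbors on either side
        visible = {
            j
            for i in visible
            for j in range(i - window_size, i + window_size + 1)
            if 0 <= j < n_tokens
        }
    return len(visible)

for depth, window_size in [(2, 1), (4, 2)]:
    measured = receptive_field(depth, window_size)
    assert measured == depth * window_size * 2 + 1    # corrected formula
    assert measured != depth * (window_size * 2 + 1)  # formula in the docs
    print(f"depth={depth}, window_size={window_size}: receptive field {measured}")
```

This prints a receptive field of 5 for the small example above and 17 for the
4-layer, window-size-2 network mentioned in the docs.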
The corrected formula, `depth * window_size * 2 + 1`, gives the correct value for our small example from above: ``` depth * window_size * 2 + 1 = 2 * 1 * 2 + 1 = 4 + 1 = 5 ``` These changes update the docs to correct the receptive field formula and the example receptive field size. --- spacy/ml/models/tok2vec.py | 4 ++-- website/docs/api/architectures.mdx | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/spacy/ml/models/tok2vec.py b/spacy/ml/models/tok2vec.py index 2e9d21ef4..0edc89991 100644 --- a/spacy/ml/models/tok2vec.py +++ b/spacy/ml/models/tok2vec.py @@ -67,8 +67,8 @@ def build_hash_embed_cnn_tok2vec( are between 2 and 8. window_size (int): The number of tokens on either side to concatenate during the convolutions. The receptive field of the CNN will be - depth * (window_size * 2 + 1), so a 4-layer network with window_size of - 2 will be sensitive to 20 words at a time. Recommended value is 1. + depth * window_size * 2 + 1, so a 4-layer network with window_size of + 2 will be sensitive to 17 words at a time. Recommended value is 1. embed_size (int): The number of rows in the hash embedding tables. This can be surprisingly small, due to the use of the hash embeddings. Recommended values are between 2000 and 10000. diff --git a/website/docs/api/architectures.mdx b/website/docs/api/architectures.mdx index bab24f13b..a292194e9 100644 --- a/website/docs/api/architectures.mdx +++ b/website/docs/api/architectures.mdx @@ -83,7 +83,7 @@ consisting of a CNN and a layer-normalized maxout activation function. | `width` | The width of the input and output. These are required to be the same, so that residual connections can be used. Recommended values are `96`, `128` or `300`. ~~int~~ | | `depth` | The number of convolutional layers to use. Recommended values are between `2` and `8`. ~~int~~ | | `embed_size` | The number of rows in the hash embedding tables. This can be surprisingly small, due to the use of the hash embeddings. Recommended values are between `2000` and `10000`. ~~int~~ | -| `window_size` | The number of tokens on either side to concatenate during the convolutions. The receptive field of the CNN will be `depth * (window_size * 2 + 1)`, so a 4-layer network with a window size of `2` will be sensitive to 20 words at a time. Recommended value is `1`. ~~int~~ | +| `window_size` | The number of tokens on either side to concatenate during the convolutions. The receptive field of the CNN will be `depth * window_size * 2 + 1`, so a 4-layer network with a window size of `2` will be sensitive to 17 words at a time. Recommended value is `1`. ~~int~~ | | `maxout_pieces` | The number of pieces to use in the maxout non-linearity. If `1`, the [`Mish`](https://thinc.ai/docs/api-layers#mish) non-linearity is used instead. Recommended values are `1`-`3`. ~~int~~ | | `subword_features` | Whether to also embed subword features, specifically the prefix, suffix and word shape. This is recommended for alphabetic languages like English, but not if single-character tokens are used for a language such as Chinese. ~~bool~~ | | `pretrained_vectors` | Whether to also use static vectors. 
~~bool~~ | From d8a32c1050d2acb4fd121968d7e8780aae0b1382 Mon Sep 17 00:00:00 2001 From: PD Hall <20580126+pdhall99@users.noreply.github.com> Date: Tue, 29 Aug 2023 10:10:58 +0100 Subject: [PATCH 21/36] docs: fix ngram_range_suggester max_size description (#12939) --- website/docs/api/spancategorizer.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/api/spancategorizer.mdx b/website/docs/api/spancategorizer.mdx index 2b63d31ce..bfe33dfb9 100644 --- a/website/docs/api/spancategorizer.mdx +++ b/website/docs/api/spancategorizer.mdx @@ -521,7 +521,7 @@ has two columns, indicating the start and end position. | Name | Description | | ----------- | ---------------------------------------------------------------------------- | | `min_size` | The minimal phrase lengths to suggest (inclusive). ~~[int]~~ | -| `max_size` | The maximal phrase lengths to suggest (exclusive). ~~[int]~~ | +| `max_size` | The maximal phrase lengths to suggest (inclusive). ~~[int]~~ | | **CREATES** | The suggester function. ~~Callable[[Iterable[Doc], Optional[Ops]], Ragged]~~ | ### spacy.preset_spans_suggester.v1 {id="preset_spans_suggester"} From 52758e1afaa99b2ac47e0ae825f0a86d209952f4 Mon Sep 17 00:00:00 2001 From: Ines Montani Date: Wed, 30 Aug 2023 11:55:23 +0200 Subject: [PATCH 22/36] Add headers to netlify.toml [ci skip] --- website/netlify.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/website/netlify.toml b/website/netlify.toml index db7ae27c4..a99395918 100644 --- a/website/netlify.toml +++ b/website/netlify.toml @@ -16,3 +16,9 @@ NETLIFY_NEXT_PLUGIN_SKIP = "true" [[plugins]] package = "@netlify/plugin-nextjs" + +[[headers]] + for = "/*" + [headers.values] + X-Frame-Options = "DENY" + X-XSS-Protection = "1; mode=block" From 3e4264899c3b12f8eabc5cd700146177a34824d0 Mon Sep 17 00:00:00 2001 From: vincent d warmerdam Date: Wed, 30 Aug 2023 11:58:14 +0200 Subject: [PATCH 23/36] Update large-language-models.mdx (#12944) --- website/docs/api/large-language-models.mdx | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/website/docs/api/large-language-models.mdx b/website/docs/api/large-language-models.mdx index cc8328790..94b426cc8 100644 --- a/website/docs/api/large-language-models.mdx +++ b/website/docs/api/large-language-models.mdx @@ -893,7 +893,7 @@ OpenAI's `davinci` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Davinci.v1 " +> @llm_models = "spacy.Davinci.v1" > name = "davinci" > config = {"temperature": 0.3} > ``` @@ -914,7 +914,7 @@ OpenAI's `curie` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Curie.v1 " +> @llm_models = "spacy.Curie.v1" > name = "curie" > config = {"temperature": 0.3} > ``` @@ -935,7 +935,7 @@ OpenAI's `babbage` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Babbage.v1 " +> @llm_models = "spacy.Babbage.v1" > name = "babbage" > config = {"temperature": 0.3} > ``` @@ -956,7 +956,7 @@ OpenAI's `ada` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Ada.v1 " +> @llm_models = "spacy.Ada.v1" > name = "ada" > config = {"temperature": 0.3} > ``` @@ -977,7 +977,7 @@ Cohere's `command` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Command.v1 " +> @llm_models = "spacy.Command.v1" > name = "command" > config = {"temperature": 0.3} > ``` @@ -998,7 +998,7 @@ Anthropic's `claude-2` model family. 
> > ```ini > [components.llm.model] -> @llm_models = "spacy.Claude-2.v1 " +> @llm_models = "spacy.Claude-2.v1" > name = "claude-2" > config = {"temperature": 0.3} > ``` @@ -1019,7 +1019,7 @@ Anthropic's `claude-1` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Claude-1.v1 " +> @llm_models = "spacy.Claude-1.v1" > name = "claude-1" > config = {"temperature": 0.3} > ``` @@ -1040,7 +1040,7 @@ Anthropic's `claude-instant-1` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Claude-instant-1.v1 " +> @llm_models = "spacy.Claude-instant-1.v1" > name = "claude-instant-1" > config = {"temperature": 0.3} > ``` @@ -1061,7 +1061,7 @@ Anthropic's `claude-instant-1.1` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Claude-instant-1-1.v1 " +> @llm_models = "spacy.Claude-instant-1-1.v1" > name = "claude-instant-1.1" > config = {"temperature": 0.3} > ``` @@ -1082,7 +1082,7 @@ Anthropic's `claude-1.0` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Claude-1-0.v1 " +> @llm_models = "spacy.Claude-1-0.v1" > name = "claude-1.0" > config = {"temperature": 0.3} > ``` @@ -1124,7 +1124,7 @@ Anthropic's `claude-1.3` model family. > > ```ini > [components.llm.model] -> @llm_models = "spacy.Claude-1-3.v1 " +> @llm_models = "spacy.Claude-1-3.v1" > name = "claude-1.3" > config = {"temperature": 0.3} > ``` From 065ead4eed2608666c95dcad6037913e53fbf424 Mon Sep 17 00:00:00 2001 From: David Berenstein Date: Fri, 1 Sep 2023 11:05:36 +0200 Subject: [PATCH 24/36] updated `add_pipe` docs (#12947) --- website/meta/universe.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/meta/universe.json b/website/meta/universe.json index ec380f847..46de8121c 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -2806,7 +2806,7 @@ "", "# see github repo for examples on sentence-transformers and Huggingface", "nlp = spacy.load('en_core_web_md')", - "nlp.add_pipe(\"text_categorizer\", ", + "nlp.add_pipe(\"classy_classification\", ", " config={", " \"data\": data,", " \"model\": \"spacy\"", @@ -3010,8 +3010,8 @@ "# Load the spaCy language model:", "nlp = spacy.load(\"en_core_web_sm\")", "", - "# Add the \"text_categorizer\" pipeline component to the spaCy model, and configure it with SetFit parameters:", - "nlp.add_pipe(\"text_categorizer\", config={", + "# Add the \"spacy_setfit\" pipeline component to the spaCy model, and configure it with SetFit parameters:", + "nlp.add_pipe(\"spacy_setfit\", config={", " \"pretrained_model_name_or_path\": \"paraphrase-MiniLM-L3-v2\",", " \"setfit_trainer_args\": {", " \"train_dataset\": train_dataset", From 5c1f9264c219cfbd7b9c266c0bb8e00238f238c7 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Fri, 1 Sep 2023 13:47:20 +0200 Subject: [PATCH 25/36] fix typo in link (#12948) * fix typo in link * fix REL.v1 parameter --- website/docs/api/large-language-models.mdx | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/website/docs/api/large-language-models.mdx b/website/docs/api/large-language-models.mdx index 94b426cc8..e65945357 100644 --- a/website/docs/api/large-language-models.mdx +++ b/website/docs/api/large-language-models.mdx @@ -113,7 +113,7 @@ note that this requirement will be included in the prompt, but the task doesn't perform a hard cut-off. It's hence possible that your summary exceeds `max_n_words`. 
-To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. @@ -192,7 +192,7 @@ the following parameters: span to the next token boundaries, e.g. expanding `"New Y"` out to `"New York"`. -To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. @@ -282,7 +282,7 @@ the following parameters: span to the next token boundaries, e.g. expanding `"New Y"` out to `"New York"`. -To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. @@ -397,7 +397,7 @@ definitions are included in the prompt. | `allow_none` | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~ | | `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | -To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. @@ -452,7 +452,7 @@ prompting and includes an improved prompt template. | `allow_none` | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~ | | `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | -To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. @@ -502,7 +502,7 @@ prompting. | `allow_none` | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Deafults to `True`. ~~bool~~ | | `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Deafults to `False`. ~~bool~~ | -To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. 
The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. @@ -546,12 +546,12 @@ on an upstream NER component for entities extraction. | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | | `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [`rel.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/rel.jinja). ~~str~~ | -| `label_description` | Dictionary providing a description for each relation label. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `label_definitions` | Dictionary providing a description for each relation label. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | | `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | | `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~ | | `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | -To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. @@ -565,6 +565,7 @@ supports `.yml`, `.yaml`, `.json` and `.jsonl`. [components.llm.task] @llm_tasks = "spacy.REL.v1" labels = ["LivesIn", "Visits"] + [components.llm.task.examples] @misc = "spacy.FewShotReader.v1" path = "rel_examples.jsonl" @@ -613,7 +614,7 @@ doesn't match the number of tokens from the pipeline's tokenizer, no lemmas are stored in the corresponding doc's tokens. Otherwise the tokens `.lemma_` property is updated with the lemma suggested by the LLM. -To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. @@ -666,7 +667,7 @@ issues (e. g. in case of unexpected LLM responses) the value might be `None`. | `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | | `field` | Name of extension attribute to store summary in (i. e. the summary will be available in `doc._.{field}`). Defaults to `sentiment`. ~~str~~ | -To perform [few-shot learning](/usage/large-langauge-models#few-shot-prompts), +To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` supports `.yml`, `.yaml`, `.json` and `.jsonl`. 
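The few-shot reader and the renamed `label_definitions` parameter fixed above
can be exercised end-to-end with `spacy-llm`'s `assemble` helper. The snippet
below is a minimal sketch, not part of the patch: the `config.cfg` and
`rel_examples.jsonl` files are hypothetical and assumed to follow the config
excerpts shown in the diff, with an NER component running before the REL task.

```python
# Minimal sketch: build an LLM pipeline from a config file.
# Assumes a local config.cfg wiring up "spacy.REL.v1" (with
# label_definitions and a FewShotReader pointing at rel_examples.jsonl)
# and that API credentials are available in the environment.
from spacy_llm.util import assemble

nlp = assemble("config.cfg")
doc = nlp("Laura lives in Boston and often visits New York.")

# The REL task relies on pre-extracted entities; its predictions are
# stored (by default) in the doc._.rel custom attribute.
print([(ent.text, ent.label_) for ent in doc.ents])
print(doc._.rel)
```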
From 6d1f6d9a23b4232e2ca67b5c2a15b62add8b5411 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Mon, 4 Sep 2023 09:05:50 +0200 Subject: [PATCH 26/36] Fix LLM usage example (#12950) * fix usage example * revert back to v2 to allow hot fix on main --- website/docs/usage/large-language-models.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/usage/large-language-models.mdx b/website/docs/usage/large-language-models.mdx index 3c2c52c68..4da9a8f16 100644 --- a/website/docs/usage/large-language-models.mdx +++ b/website/docs/usage/large-language-models.mdx @@ -184,7 +184,7 @@ nlp.add_pipe( "labels": ["PERSON", "ORGANISATION", "LOCATION"] }, "model": { - "@llm_models": "spacy.gpt-3.5.v1", + "@llm_models": "spacy.GPT-3-5.v1", }, }, ) From cc788476881ca456ddcb985675dc292fd75d5f40 Mon Sep 17 00:00:00 2001 From: Magdalena Aniol <96200718+magdaaniol@users.noreply.github.com> Date: Wed, 6 Sep 2023 16:38:13 +0200 Subject: [PATCH 27/36] fix training.batch_size example (#12963) --- website/docs/usage/training.mdx | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/website/docs/usage/training.mdx b/website/docs/usage/training.mdx index 98333db72..abb1b9cfd 100644 --- a/website/docs/usage/training.mdx +++ b/website/docs/usage/training.mdx @@ -180,7 +180,7 @@ Some of the main advantages and features of spaCy's training config are: Under the hood, the config is parsed into a dictionary. It's divided into sections and subsections, indicated by the square brackets and dot notation. For -example, `[training]` is a section and `[training.batch_size]` a subsection. +example, `[training]` is a section and `[training.batcher]` a subsection. Subsections can define values, just like a dictionary, or use the `@` syntax to refer to [registered functions](#config-functions). This allows the config to not just define static settings, but also construct objects like architectures, @@ -254,7 +254,7 @@ For cases like this, you can set additional command-line options starting with block. ```bash -$ python -m spacy train config.cfg --paths.train ./corpus/train.spacy --paths.dev ./corpus/dev.spacy --training.batch_size 128 +$ python -m spacy train config.cfg --paths.train ./corpus/train.spacy --paths.dev ./corpus/dev.spacy --training.max_epochs 3 ``` Only existing sections and values in the config can be overwritten. At the end @@ -279,7 +279,7 @@ process. Environment variables **take precedence** over CLI overrides and values defined in the config file. ```bash -$ SPACY_CONFIG_OVERRIDES="--system.gpu_allocator pytorch --training.batch_size 128" ./your_script.sh +$ SPACY_CONFIG_OVERRIDES="--system.gpu_allocator pytorch --training.max_epochs 3" ./your_script.sh ``` ### Reading from standard input {id="config-stdin"} @@ -578,16 +578,17 @@ now-updated model to the predicted docs. The training configuration defined in the config file doesn't have to only consist of static values. Some settings can also be **functions**. For instance, -the `batch_size` can be a number that doesn't change, or a schedule, like a +the batch size can be a number that doesn't change, or a schedule, like a sequence of compounding values, which has shown to be an effective trick (see [Smith et al., 2017](https://arxiv.org/abs/1711.00489)). 
```ini {title="With static value"} -[training] -batch_size = 128 +[training.batcher] +@batchers = "spacy.batch_by_words.v1" +size = 3000 ``` -To refer to a function instead, you can make `[training.batch_size]` its own +To refer to a function instead, you can make `[training.batcher.size]` its own section and use the `@` syntax to specify the function and its arguments – in this case [`compounding.v1`](https://thinc.ai/docs/api-schedules#compounding) defined in the [function registry](/api/top-level#registry). All other values @@ -606,7 +607,7 @@ from your configs. > optimizer. ```ini {title="With registered function"} -[training.batch_size] +[training.batcher.size] @schedules = "compounding.v1" start = 100 stop = 1000 @@ -1027,14 +1028,14 @@ def my_custom_schedule(start: int = 1, factor: float = 1.001): ``` In your config, you can now reference the schedule in the -`[training.batch_size]` block via `@schedules`. If a block contains a key +`[training.batcher.size]` block via `@schedules`. If a block contains a key starting with an `@`, it's interpreted as a reference to a function. All other settings in the block will be passed to the function as keyword arguments. Keep in mind that the config shouldn't have any hidden defaults and all arguments on the functions need to be represented in the config. ```ini {title="config.cfg (excerpt)"} -[training.batch_size] +[training.batcher.size] @schedules = "my_custom_schedule.v1" start = 2 factor = 1.005 From def7013eec784347bdc37060f1a04f52f1d708e0 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Fri, 8 Sep 2023 10:25:14 +0200 Subject: [PATCH 28/36] Docs for spacy-llm 0.5.0 (#12968) * Update incorrect example config. (#12893) * spacy-llm docs cleanup (#12945) * Shorten NER section * fix template references * simplify sections * set temperature to 0.0 in examples * condense model information * fix parameters for REST models * set temperature to 0.0 * spelling fix * trigger preview * fix quotes * add small note on noop.v1 * move up example noop config * set appropriate model example configs * explain config * fix Co-authored-by: Raphael Mitsch --------- Co-authored-by: Raphael Mitsch * Docs for ner.v3 and spancat.v3 spacy-llm tasks (#12949) * formatting * update usage table with NER.v3 * fix typo in links * v3 overview of parameters * add spancat.v3 * add further v3 explanations * remove TODO comment * few more small fixes * Add doc section on LLM + task factories (#12905) * Add section on LLM + task factories. 
* Apply suggestions from code review --------- Co-authored-by: Sofie Van Landeghem * add default config to openai models (#12961) * Docs for spacy-llm 0.5.0 (#12967) * simplify Python example * simplify Python example * Refer only to latest OpenAI model versions from usage doc * Typo fix Co-authored-by: Raphael Mitsch * clarify accuracy claim --------- Co-authored-by: Raphael Mitsch --------- Co-authored-by: Raphael Mitsch --- website/docs/api/large-language-models.mdx | 1384 +++++++----------- website/docs/usage/large-language-models.mdx | 117 +- 2 files changed, 606 insertions(+), 895 deletions(-) diff --git a/website/docs/api/large-language-models.mdx b/website/docs/api/large-language-models.mdx index e65945357..1ac9b0cef 100644 --- a/website/docs/api/large-language-models.mdx +++ b/website/docs/api/large-language-models.mdx @@ -2,7 +2,7 @@ title: Large Language Models teaser: Integrating LLMs into structured NLP pipelines menu: - - ['Config', 'config'] + - ['Config and implementation', 'config'] - ['Tasks', 'tasks'] - ['Models', 'models'] - ['Cache', 'cache'] @@ -14,45 +14,200 @@ Language Models (LLMs) into spaCy, featuring a modular system for **fast prototyping** and **prompting**, and turning unstructured responses into **robust outputs** for various NLP tasks, **no training data** required. -## Config {id="config"} +## Config and implementation {id="config"} -`spacy-llm` exposes a `llm` factory that accepts the following configuration -options: +An LLM component is implemented through the `LLMWrapper` class. It is accessible +through a generic `llm` +[component factory](https://spacy.io/usage/processing-pipelines#custom-components-factories) +as well as through task-specific component factories: -| Argument | Description | -| ---------------- | ------------------------------------------------------------------------------------------------------- | -| `task` | An LLMTask can generate prompts and parse LLM responses. See [docs](#tasks). ~~Optional[LLMTask]~~ | -| `model` | Callable querying a specific LLM API. See [docs](#models). ~~Callable[[Iterable[Any]], Iterable[Any]]~~ | -| `cache` | Cache to use for caching prompts and responses per doc (batch). See [docs](#cache). ~~Cache~~ | -| `save_io` | Whether to save prompts/responses within `Doc.user_data["llm_io"]`. ~~bool~~ | -| `validate_types` | Whether to check if signatures of configured model and task are consistent. ~~bool~~ | +- `llm_ner` +- `llm_spancat` +- `llm_rel` +- `llm_textcat` +- `llm_sentiment` +- `llm_summarization` -An `llm` component is defined by two main settings: +### LLMWrapper.\_\_init\_\_ {id="init",tag="method"} -- A [**task**](#tasks), defining the prompt to send to the LLM as well as the - functionality to parse the resulting response back into structured fields on - the [Doc](/api/doc) objects. -- A [**model**](#models) defining the model and how to connect to it. Note that - `spacy-llm` supports both access to external APIs (such as OpenAI) as well as - access to self-hosted open-source LLMs (such as using Dolly through Hugging - Face). 
> #### Example
>
> ```python
> # Construction via add_pipe with default GPT3.5 model and NER task
> config = {"task": {"@llm_tasks": "spacy.NER.v3", "labels": ["PERSON", "ORGANISATION", "LOCATION"]}}
> llm = nlp.add_pipe("llm", config=config)
>
> # Construction via add_pipe with task-specific factory and default GPT3.5 model
> parser = nlp.add_pipe("llm_ner")
>
> # Construction from class
> from spacy_llm.pipeline import LLMWrapper
> llm = LLMWrapper(vocab=nlp.vocab, task=task, model=model, cache=cache, save_io=True)
> ```

Create a new pipeline instance. In your application, you would normally use a
shortcut for this and instantiate the component using its string name and
[`nlp.add_pipe`](/api/language#add_pipe).

| Name           | Description                                                                                        |
| -------------- | -------------------------------------------------------------------------------------------------- |
| `name`         | String name of the component instance. `llm` by default. ~~str~~                                   |
| _keyword-only_ |                                                                                                    |
| `vocab`        | The shared vocabulary. ~~Vocab~~                                                                   |
| `task`         | An [LLM Task](#tasks) can generate prompts and parse LLM responses. ~~LLMTask~~                    |
| `model`        | The [LLM Model](#models) queries a specific LLM API. ~~Callable[[Iterable[Any]], Iterable[Any]]~~  |
| `cache`        | [Cache](#cache) to use for caching prompts and responses per doc. ~~Cache~~                        |
| `save_io`      | Whether to save LLM I/O (prompts and responses) in the `Doc._.llm_io` custom attribute. ~~bool~~   |

### LLMWrapper.\_\_call\_\_ {id="call",tag="method"}

Apply the pipe to one document. The document is modified in place and returned.
This usually happens under the hood when the `nlp` object is called on a text
and all pipeline components are applied to the `Doc` in order.

> #### Example
>
> ```python
> doc = nlp("Ingrid visited Paris.")
> llm_ner = nlp.add_pipe("llm_ner")
> # This usually happens under the hood
> processed = llm_ner(doc)
> ```

| Name        | Description                      |
| ----------- | -------------------------------- |
| `doc`       | The document to process. ~~Doc~~ |
| **RETURNS** | The processed document. ~~Doc~~  |

### LLMWrapper.pipe {id="pipe",tag="method"}

Apply the pipe to a stream of documents. This usually happens under the hood
when the `nlp` object is called on a text and all pipeline components are
applied to the `Doc` in order.

> #### Example
>
> ```python
> llm_ner = nlp.add_pipe("llm_ner")
> for doc in llm_ner.pipe(docs, batch_size=50):
>     pass
> ```

| Name           | Description                                                   |
| -------------- | ------------------------------------------------------------- |
| `docs`         | A stream of documents. ~~Iterable[Doc]~~                      |
| _keyword-only_ |                                                               |
| `batch_size`   | The number of documents to buffer. Defaults to `128`. 
~~int~~ | +| **YIELDS** | The processed documents in order. ~~Doc~~ | + +### LLMWrapper.add_label {id="add_label",tag="method"} + +Add a new label to the pipe's task. Alternatively, provide the labels upon the +[task](#task) definition, or through the `[initialize]` block of the +[config](#config). + +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> llm_ner.add_label("MY_LABEL") +> ``` + +| Name | Description | +| ----------- | ----------------------------------------------------------- | +| `label` | The label to add. ~~str~~ | +| **RETURNS** | `0` if the label is already present, otherwise `1`. ~~int~~ | + +### LLMWrapper.to_disk {id="to_disk",tag="method"} + +Serialize the pipe to disk. + +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> llm_ner.to_disk("/path/to/llm_ner") +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| `path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | + +### LLMWrapper.from_disk {id="from_disk",tag="method"} + +Load the pipe from disk. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> llm_ner.from_disk("/path/to/llm_ner") +> ``` + +| Name | Description | +| -------------- | ----------------------------------------------------------------------------------------------- | +| `path` | A path to a directory. Paths may be either strings or `Path`-like objects. ~~Union[str, Path]~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The modified `LLMWrapper` object. ~~LLMWrapper~~ | + +### LLMWrapper.to_bytes {id="to_bytes",tag="method"} + +> #### Example +> +> ```python +> llm_ner = nlp.add_pipe("llm_ner") +> ner_bytes = llm_ner.to_bytes() +> ``` + +Serialize the pipe to a bytestring. + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The serialized form of the `LLMWrapper` object. ~~bytes~~ | + +### LLMWrapper.from_bytes {id="from_bytes",tag="method"} + +Load the pipe from a bytestring. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> ner_bytes = llm_ner.to_bytes() +> llm_ner = nlp.add_pipe("llm_ner") +> llm_ner.from_bytes(ner_bytes) +> ``` + +| Name | Description | +| -------------- | ------------------------------------------------------------------------------------------- | +| `bytes_data` | The data to load from. ~~bytes~~ | +| _keyword-only_ | | +| `exclude` | String names of [serialization fields](#serialization-fields) to exclude. ~~Iterable[str]~~ | +| **RETURNS** | The `LLMWrapper` object. ~~LLMWrapper~~ | + +### LLMWrapper.labels {id="labels",tag="property"} + +The labels currently added to the component. Empty tuple if the LLM's task does +not require labels. 
+ +> #### Example +> +> ```python +> llm_ner.add_label("MY_LABEL") +> assert "MY_LABEL" in llm_ner.labels +> ``` + +| Name | Description | +| ----------- | ------------------------------------------------------ | +| **RETURNS** | The labels added to the component. ~~Tuple[str, ...]~~ | + +## Tasks {id="tasks"} + +### Task implementation {id="task-implementation"} A _task_ defines an NLP problem or question, that will be sent to the LLM via a prompt. Further, the task defines how to parse the LLM's responses back into @@ -86,6 +241,11 @@ objects. This depends on the return type of the [model](#models). | `responses` | The generated prompts. ~~Iterable[Any]~~ | | **RETURNS** | The annotated documents. ~~Iterable[Doc]~~ | +### Summarization {id="summarization"} + +A summarization task takes a document as input and generates a summary that is +stored in an extension attribute. + #### spacy.Summarization.v1 {id="summarization-v1"} The `spacy.Summarization.v1` task supports both zero-shot and few-shot @@ -100,12 +260,12 @@ prompting. > max_n_words = null > ``` -| Argument | Description | -| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [summarization.jinja](./spacy_llm/tasks/templates/summarization.jinja). ~~str~~ | -| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | -| `max_n_words` | Maximum number of words to be used in summary. Note that this should not expected to work exactly. Defaults to `None`. ~~Optional[int]~~ | -| `field` | Name of extension attribute to store summary in (i. e. the summary will be available in `doc._.{field}`). Defaults to `summary`. ~~str~~ | +| Argument | Description | +| ------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` | Custom prompt template to send to LLM model. Defaults to [summarization.v1.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/summarization.v1.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `max_n_words` | Maximum number of words to be used in summary. Note that this should not expected to work exactly. Defaults to `None`. ~~Optional[int]~~ | +| `field` | Name of extension attribute to store summary in (i. e. the summary will be available in `doc._.{field}`). Defaults to `summary`. ~~str~~ | The summarization task prompts the model for a concise summary of the provided text. It optionally allows to limit the response to a certain number of tokens - @@ -146,11 +306,111 @@ max_n_words = 20 path = "summarization_examples.yml" ``` +### NER {id="ner"} + +The NER task identifies non-overlapping entities in text. + +#### spacy.NER.v3 {id="ner-v3"} + +Version 3 is fundamentally different to v1 and v2, as it implements +Chain-of-Thought prompting, based on the +[PromptNER paper](https://arxiv.org/pdf/2305.15444.pdf) by Ashok and Lipton +(2023). 
In an internal use case, we have found this implementation to obtain
significantly better accuracy, with an increase in F-score of up to 15
percentage points.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.NER.v3"
> labels = ["PERSON", "ORGANISATION", "LOCATION"]
> ```

When no examples are [specified](/usage/large-language-models#few-shot-prompts),
the v3 implementation will use a dummy example in the prompt. Technically this
means that the task will always perform few-shot prompting under the hood.

| Argument                  | Description                                                                                                                                                                                              |
| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `labels`                  | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~                                                                                                                      |
| `label_definitions`       | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~  |
| `template`                | Custom prompt template to send to LLM model. Defaults to [ner.v3.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/ner.v3.jinja). ~~str~~                               |
| `description` (NEW)       | A description of what to recognize or not recognize as entities. ~~str~~                                                                                                                                |
| `examples`                | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                                          |
| `normalizer`              | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~                               |
| `alignment_mode`          | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~                          |
| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~                                                                                                                               |

Note that the `single_match` parameter, used in v1 and v2, is not supported
anymore, as the CoT parsing algorithm takes care of this automatically.

New to v3 is the fact that you can provide an explicit description of what
entities should look like. You can use this feature in addition to
`label_definitions`.

```ini
[components.llm.task]
@llm_tasks = "spacy.NER.v3"
labels = ["DISH", "INGREDIENT", "EQUIPMENT"]
description = Entities are the names of food dishes,
    ingredients, and any kind of cooking equipment.
    Adjectives, verbs, adverbs are not entities.
    Pronouns are not entities.

[components.llm.task.label_definitions]
DISH = "Known food dishes, e.g. Lobster Ravioli, garlic bread"
INGREDIENT = "Individual parts of a food dish, including herbs and spices."
EQUIPMENT = "Any kind of cooking equipment. e.g. oven, cooking pot, grill"
```

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1`
supports `.yml`, `.yaml`, `.json` and `.jsonl`.

While not required, this task works best when both positive and negative
examples are provided. The format differs from the files required for v1 and
v2, as additional fields such as `is_entity` and `reason` should now be
provided. 
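The same task can also be wired up directly in Python via the generic `llm`
factory. This is a sketch only - the text and labels are illustrative, it
assumes API credentials (e.g. `OPENAI_API_KEY`) are set in the environment, and
it relies on the default model:

```python
# Sketch: NER v3 with an explicit description and label definitions.
import spacy

nlp = spacy.blank("en")
nlp.add_pipe(
    "llm",
    config={
        "task": {
            "@llm_tasks": "spacy.NER.v3",
            "labels": ["DISH", "INGREDIENT", "EQUIPMENT"],
            "description": (
                "Entities are the names of food dishes, ingredients, and any "
                "kind of cooking equipment. Adjectives, verbs, adverbs are "
                "not entities. Pronouns are not entities."
            ),
            "label_definitions": {
                "DISH": "Known food dishes, e.g. Lobster Ravioli, garlic bread",
                "INGREDIENT": "Individual parts of a food dish, including herbs and spices.",
                "EQUIPMENT": "Any kind of cooking equipment. e.g. oven, cooking pot, grill",
            },
        },
    },
)
doc = nlp("You can't get a great chocolate flavor with carob.")
print([(ent.text, ent.label_) for ent in doc.ents])
```

A few-shot examples file providing such positive and negative spans could look
like the following: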
+ +```json +[ + { + "text": "You can't get a great chocolate flavor with carob.", + "spans": [ + { + "text": "chocolate", + "is_entity": false, + "label": "==NONE==", + "reason": "is a flavor in this context, not an ingredient" + }, + { + "text": "carob", + "is_entity": true, + "label": "INGREDIENT", + "reason": "is an ingredient to add chocolate flavor" + } + ] + }, + ... +] +``` + +```ini +[components.llm.task.examples] +@misc = "spacy.FewShotReader.v1" +path = "${paths.examples}" +``` + +For a fully working example, see this +[usage example](https://github.com/explosion/spacy-llm/tree/main/usage_examples/ner_v3_openai). + #### spacy.NER.v2 {id="ner-v2"} -The built-in NER task supports both zero-shot and few-shot prompting. This -version also supports explicitly defining the provided labels with custom -descriptions. +This version supports explicitly defining the provided labels with custom +descriptions, and further supports zero-shot and few-shot prompting just like +v1. > #### Example config > @@ -161,84 +421,44 @@ descriptions. > examples = null > ``` -| Argument | Description | -| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | -| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [ner.v2.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/ner.v2.jinja). ~~str~~ | -| `label_definitions` | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | -| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | -| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~ | -| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | -| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | -| `single_match` | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~ | +| Argument | Description | +| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` (NEW) | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `template` (NEW) | Custom prompt template to send to LLM model. 
Defaults to [ner.v2.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/ner.v2.jinja). ~~str~~ |
| `examples`                | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                                 |
| `normalizer`              | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~      |
| `alignment_mode`          | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ |
| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~                                                                                                      |
| `single_match`            | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~                                                    |

The parameters `alignment_mode`, `case_sensitive_matching` and `single_match`
are identical to the [v1](#ner-v1) implementation. The format of few-shot
examples is also the same.

> Label descriptions can also be used with explicit examples to give as much
> info to the LLM model as possible.

New to v2 is the fact that you can write definitions for each label and provide
them via the `label_definitions` argument. This lets you tell the LLM exactly
what you're looking for rather than relying on the LLM to interpret its task
given just the label name. Label descriptions are freeform so you can write
whatever you want here, but a brief description along with some examples and
counterexamples seems to work quite well. 
+New to v2 is the fact that you can write definitions for each label and provide +them via the `label_definitions` argument. This lets you tell the LLM exactly +what you're looking for rather than relying on the LLM to interpret its task +given just the label name. Label descriptions are freeform so you can write +whatever you want here, but a brief description along with some examples and +counter examples seems to work quite well. ```ini [components.llm.task] @llm_tasks = "spacy.NER.v2" labels = PERSON,SPORTS_TEAM + [components.llm.task.label_definitions] PERSON = "Extract any named individual in the text." SPORTS_TEAM = "Extract the names of any professional sports team. e.g. Golden State Warriors, LA Lakers, Man City, Real Madrid" ``` +For a fully working example, see this +[usage example](https://github.com/explosion/spacy-llm/tree/main/usage_examples/ner_dolly). + #### spacy.NER.v1 {id="ner-v1"} The original version of the built-in NER task supports both zero-shot and @@ -302,18 +522,48 @@ supports `.yml`, `.yaml`, `.json` and `.jsonl`. ``` ```ini -[components.llm.task] -@llm_tasks = "spacy.NER.v1" -labels = PERSON,ORGANISATION,LOCATION [components.llm.task.examples] @misc = "spacy.FewShotReader.v1" path = "ner_examples.yml" ``` +### SpanCat {id="spancat"} + +The SpanCat task identifies potentially overlapping entities in text. + +#### spacy.SpanCat.v3 {id="spancat-v3"} + +The built-in SpanCat v3 task is a simple adaptation of the NER v3 task to +support overlapping entities and store its annotations in `doc.spans`. + +> #### Example config +> +> ```ini +> [components.llm.task] +> @llm_tasks = "spacy.SpanCat.v3" +> labels = ["PERSON", "ORGANISATION", "LOCATION"] +> examples = null +> ``` + +| Argument | Description | +| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `template` | Custom prompt template to send to LLM model. Defaults to [`spancat.v3.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/spancat.v3.jinja). ~~str~~ | +| `description` (NEW) | A description of what to recognize or not recognize as entities. ~~str~~ | +| `spans_key` | Key of the `Doc.spans` dict to save the spans under. Defaults to `"sc"`. ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | +| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | +| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | + +Note that the `single_match` parameter, used in v1 and v2, is not supported +anymore, as the CoT parsing algorithm takes care of this automatically. 
+ #### spacy.SpanCat.v2 {id="spancat-v2"} -The built-in SpanCat task is a simple adaptation of the NER task to support -overlapping entities and store its annotations in `doc.spans`. +The built-in SpanCat v2 task is a simple adaptation of the NER v2 task to +support overlapping entities and store its annotations in `doc.spans`. > #### Example config > @@ -324,20 +574,21 @@ overlapping entities and store its annotations in `doc.spans`. > examples = null > ``` -| Argument | Description | -| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | -| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [`spancat.v2.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/spancat.v2.jinja). ~~str~~ | -| `label_definitions` | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | -| `spans_key` | Key of the `Doc.spans` dict to save the spans under. Defaults to `"sc"`. ~~str~~ | -| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | -| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | -| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | -| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | -| `single_match` | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~ | +| Argument | Description | +| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `label_definitions` (NEW) | Optional dict mapping a label to a description of that label. These descriptions are added to the prompt to help instruct the LLM on what to extract. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | +| `template` (NEW) | Custom prompt template to send to LLM model. Defaults to [`spancat.v2.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/spancat.v2.jinja). ~~str~~ | +| `spans_key` | Key of the `Doc.spans` dict to save the spans under. Defaults to `"sc"`. ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, defaults to `spacy.LowercaseNormalizer.v1`. 
~~Optional[Callable[[str], str]]~~ | +| `alignment_mode` | Alignment mode in case the LLM returns entities that do not align with token boundaries. Options are `"strict"`, `"contract"` or `"expand"`. Defaults to `"contract"`. ~~str~~ | +| `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | +| `single_match` | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~ | -Except for the `spans_key` parameter, the SpanCat task reuses the configuration -from the NER task. Refer to [its documentation](#ner-v2) for more insight. +Except for the `spans_key` parameter, the SpanCat v2 task reuses the +configuration from the NER v2 task. Refer to [its documentation](#ner-v2) for +more insight. #### spacy.SpanCat.v1 {id="spancat-v1"} @@ -364,14 +615,19 @@ v1 NER task to support overlapping entities and store its annotations in | `case_sensitive_matching` | Whether to search without case sensitivity. Defaults to `False`. ~~bool~~ | | `single_match` | Whether to match an entity in the LLM's response only once (the first hit) or multiple times. Defaults to `False`. ~~bool~~ | -Except for the `spans_key` parameter, the SpanCat task reuses the configuration -from the NER task. Refer to [its documentation](#ner-v1) for more insight. +Except for the `spans_key` parameter, the SpanCat v1 task reuses the +configuration from the NER v1 task. Refer to [its documentation](#ner-v1) for +more insight. + +### TextCat {id="textcat"} + +The TextCat task labels documents with relevant categories. #### spacy.TextCat.v3 {id="textcat-v3"} -Version 3 (the most recent) of the built-in TextCat task supports both zero-shot -and few-shot prompting. It allows setting definitions of labels. Those -definitions are included in the prompt. +On top of the functionality from v2, version 3 of the built-in TextCat tasks +allows setting definitions of labels. Those definitions are included in the +prompt. > #### Example config > @@ -379,59 +635,30 @@ definitions are included in the prompt. > [components.llm.task] > @llm_tasks = "spacy.TextCat.v3" > labels = ["COMPLIMENT", "INSULT"] -> label_definitions = { -> "COMPLIMENT": "a polite expression of praise or admiration.", -> "INSULT": "a disrespectful or scornfully abusive remark or act." -> } +> +> [components.llm.task.label_definitions] +> "COMPLIMENT" = "a polite expression of praise or admiration.", +> "INSULT" = "a disrespectful or scornfully abusive remark or act." > examples = null > ``` -| Argument | Description | -| ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | -| `label_definitions` | Dictionary of label definitions. Included in the prompt, if set. Defaults to `None`. ~~Optional[Dict[str, str]]~~ | -| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [`textcat.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/textcat.jinja). ~~str~~ | -| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. 
~~Optional[Callable[[], Iterable[Any]]]~~ |
| `normalizer`              | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~    |
| `exclusive_classes`       | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~                         |
| `allow_none`              | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~  |
| `verbose`                 | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~                                                             |

The formatting of few-shot examples is the same as for the
[v1](#textcat-v1) implementation.

#### spacy.TextCat.v2 {id="textcat-v2"}

V2 includes all v1 functionality, with an improved prompt template.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.TextCat.v2"
> labels = ["COMPLIMENT", "INSULT"]
> examples = null > ``` -| Argument | Description | -| ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | -| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [`textcat.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/textcat.jinja). ~~str~~ | -| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | -| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | -| `exclusive_classes` | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~ | -| `allow_none` | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~ | -| `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | +| Argument | Description | +| ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `labels` | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~ | +| `template` (NEW) | Custom prompt template to send to LLM model. Defaults to [`textcat.v2.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/textcat.v2.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| `normalizer` | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. ~~Optional[Callable[[str], str]]~~ | +| `exclusive_classes` | If set to `True`, only one label per document should be valid. If set to `False`, one document can have multiple labels. Defaults to `False`. ~~bool~~ | +| `allow_none` | When set to `True`, allows the LLM to not return any of the given label. The resulting dict in `doc.cats` will have `0.0` scores for all labels. Defaults to `True`. ~~bool~~ | +| `verbose` | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. ~~bool~~ | -To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), -you can write down a few examples in a separate file, and provide these to be -injected into the prompt to the LLM. The default reader `spacy.FewShotReader.v1` -supports `.yml`, `.yaml`, `.json` and `.jsonl`. 
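As a sketch of how this task can be used from Python (assuming API credentials
in the environment and the default model), the predicted categories end up as
scores in `doc.cats`:

```python
# Sketch: exclusive binary text classification with TextCat v1.
import spacy

nlp = spacy.blank("en")
nlp.add_pipe(
    "llm",
    config={
        "task": {
            "@llm_tasks": "spacy.TextCat.v1",
            # v1 takes a comma-separated string of labels.
            "labels": "COMPLIMENT,INSULT",
            "exclusive_classes": True,
        },
    },
)
doc = nlp("You look great!")
print(doc.cats)  # e.g. {"COMPLIMENT": 1.0, "INSULT": 0.0}
```

A JSON examples file for this task could look like the following: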
```json
[
  {
    "text": "You look great!",
    "answer": "Compliment"
  },
  {
    "text": "You are not very clever at all.",
    "answer": "Insult"
  }
]
```

```ini
[components.llm.task.examples]
@misc = "spacy.FewShotReader.v1"
path = "textcat_examples.json"
```

### REL {id="rel"}

The REL task extracts relations between named entities.

#### spacy.REL.v1 {id="rel-v1"}

The built-in REL task supports both zero-shot and few-shot prompting. It relies
on an upstream NER component for entity extraction.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.REL.v1"
> labels = ["LivesIn", "Visits"]
> ```

| Argument            | Description                                                                                                                                                                  |
| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `labels`            | List of labels or str of comma-separated list of labels. ~~Union[List[str], str]~~                                                                                          |
| `template`          | Custom prompt template to send to LLM model. Defaults to [`rel.v1.jinja`](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/rel.v1.jinja). ~~str~~ |
| `label_definitions` | Dictionary providing a description for each relation label. Defaults to `None`. ~~Optional[Dict[str, str]]~~                                                                |
| `examples`          | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~                                              |
| `normalizer`        | Function that normalizes the labels as returned by the LLM. If `None`, falls back to `spacy.LowercaseNormalizer.v1`. Defaults to `None`. ~~Optional[Callable[[str], str]]~~ |
| `verbose`           | If set to `True`, warnings will be generated when the LLM returns invalid responses. Defaults to `False`. 
~~bool~~ | To perform [few-shot learning](/usage/large-language-models#few-shot-prompts), you can write down a few examples in a separate file, and provide these to be @@ -575,10 +779,17 @@ Note: the REL task relies on pre-extracted entities to make its prediction. Hence, you'll need to add a component that populates `doc.ents` with recognized spans to your spaCy pipeline and put it _before_ the REL component. +For a fully working example, see this +[usage example](https://github.com/explosion/spacy-llm/tree/main/usage_examples/rel_openai). + +### Lemma {id="lemma"} + +The Lemma task lemmatizes the provided text and updates the `lemma_` attribute +in the doc's tokens accordingly. + #### spacy.Lemma.v1 {id="lemma-v1"} -The `Lemma.v1` task lemmatizes the provided text and updates the `lemma_` -attribute in the doc's tokens accordingly. +This task supports both zero-shot and few-shot prompting. > #### Example config > @@ -588,14 +799,14 @@ attribute in the doc's tokens accordingly. > examples = null > ``` -| Argument | Description | -| ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [lemma.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/lemma.jinja). ~~str~~ | -| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | +| Argument | Description | +| ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `template` | Custom prompt template to send to LLM model. Defaults to [lemma.v1.jinja](https://github.com/explosion/spacy-llm/blob/main/spacy_llm/tasks/templates/lemma.v1.jinja). ~~str~~ | +| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ | -`Lemma.v1` prompts the LLM to lemmatize the passed text and return the -lemmatized version as a list of tokens and their corresponding lemma. E. g. the -text `I'm buying ice cream for my friends` should invoke the response +The task prompts the LLM to lemmatize the passed text and return the lemmatized +version as a list of tokens and their corresponding lemma. E. g. the text +`I'm buying ice cream for my friends` should invoke the response ``` I: I @@ -647,12 +858,16 @@ supports `.yml`, `.yaml`, `.json` and `.jsonl`. path = "lemma_examples.yml" ``` -#### spacy.Sentiment.v1 {id="sentiment-v1"} +### Sentiment {id="sentiment"} Performs sentiment analysis on provided texts. Scores between 0 and 1 are stored in `Doc._.sentiment` - the higher, the more positive. Note in cases of parsing issues (e. g. in case of unexpected LLM responses) the value might be `None`. +#### spacy.Sentiment.v1 {id="sentiment-v1"} + +This task supports both zero-shot and few-shot prompting. + > #### Example config > > ```ini @@ -661,11 +876,11 @@ issues (e. g. in case of unexpected LLM responses) the value might be `None`. 
> examples = null
> ```

| Argument   | Description |
| ---------- | ----------- |
| `template` | Custom prompt template to send to LLM model. Default templates for each task are located in the `spacy_llm/tasks/templates` directory. Defaults to [sentiment.jinja](./spacy_llm/tasks/templates/sentiment.jinja). ~~str~~ |
| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ |
| `field`    | Name of extension attribute to store summary in (i. e. the summary will be available in `doc._.{field}`). Defaults to `sentiment`. ~~str~~ |

| Argument   | Description |
| ---------- | ----------- |
| `template` | Custom prompt template to send to LLM model. Defaults to [sentiment.v1.jinja](./spacy_llm/tasks/templates/sentiment.v1.jinja). ~~str~~ |
| `examples` | Optional function that generates examples for few-shot learning. Defaults to `None`. ~~Optional[Callable[[], Iterable[Any]]]~~ |
| `field`    | Name of extension attribute to store the sentiment score in (i. e. the score will be available in `doc._.{field}`). Defaults to `sentiment`. ~~str~~ |

To perform [few-shot learning](/usage/large-language-models#few-shot-prompts),
you can write down a few examples in a separate file, and provide these to be
@@ -691,7 +906,10 @@ supports `.yml`, `.yaml`, `.json` and `.jsonl`.
path = "sentiment_examples.yml"
```

-#### spacy.NoOp.v1 {id="noop-v1"}
+### NoOp {id="noop"}
+
+This task is only useful for testing - it tells the LLM to do nothing, and does
+not set any fields on the `docs`.

> #### Example config
>
> ```ini
> [components.llm.task]
> @llm_tasks = "spacy.NoOp.v1"
> ```

-This task is only useful for testing - it tells the LLM to do nothing, and does
-not set any fields on the `docs`.
+#### spacy.NoOp.v1 {id="noop-v1"}

-### Models {id="models"}
+This task needs no further configuration.
+
+## Models {id="models"}

A _model_ defines which LLM model to query, and how to query it. It can be a
simple function taking a collection of prompts (consistent with the output type
@@ -713,6 +932,66 @@ it's a function of type `Callable[[Iterable[Any]], Iterable[Any]]`, but
specific implementations can have other signatures, like
`Callable[[Iterable[str]], Iterable[str]]`.
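
+For illustration, a function satisfying this contract could look like the
+following - a minimal, hypothetical sketch that is not part of the library and
+simply echoes every prompt back instead of querying an actual LLM:
+
+```python
+from typing import Iterable
+
+def dummy_model(prompts: Iterable[str]) -> Iterable[str]:
+    # A real model would send each prompt to an LLM API or a local model
+    # here and yield the raw responses for the task to parse.
+    for prompt in prompts:
+        yield prompt
+```
+
+### Models via REST API {id="models-rest"}
+
+These models all take the same parameters, but note that the `config` should
+contain provider-specific keys and values, as it will be passed onwards to the
+provider's API.
+
| Argument           | Description |
| ------------------ | ----------- |
| `name`             | Model name, i. e. any supported variant for this particular model. Default depends on the specific model (cf. below) ~~str~~ |
| `config`           | Further configuration passed on to the model. Default depends on the specific model (cf. below). ~~Dict[Any, Any]~~ |
| `strict`           | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ |
| `max_tries`        | Max. number of tries for API request. Defaults to `5`. 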
~~int~~ | +| `max_request_time` | Max. time (in seconds) to wait for request to terminate before raising an exception. Defaults to `30.0`. ~~float~~ | +| `interval` | Time interval (in seconds) for API retries in seconds. Defaults to `1.0`. ~~float~~ | + +> #### Example config: +> +> ```ini +> [components.llm.model] +> @llm_models = "spacy.GPT-4.v1" +> name = "gpt-4" +> config = {"temperature": 0.0} +> ``` + +| Model | Provider | Supported names | Default name | Default config | +| ----------------------------- | --------- | ---------------------------------------------------------------------------------------- | ---------------------- | ------------------------------------ | +| `spacy.GPT-4.v1` | OpenAI | `["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]` | `"gpt-4"` | `{}` | +| `spacy.GPT-4.v2` | OpenAI | `["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]` | `"gpt-4"` | `{temperature=0.0}` | +| `spacy.GPT-3-5.v1` | OpenAI | `["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-0613-16k"]` | `"gpt-3.5-turbo"` | `{}` | +| `spacy.GPT-3-5.v2` | OpenAI | `["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-0613-16k"]` | `"gpt-3.5-turbo"` | `{temperature=0.0}` | +| `spacy.Davinci.v1` | OpenAI | `["davinci"]` | `"davinci"` | `{}` | +| `spacy.Davinci.v2` | OpenAI | `["davinci"]` | `"davinci"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Text-Davinci.v1` | OpenAI | `["text-davinci-003", "text-davinci-002"]` | `"text-davinci-003"` | `{}` | +| `spacy.Text-Davinci.v2` | OpenAI | `["text-davinci-003", "text-davinci-002"]` | `"text-davinci-003"` | `{temperature=0.0, max_tokens=1000}` | +| `spacy.Code-Davinci.v1` | OpenAI | `["code-davinci-002"]` | `"code-davinci-002"` | `{}` | +| `spacy.Code-Davinci.v2` | OpenAI | `["code-davinci-002"]` | `"code-davinci-002"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Curie.v1` | OpenAI | `["curie"]` | `"curie"` | `{}` | +| `spacy.Curie.v2` | OpenAI | `["curie"]` | `"curie"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Text-Curie.v1` | OpenAI | `["text-curie-001"]` | `"text-curie-001"` | `{}` | +| `spacy.Text-Curie.v2` | OpenAI | `["text-curie-001"]` | `"text-curie-001"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Babbage.v1` | OpenAI | `["babbage"]` | `"babbage"` | `{}` | +| `spacy.Babbage.v2` | OpenAI | `["babbage"]` | `"babbage"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Text-Babbage.v1` | OpenAI | `["text-babbage-001"]` | `"text-babbage-001"` | `{}` | +| `spacy.Text-Babbage.v2` | OpenAI | `["text-babbage-001"]` | `"text-babbage-001"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Ada.v1` | OpenAI | `["ada"]` | `"ada"` | `{}` | +| `spacy.Ada.v2` | OpenAI | `["ada"]` | `"ada"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Text-Ada.v1` | OpenAI | `["text-ada-001"]` | `"text-ada-001"` | `{}` | +| `spacy.Text-Ada.v2` | OpenAI | `["text-ada-001"]` | `"text-ada-001"` | `{temperature=0.0, max_tokens=500}` | +| `spacy.Command.v1` | Cohere | `["command", "command-light", "command-light-nightly", "command-nightly"]` | `"command"` | `{}` | +| `spacy.Claude-2.v1` | Anthropic | `["claude-2", "claude-2-100k"]` | `"claude-2"` | `{}` | +| `spacy.Claude-1.v1` | Anthropic | `["claude-1", "claude-1-100k"]` | `"claude-1"` | `{}` | +| `spacy.Claude-1-0.v1` | Anthropic | `["claude-1.0"]` | `"claude-1.0"` | `{}` | +| `spacy.Claude-1-2.v1` | Anthropic | `["claude-1.2"]` | `"claude-1.2"` | `{}` | +| `spacy.Claude-1-3.v1` | Anthropic | `["claude-1.3", "claude-1.3-100k"]` | `"claude-1.3"` | `{}` 
| +| `spacy.Claude-instant-1.v1` | Anthropic | `["claude-instant-1", "claude-instant-1-100k"]` | `"claude-instant-1"` | `{}` | +| `spacy.Claude-instant-1-1.v1` | Anthropic | `["claude-instant-1.1", "claude-instant-1.1-100k"]` | `"claude-instant-1.1"` | `{}` | + +To use these models, make sure that you've [set the relevant API](#api-keys) +keys as environment variables. + #### API Keys {id="api-keys"} Note that when using hosted services, you have to ensure that the proper API @@ -727,492 +1006,27 @@ export OPENAI_API_KEY="sk-..." export OPENAI_API_ORG="org-..." ``` -For Cohere it's +For Cohere: ```shell export CO_API_KEY="..." ``` -and for Anthropic +For Anthropic: ```shell export ANTHROPIC_API_KEY="..." ``` -#### spacy.GPT-4.v1 {id="gpt-4"} +### Models via HuggingFace {id="models-hf"} -OpenAI's `gpt-4` model family. +These models all take the same parameters: -> #### Example config: -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.GPT-4.v1" -> name = "gpt-4" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"gpt-4"`. ~~Literal["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.GPT-3-5.v1 {id="gpt-3-5"} - -OpenAI's `gpt-3-5` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.GPT-3-5.v1" -> name = "gpt-3.5-turbo" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"gpt-3.5-turbo"`. ~~Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-0613-16k"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Text-Davinci.v1 {id="text-davinci"} - -OpenAI's `text-davinci` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Text-Davinci.v1" -> name = "text-davinci-003" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"text-davinci-003"`. 
~~Literal["text-davinci-002", "text-davinci-003"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Code-Davinci.v1 {id="code-davinci"} - -OpenAI's `code-davinci` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Code-Davinci.v1" -> name = "code-davinci-002" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"code-davinci-002"`. ~~Literal["code-davinci-002"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Text-Curie.v1 {id="text-curie"} - -OpenAI's `text-curie` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Text-Curie.v1" -> name = "text-curie-001" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"text-curie-001"`. ~~Literal["text-curie-001"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Text-Babbage.v1 {id="text-babbage"} - -OpenAI's `text-babbage` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Text-Babbage.v1" -> name = "text-babbage-001" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"text-babbage-001"`. ~~Literal["text-babbage-001"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. 
~~int~~ | - -#### spacy.Text-Ada.v1 {id="text-ada"} - -OpenAI's `text-ada` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Text-Ada.v1" -> name = "text-ada-001" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"text-ada-001"`. ~~Literal["text-ada-001"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Davinci.v1 {id="davinci"} - -OpenAI's `davinci` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Davinci.v1" -> name = "davinci" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"davinci"`. ~~Literal["davinci"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Curie.v1 {id="curie"} - -OpenAI's `curie` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Curie.v1" -> name = "curie" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"curie"`. ~~Literal["curie"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Babbage.v1 {id="babbage"} - -OpenAI's `babbage` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Babbage.v1" -> name = "babbage" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"babbage"`. 
~~Literal["babbage"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Ada.v1 {id="ada"} - -OpenAI's `ada` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Ada.v1" -> name = "ada" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"ada"`. ~~Literal["ada"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Command.v1 {id="command"} - -Cohere's `command` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Command.v1" -> name = "command" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"command"`. ~~Literal["command", "command-light", "command-light-nightly", "command-nightly"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Claude-2.v1 {id="claude-2"} - -Anthropic's `claude-2` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Claude-2.v1" -> name = "claude-2" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-2"`. ~~Literal["claude-2", "claude-2-100k"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. 
~~int~~ | - -#### spacy.Claude-1.v1 {id="claude-1"} - -Anthropic's `claude-1` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Claude-1.v1" -> name = "claude-1" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-1"`. ~~Literal["claude-1", "claude-1-100k"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Claude-instant-1.v1 {id="claude-instant-1"} - -Anthropic's `claude-instant-1` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Claude-instant-1.v1" -> name = "claude-instant-1" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-instant-1"`. ~~Literal["claude-instant-1", "claude-instant-1-100k"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Claude-instant-1-1.v1 {id="claude-instant-1-1"} - -Anthropic's `claude-instant-1.1` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Claude-instant-1-1.v1" -> name = "claude-instant-1.1" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-instant-1.1"`. ~~Literal["claude-instant-1.1", "claude-instant-1.1-100k"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Claude-1-0.v1 {id="claude-1-0"} - -Anthropic's `claude-1.0` model family. 
- -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Claude-1-0.v1" -> name = "claude-1.0" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-1.0"`. ~~Literal["claude-1.0"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Claude-1-2.v1 {id="claude-1-2"} - -Anthropic's `claude-1.2` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Claude-1-2.v1 " -> name = "claude-1.2" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-1.2"`. ~~Literal["claude-1.2"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Claude-1-3.v1 {id="claude-1-3"} - -Anthropic's `claude-1.3` model family. - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Claude-1-3.v1" -> name = "claude-1.3" -> config = {"temperature": 0.3} -> ``` - -| Argument | Description | -| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | Model name, i. e. any supported variant for this particular model. Defaults to `"claude-1.3"`. ~~Literal["claude-1.3", "claude-1.3-100k"]~~ | -| `config` | Further configuration passed on to the model. Defaults to `{}`. ~~Dict[Any, Any]~~ | -| `strict` | If `True`, raises an error if the LLM API returns a malformed response. Otherwise, return the error responses as is. Defaults to `True`. ~~bool~~ | -| `max_tries` | Max. number of tries for API request. Defaults to `3`. ~~int~~ | -| `timeout` | Timeout for API request in seconds. Defaults to `30`. ~~int~~ | - -#### spacy.Dolly.v1 {id="dolly"} - -To use this model, ideally you have a GPU enabled and have installed -`transformers`, `torch` and CUDA in your virtual environment. This allows you to -have the setting `device=cuda:0` in your config, which ensures that the model is -loaded entirely on the GPU (and fails otherwise). 
- -You can do so with - -```shell -python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]" -``` - -If you don't have access to a GPU, you can install `accelerate` and -set`device_map=auto` instead, but be aware that this may result in some layers -getting distributed to the CPU or even the hard drive, which may ultimately -result in extremely slow queries. - -```shell -python -m pip install "accelerate>=0.16.0,<1.0" -``` - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Dolly.v1" -> name = "dolly-v2-3b" -> ``` - -| Argument | Description | -| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | The name of a Dolly model that is supported (e. g. "dolly-v2-3b" or "dolly-v2-12b"). ~~Literal["dolly-v2-3b", "dolly-v2-7b", "dolly-v2-12b"]~~ | -| `config_init` | Further configuration passed on to the construction of the model with `transformers.pipeline()`. Defaults to `{}`. ~~Dict[str, Any]~~ | -| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ | - -Supported models (see the -[Databricks models page](https://huggingface.co/databricks) on Hugging Face for -details): - -- `"databricks/dolly-v2-3b"` -- `"databricks/dolly-v2-7b"` -- `"databricks/dolly-v2-12b"` - -Note that Hugging Face will download this model the first time you use it - you -can -[define the cached directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache) -by setting the environmental variable `HF_HOME`. - -#### spacy.Llama2.v1 {id="llama2"} - -To use this model, ideally you have a GPU enabled and have installed -`transformers`, `torch` and CUDA in your virtual environment. This allows you to -have the setting `device=cuda:0` in your config, which ensures that the model is -loaded entirely on the GPU (and fails otherwise). - -You can do so with - -```shell -python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]" -``` - -If you don't have access to a GPU, you can install `accelerate` and -set`device_map=auto` instead, but be aware that this may result in some layers -getting distributed to the CPU or even the hard drive, which may ultimately -result in extremely slow queries. - -```shell -python -m pip install "accelerate>=0.16.0,<1.0" -``` - -Note that the chat models variants of Llama 2 are currently not supported. This -is because they need a particular prompting setup and don't add any discernible -benefits in the use case of `spacy-llm` (i. e. no interactive chat) compared the -completion model variants. +| Argument | Description | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | Model name, i. e. any supported variant for this particular model. ~~str~~ | +| `config_init` | Further configuration passed on to the construction of the model with `transformers.pipeline()`. Defaults to `{}`. ~~Dict[str, Any]~~ | +| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ | > #### Example config > @@ -1222,108 +1036,27 @@ completion model variants. 
> name = "llama2-7b-hf" > ``` -| Argument | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `name` | The name of a Llama 2 model variant that is supported. Defaults to `"Llama-2-7b-hf"`. ~~Literal["Llama-2-7b-hf", "Llama-2-13b-hf", "Llama-2-70b-hf"]~~ | -| `config_init` | Further configuration passed on to the construction of the model with `transformers.pipeline()`. Defaults to `{}`. ~~Dict[str, Any]~~ | -| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ | +| Model | Provider | Supported names | HF directory | +| -------------------- | --------------- | ------------------------------------------------------------------------------------------------------------ | -------------------------------------- | +| `spacy.Dolly.v1` | Databricks | `["dolly-v2-3b", "dolly-v2-7b", "dolly-v2-12b"]` | https://huggingface.co/databricks | +| `spacy.Llama2.v1` | Meta AI | `["Llama-2-7b-hf", "Llama-2-13b-hf", "Llama-2-70b-hf"]` | https://huggingface.co/meta-llama | +| `spacy.Falcon.v1` | TII | `["falcon-rw-1b", "falcon-7b", "falcon-7b-instruct", "falcon-40b-instruct"]` | https://huggingface.co/tiiuae | +| `spacy.StableLM.v1` | Stability AI | `["stablelm-base-alpha-3b", "stablelm-base-alpha-7b", "stablelm-tuned-alpha-3b", "stablelm-tuned-alpha-7b"]` | https://huggingface.co/stabilityai | +| `spacy.OpenLLaMA.v1` | OpenLM Research | `["open_llama_3b", "open_llama_7b", "open_llama_7b_v2", "open_llama_13b"]` | https://huggingface.co/openlm-research | -Note that Hugging Face will download this model the first time you use it - you -can -[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache) -by setting the environmental variable `HF_HOME`. +See the "HF directory" for more details on each of the models. -#### spacy.Falcon.v1 {id="falcon"} - -To use this model, ideally you have a GPU enabled and have installed -`transformers`, `torch` and CUDA in your virtual environment. This allows you to -have the setting `device=cuda:0` in your config, which ensures that the model is -loaded entirely on the GPU (and fails otherwise). - -You can do so with - -```shell -python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]" -``` - -If you don't have access to a GPU, you can install `accelerate` and -set`device_map=auto` instead, but be aware that this may result in some layers -getting distributed to the CPU or even the hard drive, which may ultimately -result in extremely slow queries. - -```shell -python -m pip install "accelerate>=0.16.0,<1.0" -``` - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.Falcon.v1" -> name = "falcon-7b" -> ``` - -| Argument | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `name` | The name of a Falcon model variant that is supported. Defaults to `"7b-instruct"`. ~~Literal["falcon-rw-1b", "falcon-7b", "falcon-7b-instruct", "falcon-40b-instruct"]~~ | -| `config_init` | Further configuration passed on to the construction of the model with `transformers.pipeline()`. Defaults to `{}`. ~~Dict[str, Any]~~ | -| `config_run` | Further configuration used during model inference. Defaults to `{}`. 
~~Dict[str, Any]~~ | - -Note that Hugging Face will download this model the first time you use it - you -can -[define the cache directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache) -by setting the environmental variable `HF_HOME`. - -#### spacy.StableLM.v1 {id="stablelm"} - -To use this model, ideally you have a GPU enabled and have installed -`transformers`, `torch` and CUDA in your virtual environment. - -You can do so with - -```shell -python -m pip install "spacy-llm[transformers]" "transformers[sentencepiece]" -``` - -If you don't have access to a GPU, you can install `accelerate` and -set`device_map=auto` instead, but be aware that this may result in some layers -getting distributed to the CPU or even the hard drive, which may ultimately -result in extremely slow queries. - -```shell -python -m pip install "accelerate>=0.16.0,<1.0" -``` - -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.StableLM.v1" -> name = "stablelm-tuned-alpha-7b" -> ``` - -| Argument | Description | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | The name of a StableLM model that is supported (e. g. "stablelm-tuned-alpha-7b"). ~~Literal["stablelm-base-alpha-3b", "stablelm-base-alpha-7b", "stablelm-tuned-alpha-3b", "stablelm-tuned-alpha-7b"]~~ | -| `config_init` | Further configuration passed on to the construction of the model with `transformers.AutoModelForCausalLM.from_pretrained()`. Defaults to `{}`. ~~Dict[str, Any]~~ | -| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ | - -See the -[Stability AI StableLM GitHub repo](https://github.com/Stability-AI/StableLM/#stablelm-alpha) -for details. - -Note that Hugging Face will download this model the first time you use it - you +Note that Hugging Face will download the model the first time you use it - you can [define the cached directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache) by setting the environmental variable `HF_HOME`. -#### spacy.OpenLLaMA.v1 {id="openllama"} +#### Installation with HuggingFace {id="install-hf"} -To use this model, ideally you have a GPU enabled and have installed - -- `transformers[sentencepiece]` -- `torch` -- CUDA in your virtual environment. +To use models from HuggingFace, ideally you have a GPU enabled and have +installed `transformers`, `torch` and CUDA in your virtual environment. This +allows you to have the setting `device=cuda:0` in your config, which ensures +that the model is loaded entirely on the GPU (and fails otherwise). You can do so with @@ -1340,30 +1073,7 @@ result in extremely slow queries. python -m pip install "accelerate>=0.16.0,<1.0" ``` -> #### Example config -> -> ```ini -> [components.llm.model] -> @llm_models = "spacy.OpenLLaMA.v1" -> name = "open_llama_3b" -> ``` - -| Argument | Description | -| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `name` | The name of a OpenLLaMA model that is supported. ~~Literal["open_llama_3b", "open_llama_7b", "open_llama_7b_v2", "open_llama_13b"]~~ | -| `config_init` | Further configuration passed on to the construction of the model with `transformers.AutoModelForCausalLM.from_pretrained()`. Defaults to `{}`. 
~~Dict[str, Any]~~ | -| `config_run` | Further configuration used during model inference. Defaults to `{}`. ~~Dict[str, Any]~~ | - -See the -[OpenLM Research OpenLLaMA GitHub repo](https://github.com/openlm-research/open_llama) -for details. - -Note that Hugging Face will download this model the first time you use it - you -can -[define the cached directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache) -by setting the environmental variable `HF_HOME`. - -#### LangChain models {id="langchain-models"} +### LangChain models {id="langchain-models"} To use [LangChain](https://github.com/hwchase17/langchain) for the API retrieval part, make sure you have installed it first: @@ -1392,7 +1102,7 @@ The name of the model to be used has to be passed in via the `name` attribute. > @llm_models = "langchain.OpenAI.v1" > name = "gpt-3.5-turbo" > query = {"@llm_queries": "spacy.CallLangChain.v1"} -> config = {"temperature": 0.3} +> config = {"temperature": 0.0} > ``` | Argument | Description | @@ -1404,7 +1114,7 @@ The name of the model to be used has to be passed in via the `name` attribute. The default `query` (`spacy.CallLangChain.v1`) executes the prompts by running `model(text)` for each given textual prompt. -### Cache {id="cache"} +## Cache {id="cache"} Interacting with LLMs, either through an external API or a local instance, is costly. Since developing an NLP pipeline generally means a lot of exploration @@ -1436,9 +1146,9 @@ provide your own registered function returning your own cache implementation. If you wish to do so, ensure that your cache object adheres to the `Protocol` defined in `spacy_llm.ty.Cache`. -### Various functions {id="various-functions"} +## Various functions {id="various-functions"} -#### spacy.FewShotReader.v1 {id="fewshotreader-v1"} +### spacy.FewShotReader.v1 {id="fewshotreader-v1"} This function is registered in spaCy's `misc` registry, and reads in examples from a `.yml`, `.yaml`, `.json` or `.jsonl` file. It uses @@ -1457,7 +1167,7 @@ them depending on the file extension. | -------- | ----------------------------------------------------------------------------------------------- | | `path` | Path to an examples file with suffix `.yml`, `.yaml`, `.json` or `.jsonl`. ~~Union[str, Path]~~ | -#### spacy.FileReader.v1 {id="filereader-v1"} +### spacy.FileReader.v1 {id="filereader-v1"} This function is registered in spaCy's `misc` registry, and reads a file provided to the `path` to return a `str` representation of its contents. This @@ -1477,7 +1187,7 @@ template. | -------- | ------------------------------------------------- | | `path` | Path to the file to be read. ~~Union[str, Path]~~ | -#### Normalizer functions {id="normalizer-functions"} +### Normalizer functions {id="normalizer-functions"} These functions provide simple normalizations for string comparisons, e.g. 
between a list of specified labels and a label given in the raw text of the LLM diff --git a/website/docs/usage/large-language-models.mdx b/website/docs/usage/large-language-models.mdx index 4da9a8f16..86f44f5ae 100644 --- a/website/docs/usage/large-language-models.mdx +++ b/website/docs/usage/large-language-models.mdx @@ -108,7 +108,7 @@ labels = ["COMPLIMENT", "INSULT"] [components.llm.model] @llm_models = "spacy.GPT-3-5.v1" -config = {"temperature": 0.3} +config = {"temperature": 0.0} ``` Now run: @@ -142,7 +142,7 @@ pipeline = ["llm"] factory = "llm" [components.llm.task] -@llm_tasks = "spacy.NER.v2" +@llm_tasks = "spacy.NER.v3" labels = ["PERSON", "ORGANISATION", "LOCATION"] [components.llm.model] @@ -169,25 +169,17 @@ to be `"databricks/dolly-v2-12b"` for better performance. ### Example 3: Create the component directly in Python {id="example-3"} -The `llm` component behaves as any other component does, so adding it to an -existing pipeline follows the same pattern: +The `llm` component behaves as any other component does, and there are +[task-specific components](/api/large-language-models#config) defined to +help you hit the ground running with a reasonable built-in task implementation. ```python import spacy nlp = spacy.blank("en") -nlp.add_pipe( - "llm", - config={ - "task": { - "@llm_tasks": "spacy.NER.v2", - "labels": ["PERSON", "ORGANISATION", "LOCATION"] - }, - "model": { - "@llm_models": "spacy.GPT-3-5.v1", - }, - }, -) +llm_ner = nlp.add_pipe("llm_ner") +llm_ner.add_label("PERSON") +llm_ner.add_label("LOCATION") nlp.initialize() doc = nlp("Jack and Jill rode up the hill in Les Deux Alpes") print([(ent.text, ent.label_) for ent in doc.ents]) @@ -314,7 +306,7 @@ COMPLIMENT ## API {id="api"} -`spacy-llm` exposes a `llm` factory with +`spacy-llm` exposes an `llm` factory with [configurable settings](/api/large-language-models#config). An `llm` component is defined by two main settings: @@ -359,24 +351,26 @@ function. | [`task.parse_responses`](/api/large-language-models#task-parse-responses) | Takes a collection of LLM responses and the original documents, parses the responses into structured information, and sets the annotations on the documents. | Moreover, the task may define an optional [`scorer` method](/api/scorer#score). -It should accept an iterable of `Example`s as input and return a score +It should accept an iterable of `Example` objects as input and return a score dictionary. If the `scorer` method is defined, `spacy-llm` will call it to evaluate the component. -| Component | Description | -| ----------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [`spacy.Summarization.v1`](/api/large-language-models#summarization-v1) | The summarization task prompts the model for a concise summary of the provided text. | -| [`spacy.NER.v2`](/api/large-language-models#ner-v2) | The built-in NER task supports both zero-shot and few-shot prompting. This version also supports explicitly defining the provided labels with custom descriptions. | -| [`spacy.NER.v1`](/api/large-language-models#ner-v1) | The original version of the built-in NER task supports both zero-shot and few-shot prompting. | -| [`spacy.SpanCat.v2`](/api/large-language-models#spancat-v2) | The built-in SpanCat task is a simple adaptation of the NER task to support overlapping entities and store its annotations in `doc.spans`. 
| -| [`spacy.SpanCat.v1`](/api/large-language-models#spancat-v1) | The original version of the built-in SpanCat task is a simple adaptation of the v1 NER task to support overlapping entities and store its annotations in `doc.spans`. | -| [`spacy.TextCat.v3`](/api/large-language-models#textcat-v3) | Version 3 (the most recent) of the built-in TextCat task supports both zero-shot and few-shot prompting. It allows setting definitions of labels. | -| [`spacy.TextCat.v2`](/api/large-language-models#textcat-v2) | Version 2 of the built-in TextCat task supports both zero-shot and few-shot prompting and includes an improved prompt template. | -| [`spacy.TextCat.v1`](/api/large-language-models#textcat-v1) | Version 1 of the built-in TextCat task supports both zero-shot and few-shot prompting. | -| [`spacy.REL.v1`](/api/large-language-models#rel-v1) | The built-in REL task supports both zero-shot and few-shot prompting. It relies on an upstream NER component for entities extraction. | -| [`spacy.Lemma.v1`](/api/large-language-models#lemma-v1) | The `Lemma.v1` task lemmatizes the provided text and updates the `lemma_` attribute in the doc's tokens accordingly. | -| [`spacy.Sentiment.v1`](/api/large-language-models#sentiment-v1) | Performs sentiment analysis on provided texts. | -| [`spacy.NoOp.v1`](/api/large-language-models#noop-v1) | This task is only useful for testing - it tells the LLM to do nothing, and does not set any fields on the `docs`. | +| Component | Description | +| ----------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | +| [`spacy.Summarization.v1`](/api/large-language-models#summarization-v1) | The summarization task prompts the model for a concise summary of the provided text. | +| [`spacy.NER.v3`](/api/large-language-models#ner-v3) | Implements Chain-of-Thought reasoning for NER extraction - obtains higher accuracy than v1 or v2. | +| [`spacy.NER.v2`](/api/large-language-models#ner-v2) | Builds on v1 and additionally supports defining the provided labels with explicit descriptions. | +| [`spacy.NER.v1`](/api/large-language-models#ner-v1) | The original version of the built-in NER task supports both zero-shot and few-shot prompting. | +| [`spacy.SpanCat.v3`](/api/large-language-models#spancat-v3) | Adaptation of the v3 NER task to support overlapping entities and store its annotations in `doc.spans`. | +| [`spacy.SpanCat.v2`](/api/large-language-models#spancat-v2) | Adaptation of the v2 NER task to support overlapping entities and store its annotations in `doc.spans`. | +| [`spacy.SpanCat.v1`](/api/large-language-models#spancat-v1) | Adaptation of the v1 NER task to support overlapping entities and store its annotations in `doc.spans`. | +| [`spacy.REL.v1`](/api/large-language-models#rel-v1) | Relation Extraction task supporting both zero-shot and few-shot prompting. | +| [`spacy.TextCat.v3`](/api/large-language-models#textcat-v3) | Version 3 builds on v2 and allows setting definitions of labels. | +| [`spacy.TextCat.v2`](/api/large-language-models#textcat-v2) | Version 2 builds on v1 and includes an improved prompt template. | +| [`spacy.TextCat.v1`](/api/large-language-models#textcat-v1) | Version 1 of the built-in TextCat task supports both zero-shot and few-shot prompting. | +| [`spacy.Lemma.v1`](/api/large-language-models#lemma-v1) | Lemmatizes the provided text and updates the `lemma_` attribute of the tokens accordingly. 
| +| [`spacy.Sentiment.v1`](/api/large-language-models#sentiment-v1) | Performs sentiment analysis on provided texts. | +| [`spacy.NoOp.v1`](/api/large-language-models#noop-v1) | This task is only useful for testing - it tells the LLM to do nothing, and does not set any fields on the `docs`. | #### Providing examples for few-shot prompts {id="few-shot-prompts"} @@ -469,31 +463,38 @@ provider's documentation. -| Component | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| [`spacy.GPT-4.v1`](/api/large-language-models#gpt-4) | OpenAI’s `gpt-4` model family. | -| [`spacy.GPT-3-5.v1`](/api/large-language-models#gpt-3-5) | OpenAI’s `gpt-3-5` model family. | -| [`spacy.Text-Davinci.v1`](/api/large-language-models#text-davinci) | OpenAI’s `text-davinci` model family. | -| [`spacy.Code-Davinci.v1`](/api/large-language-models#code-davinci) | OpenAI’s `code-davinci` model family. | -| [`spacy.Text-Curie.v1`](/api/large-language-models#text-curie) | OpenAI’s `text-curie` model family. | -| [`spacy.Text-Babbage.v1`](/api/large-language-models#text-babbage) | OpenAI’s `text-babbage` model family. | -| [`spacy.Text-Ada.v1`](/api/large-language-models#text-ada) | OpenAI’s `text-ada` model family. | -| [`spacy.Davinci.v1`](/api/large-language-models#davinci) | OpenAI’s `davinci` model family. | -| [`spacy.Curie.v1`](/api/large-language-models#curie) | OpenAI’s `curie` model family. | -| [`spacy.Babbage.v1`](/api/large-language-models#babbage) | OpenAI’s `babbage` model family. | -| [`spacy.Ada.v1`](/api/large-language-models#ada) | OpenAI’s `ada` model family. | -| [`spacy.Command.v1`](/api/large-language-models#command) | Cohere’s `command` model family. | -| [`spacy.Claude-1.v1`](/api/large-language-models#claude-1) | Anthropic’s `claude-1` model family. | -| [`spacy.Claude-instant-1.v1`](/api/large-language-models#claude-instant-1) | Anthropic’s `claude-instant-1` model family. | -| [`spacy.Claude-instant-1-1.v1`](/api/large-language-models#claude-instant-1-1) | Anthropic’s `claude-instant-1.1` model family. | -| [`spacy.Claude-1-0.v1`](/api/large-language-models#claude-1-0) | Anthropic’s `claude-1.0` model family. | -| [`spacy.Claude-1-2.v1`](/api/large-language-models#claude-1-2) | Anthropic’s `claude-1.2` model family. | -| [`spacy.Claude-1-3.v1`](/api/large-language-models#claude-1-3) | Anthropic’s `claude-1.3` model family. | -| [`spacy.Dolly.v1`](/api/large-language-models#dolly) | Dolly models through [Databricks](https://huggingface.co/databricks) on HuggingFace. | -| [`spacy.Falcon.v1`](/api/large-language-models#falcon) | Falcon model through HuggingFace. | -| [`spacy.StableLM.v1`](/api/large-language-models#stablelm) | StableLM model through HuggingFace. | -| [`spacy.OpenLLaMA.v1`](/api/large-language-models#openllama) | OpenLLaMA model through HuggingFace. | -| [LangChain models](/api/large-language-models#langchain-models) | LangChain models for API retrieval. | +| Model | Description | +| ----------------------------------------------------------------------- | ---------------------------------------------- | +| [`spacy.GPT-4.v2`](/api/large-language-models#models-rest) | OpenAI’s `gpt-4` model family. | +| [`spacy.GPT-3-5.v2`](/api/large-language-models#models-rest) | OpenAI’s `gpt-3-5` model family. | +| [`spacy.Text-Davinci.v2`](/api/large-language-models#models-rest) | OpenAI’s `text-davinci` model family. 
| +| [`spacy.Code-Davinci.v2`](/api/large-language-models#models-rest) | OpenAI’s `code-davinci` model family. | +| [`spacy.Text-Curie.v2`](/api/large-language-models#models-rest) | OpenAI’s `text-curie` model family. | +| [`spacy.Text-Babbage.v2`](/api/large-language-models#models-rest) | OpenAI’s `text-babbage` model family. | +| [`spacy.Text-Ada.v2`](/api/large-language-models#models-rest) | OpenAI’s `text-ada` model family. | +| [`spacy.Davinci.v2`](/api/large-language-models#models-rest) | OpenAI’s `davinci` model family. | +| [`spacy.Curie.v2`](/api/large-language-models#models-rest) | OpenAI’s `curie` model family. | +| [`spacy.Babbage.v2`](/api/large-language-models#models-rest) | OpenAI’s `babbage` model family. | +| [`spacy.Ada.v2`](/api/large-language-models#models-rest) | OpenAI’s `ada` model family. | +| [`spacy.Command.v1`](/api/large-language-models#models-rest) | Cohere’s `command` model family. | +| [`spacy.Claude-2.v1`](/api/large-language-models#models-rest) | Anthropic’s `claude-2` model family. | +| [`spacy.Claude-1.v1`](/api/large-language-models#models-rest) | Anthropic’s `claude-1` model family. | +| [`spacy.Claude-instant-1.v1`](/api/large-language-models#models-rest) | Anthropic’s `claude-instant-1` model family. | +| [`spacy.Claude-instant-1-1.v1`](/api/large-language-models#models-rest) | Anthropic’s `claude-instant-1.1` model family. | +| [`spacy.Claude-1-0.v1`](/api/large-language-models#models-rest) | Anthropic’s `claude-1.0` model family. | +| [`spacy.Claude-1-2.v1`](/api/large-language-models#models-rest) | Anthropic’s `claude-1.2` model family. | +| [`spacy.Claude-1-3.v1`](/api/large-language-models#models-rest) | Anthropic’s `claude-1.3` model family. | +| [`spacy.Dolly.v1`](/api/large-language-models#models-hf) | Dolly models through HuggingFace. | +| [`spacy.Falcon.v1`](/api/large-language-models#models-hf) | Falcon models through HuggingFace. | +| [`spacy.Llama2.v1`](/api/large-language-models#models-hf) | Llama2 models through HuggingFace. | +| [`spacy.StableLM.v1`](/api/large-language-models#models-hf) | StableLM models through HuggingFace. | +| [`spacy.OpenLLaMA.v1`](/api/large-language-models#models-hf) | OpenLLaMA models through HuggingFace. | +| [LangChain models](/api/large-language-models#langchain-models) | LangChain models for API retrieval. | + +Note that the chat models variants of Llama 2 are currently not supported. This +is because they need a particular prompting setup and don't add any discernible +benefits in the use case of `spacy-llm` (i. e. no interactive chat) compared to +the completion model variants. ### Cache {id="cache"} @@ -505,7 +506,7 @@ documents at each run that keeps batches of documents stored on disk. ### Various functions {id="various-functions"} -| Component | Description | +| Function | Description | | ----------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | [`spacy.FewShotReader.v1`](/api/large-language-models#fewshotreader-v1) | This function is registered in spaCy's `misc` registry, and reads in examples from a `.yml`, `.yaml`, `.json` or `.jsonl` file. It uses [`srsly`](https://github.com/explosion/srsly) to read in these files and parses them depending on the file extension. 
|
| [`spacy.FileReader.v1`](/api/large-language-models#filereader-v1) | This function is registered in spaCy's `misc` registry, and reads a file provided to the `path` to return a `str` representation of its contents. This function is typically used to read [Jinja](https://jinja.palletsprojects.com/en/3.1.x/) files containing the prompt template. |

From 013762be4150d29ceb25d4339e8a934c32fcf195 Mon Sep 17 00:00:00 2001
From: Sofie Van Landeghem
Date: Fri, 8 Sep 2023 11:35:38 +0200
Subject: [PATCH 29/36] Few spacy-llm doc fixes (#12969)

* fix construction example

* shorten task-specific factory list

* small edits to HF models

* small edit to API models

* typo

* fix space

Co-authored-by: Raphael Mitsch

---------

Co-authored-by: Raphael Mitsch
---
 website/docs/api/large-language-models.mdx | 24 +++++++++-------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/website/docs/api/large-language-models.mdx b/website/docs/api/large-language-models.mdx
index 1ac9b0cef..d32368e22 100644
--- a/website/docs/api/large-language-models.mdx
+++ b/website/docs/api/large-language-models.mdx
@@ -19,26 +19,20 @@ prototyping** and **prompting**, and turning unstructured responses into
 An LLM component is implemented through the `LLMWrapper` class. It is
accessible through a generic `llm`
[component factory](https://spacy.io/usage/processing-pipelines#custom-components-factories)
as well as through task-specific component factories: `llm_ner`, `llm_spancat`, `llm_rel`,
`llm_textcat`, `llm_sentiment` and `llm_summarization`.

### LLMWrapper.\_\_init\_\_ {id="init",tag="method"}

> #### Example
>
> ```python
> # Construction via add_pipe with the default GPT 3.5 model and an explicitly defined task
> config = {"task": {"@llm_tasks": "spacy.NER.v3", "labels": ["PERSON", "ORGANISATION", "LOCATION"]}}
> llm = nlp.add_pipe("llm", config=config)
>
> # Construction via add_pipe with a task-specific factory and default GPT3.5 model
> llm = nlp.add_pipe("llm_ner")
>
> # Construction from class
> from spacy_llm.pipeline import LLMWrapper
@@ -956,6 +950,8 @@ provider's API. 
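> # `config` is forwarded to the provider's API as-is, so its keys and values
> # are provider-specific.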
> config = {"temperature": 0.0} > ``` +Currently, these models are provided as part of the core library: + | Model | Provider | Supported names | Default name | Default config | | ----------------------------- | --------- | ---------------------------------------------------------------------------------------- | ---------------------- | ------------------------------------ | | `spacy.GPT-4.v1` | OpenAI | `["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314"]` | `"gpt-4"` | `{}` | @@ -1036,6 +1032,8 @@ These models all take the same parameters: > name = "llama2-7b-hf" > ``` +Currently, these models are provided as part of the core library: + | Model | Provider | Supported names | HF directory | | -------------------- | --------------- | ------------------------------------------------------------------------------------------------------------ | -------------------------------------- | | `spacy.Dolly.v1` | Databricks | `["dolly-v2-3b", "dolly-v2-7b", "dolly-v2-12b"]` | https://huggingface.co/databricks | @@ -1044,8 +1042,6 @@ These models all take the same parameters: | `spacy.StableLM.v1` | Stability AI | `["stablelm-base-alpha-3b", "stablelm-base-alpha-7b", "stablelm-tuned-alpha-3b", "stablelm-tuned-alpha-7b"]` | https://huggingface.co/stabilityai | | `spacy.OpenLLaMA.v1` | OpenLM Research | `["open_llama_3b", "open_llama_7b", "open_llama_7b_v2", "open_llama_13b"]` | https://huggingface.co/openlm-research | -See the "HF directory" for more details on each of the models. - Note that Hugging Face will download the model the first time you use it - you can [define the cached directory](https://huggingface.co/docs/huggingface_hub/main/en/guides/manage-cache) From 8f0d6b0a8c42e4852bf6e24cdf629043f2f39361 Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Wed, 13 Sep 2023 13:21:58 +0200 Subject: [PATCH 30/36] Fix in BertTokenizer docs (#12955) * fix BertWordPieceTokenizer constructor call * fix * Update website/docs/usage/linguistic-features.mdx --------- Co-authored-by: Adriane Boyd --- website/docs/usage/linguistic-features.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/usage/linguistic-features.mdx b/website/docs/usage/linguistic-features.mdx index 90f305ada..a58e8a241 100644 --- a/website/docs/usage/linguistic-features.mdx +++ b/website/docs/usage/linguistic-features.mdx @@ -1299,9 +1299,9 @@ correct type. 
```python {title="functions.py",highlight="1"} @spacy.registry.tokenizers("bert_word_piece_tokenizer") -def create_whitespace_tokenizer(vocab_file: str, lowercase: bool): +def create_bert_tokenizer(vocab_file: str, lowercase: bool): def create_tokenizer(nlp): - return BertWordPieceTokenizer(nlp.vocab, vocab_file, lowercase) + return BertTokenizer(nlp.vocab, vocab_file, lowercase) return create_tokenizer ``` From 4e3360ad12c924b37185c238e832c10ac3ad9e15 Mon Sep 17 00:00:00 2001 From: Eliana Vornov Date: Mon, 25 Sep 2023 05:25:41 -0400 Subject: [PATCH 31/36] add --spans-key option for CLI spancat evaluation (#12981) * add span key option for CLI evaluation * Rephrase CLI help to refer to Doc.spans instead of spancat * Rephrase docs to refer to Doc.spans instead of spancat --------- Co-authored-by: Adriane Boyd --- spacy/cli/evaluate.py | 2 ++ website/docs/api/cli.mdx | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/spacy/cli/evaluate.py b/spacy/cli/evaluate.py index 6235b658d..2276ca6b0 100644 --- a/spacy/cli/evaluate.py +++ b/spacy/cli/evaluate.py @@ -28,6 +28,7 @@ def evaluate_cli( displacy_path: Optional[Path] = Opt(None, "--displacy-path", "-dp", help="Directory to output rendered parses as HTML", exists=True, file_okay=False), displacy_limit: int = Opt(25, "--displacy-limit", "-dl", help="Limit of parses to render as HTML"), per_component: bool = Opt(False, "--per-component", "-P", help="Return scores per component, only applicable when an output JSON file is specified."), + spans_key: str = Opt("sc", "--spans-key", "-sk", help="Spans key to use when evaluating Doc.spans"), # fmt: on ): """ @@ -53,6 +54,7 @@ def evaluate_cli( displacy_limit=displacy_limit, per_component=per_component, silent=False, + spans_key=spans_key, ) diff --git a/website/docs/api/cli.mdx b/website/docs/api/cli.mdx index d63ac6e1d..2646a848b 100644 --- a/website/docs/api/cli.mdx +++ b/website/docs/api/cli.mdx @@ -1183,7 +1183,7 @@ skew. To render a sample of dependency parses in a HTML file using the `--displacy-path` argument. ```bash -$ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit] +$ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [--gold-preproc] [--gpu-id] [--displacy-path] [--displacy-limit] [--per-component] [--spans-key] ``` | Name | Description | @@ -1197,6 +1197,7 @@ $ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [-- | `--displacy-path`, `-dp` | Directory to output rendered parses as HTML. If not set, no visualizations will be generated. ~~Optional[Path] \(option)~~ | | `--displacy-limit`, `-dl` | Number of parses to generate per file. Defaults to `25`. Keep in mind that a significantly higher number might cause the `.html` files to render slowly. ~~int (option)~~ | | `--per-component`, `-P` 3.6 | Whether to return the scores keyed by component name. Defaults to `False`. ~~bool (flag)~~ | +| `--spans-key`, `-sk` | Spans key to use when evaluating `Doc.spans`. Defaults to `sc`. ~~str (option)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | **CREATES** | Training results and optional metrics and visualizations. 
| From ed8c11e2aac43d0a378377823188e30c367391e4 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Mon, 25 Sep 2023 18:44:35 +0900 Subject: [PATCH 32/36] Fix typo in lemmatizer.py (#13003) specfic -> specific --- spacy/lang/es/lemmatizer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spacy/lang/es/lemmatizer.py b/spacy/lang/es/lemmatizer.py index 44f968347..ee5d38e84 100644 --- a/spacy/lang/es/lemmatizer.py +++ b/spacy/lang/es/lemmatizer.py @@ -163,7 +163,7 @@ class SpanishLemmatizer(Lemmatizer): for old, new in self.lookups.get_table("lemma_rules").get("det", []): if word == old: return [new] - # If none of the specfic rules apply, search in the common rules for + # If none of the specific rules apply, search in the common rules for # determiners and pronouns that follow a unique pattern for # lemmatization. If the word is in the list, return the corresponding # lemma. @@ -291,7 +291,7 @@ class SpanishLemmatizer(Lemmatizer): for old, new in self.lookups.get_table("lemma_rules").get("pron", []): if word == old: return [new] - # If none of the specfic rules apply, search in the common rules for + # If none of the specific rules apply, search in the common rules for # determiners and pronouns that follow a unique pattern for # lemmatization. If the word is in the list, return the corresponding # lemma. From 935a5455b696635119dc879205c92d442da67cb6 Mon Sep 17 00:00:00 2001 From: Adriane Boyd Date: Mon, 25 Sep 2023 11:49:28 +0200 Subject: [PATCH 33/36] Docs: add new tag for evaluate CLI --spans-keys (#13013) --- website/docs/api/cli.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/api/cli.mdx b/website/docs/api/cli.mdx index 2646a848b..e6b04a930 100644 --- a/website/docs/api/cli.mdx +++ b/website/docs/api/cli.mdx @@ -1197,7 +1197,7 @@ $ python -m spacy benchmark accuracy [model] [data_path] [--output] [--code] [-- | `--displacy-path`, `-dp` | Directory to output rendered parses as HTML. If not set, no visualizations will be generated. ~~Optional[Path] \(option)~~ | | `--displacy-limit`, `-dl` | Number of parses to generate per file. Defaults to `25`. Keep in mind that a significantly higher number might cause the `.html` files to render slowly. ~~int (option)~~ | | `--per-component`, `-P` 3.6 | Whether to return the scores keyed by component name. Defaults to `False`. ~~bool (flag)~~ | -| `--spans-key`, `-sk` | Spans key to use when evaluating `Doc.spans`. Defaults to `sc`. ~~str (option)~~ | +| `--spans-key`, `-sk` 3.6.2 | Spans key to use when evaluating `Doc.spans`. Defaults to `sc`. ~~str (option)~~ | | `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ | | **CREATES** | Training results and optional metrics and visualizations. | From b4501db6f8bff096f2a2103f998e81cd2dd1dfa2 Mon Sep 17 00:00:00 2001 From: Madeesh Kannan Date: Mon, 25 Sep 2023 18:20:30 +0200 Subject: [PATCH 34/36] Update emoji library in rule-based matcher example (#13014) --- website/docs/usage/rule-based-matching.mdx | 58 +++++++++++----------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/website/docs/usage/rule-based-matching.mdx b/website/docs/usage/rule-based-matching.mdx index 4f54415cb..d01107ea2 100644 --- a/website/docs/usage/rule-based-matching.mdx +++ b/website/docs/usage/rule-based-matching.mdx @@ -850,14 +850,14 @@ negative pattern. To keep it simple, we'll either add or subtract `0.1` points this way, the score will also reflect combinations of emoji, even positive _and_ negative ones. 
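The hunk below touches only part of the docs example: the `label_sentiment` callback it edits is registered with a `Matcher` that is set up earlier on the same docs page. For context, here is a minimal sketch of that setup, with abbreviated emoji lists; it is an illustrative reconstruction, not part of this patch.

```python
from spacy.lang.en import English
from spacy.matcher import Matcher

nlp = English()
matcher = Matcher(nlp.vocab)

def label_sentiment(matcher, doc, i, matches):
    ...  # full implementation shown in the example below

pos_emoji = ["😀", "😃", "😂", "🤣", "😊", "😍"]  # positive emoji
neg_emoji = ["😞", "😠", "😩", "😢", "😭", "😒"]  # negative emoji

# One token pattern per emoji, e.g. [{"ORTH": "😀"}]
pos_patterns = [[{"ORTH": e}] for e in pos_emoji]
neg_patterns = [[{"ORTH": e}] for e in neg_emoji]

# Each match triggers the callback, which adjusts doc.sentiment by ±0.1
matcher.add("HAPPY", pos_patterns, on_match=label_sentiment)
matcher.add("SAD", neg_patterns, on_match=label_sentiment)
```
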
-With a library like [Emojipedia](https://github.com/bcongdon/python-emojipedia), -we can also retrieve a short description for each emoji – for example, 😍's -official title is "Smiling Face With Heart-Eyes". Assigning it to a +With a library like [emoji](https://github.com/carpedm20/emoji), we can also +retrieve a short description for each emoji – for example, 😍's official title +is "Smiling Face With Heart-Eyes". Assigning it to a [custom attribute](/usage/processing-pipelines#custom-components-attributes) on the emoji span will make it available as `span._.emoji_desc`. ```python -from emojipedia import Emojipedia # Installation: pip install emojipedia +import emoji # Installation: pip install emoji from spacy.tokens import Span # Get the global Span object Span.set_extension("emoji_desc", default=None) # Register the custom attribute @@ -869,9 +869,9 @@ def label_sentiment(matcher, doc, i, matches): elif doc.vocab.strings[match_id] == "SAD": doc.sentiment -= 0.1 # Subtract 0.1 for negative sentiment span = doc[start:end] - emoji = Emojipedia.search(span[0].text) # Get data for emoji - span._.emoji_desc = emoji.title # Assign emoji description - + # Verify if it is an emoji and set the extension attribute correctly. + if emoji.is_emoji(span[0].text): + span._.emoji_desc = emoji.demojize(span[0].text, delimiters=("", ""), language=doc.lang_).replace("_", " ") ``` To label the hashtags, we can use a @@ -1096,28 +1096,28 @@ The following operators are supported by the `DependencyMatcher`, most of which come directly from [Semgrex](https://nlp.stanford.edu/nlp/javadoc/javanlp/edu/stanford/nlp/semgraph/semgrex/SemgrexPattern.html): -| Symbol | Description | -| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| `A < B` | `A` is the immediate dependent of `B`. | -| `A > B` | `A` is the immediate head of `B`. | -| `A << B` | `A` is the dependent in a chain to `B` following dep → head paths. | -| `A >> B` | `A` is the head in a chain to `B` following head → dep paths. | -| `A . B` | `A` immediately precedes `B`, i.e. `A.i == B.i - 1`, and both are within the same dependency tree. | -| `A .* B` | `A` precedes `B`, i.e. `A.i < B.i`, and both are within the same dependency tree _(Semgrex counterpart: `..`)_. | -| `A ; B` | `A` immediately follows `B`, i.e. `A.i == B.i + 1`, and both are within the same dependency tree _(Semgrex counterpart: `-`)_. | -| `A ;* B` | `A` follows `B`, i.e. `A.i > B.i`, and both are within the same dependency tree _(Semgrex counterpart: `--`)_. | -| `A $+ B` | `B` is a right immediate sibling of `A`, i.e. `A` and `B` have the same parent and `A.i == B.i - 1`. | -| `A $- B` | `B` is a left immediate sibling of `A`, i.e. `A` and `B` have the same parent and `A.i == B.i + 1`. | -| `A $++ B` | `B` is a right sibling of `A`, i.e. `A` and `B` have the same parent and `A.i < B.i`. | -| `A $-- B` | `B` is a left sibling of `A`, i.e. `A` and `B` have the same parent and `A.i > B.i`. | -| `A >+ B` 3.5.1 | `B` is a right immediate child of `A`, i.e. `A` is a parent of `B` and `A.i == B.i - 1` _(not in Semgrex)_. | -| `A >- B` 3.5.1 | `B` is a left immediate child of `A`, i.e. `A` is a parent of `B` and `A.i == B.i + 1` _(not in Semgrex)_. | -| `A >++ B` | `B` is a right child of `A`, i.e. `A` is a parent of `B` and `A.i < B.i`. | -| `A >-- B` | `B` is a left child of `A`, i.e. `A` is a parent of `B` and `A.i > B.i`. 
| -| `A <+ B` 3.5.1 | `B` is a right immediate parent of `A`, i.e. `A` is a child of `B` and `A.i == B.i - 1` _(not in Semgrex)_. | -| `A <- B` 3.5.1 | `B` is a left immediate parent of `A`, i.e. `A` is a child of `B` and `A.i == B.i + 1` _(not in Semgrex)_. | -| `A <++ B` | `B` is a right parent of `A`, i.e. `A` is a child of `B` and `A.i < B.i`. | -| `A <-- B` | `B` is a left parent of `A`, i.e. `A` is a child of `B` and `A.i > B.i`. | +| Symbol | Description | +| --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | +| `A < B` | `A` is the immediate dependent of `B`. | +| `A > B` | `A` is the immediate head of `B`. | +| `A << B` | `A` is the dependent in a chain to `B` following dep → head paths. | +| `A >> B` | `A` is the head in a chain to `B` following head → dep paths. | +| `A . B` | `A` immediately precedes `B`, i.e. `A.i == B.i - 1`, and both are within the same dependency tree. | +| `A .* B` | `A` precedes `B`, i.e. `A.i < B.i`, and both are within the same dependency tree _(Semgrex counterpart: `..`)_. | +| `A ; B` | `A` immediately follows `B`, i.e. `A.i == B.i + 1`, and both are within the same dependency tree _(Semgrex counterpart: `-`)_. | +| `A ;* B` | `A` follows `B`, i.e. `A.i > B.i`, and both are within the same dependency tree _(Semgrex counterpart: `--`)_. | +| `A $+ B` | `B` is a right immediate sibling of `A`, i.e. `A` and `B` have the same parent and `A.i == B.i - 1`. | +| `A $- B` | `B` is a left immediate sibling of `A`, i.e. `A` and `B` have the same parent and `A.i == B.i + 1`. | +| `A $++ B` | `B` is a right sibling of `A`, i.e. `A` and `B` have the same parent and `A.i < B.i`. | +| `A $-- B` | `B` is a left sibling of `A`, i.e. `A` and `B` have the same parent and `A.i > B.i`. | +| `A >+ B` 3.5.1 | `B` is a right immediate child of `A`, i.e. `A` is a parent of `B` and `A.i == B.i - 1` _(not in Semgrex)_. | +| `A >- B` 3.5.1 | `B` is a left immediate child of `A`, i.e. `A` is a parent of `B` and `A.i == B.i + 1` _(not in Semgrex)_. | +| `A >++ B` | `B` is a right child of `A`, i.e. `A` is a parent of `B` and `A.i < B.i`. | +| `A >-- B` | `B` is a left child of `A`, i.e. `A` is a parent of `B` and `A.i > B.i`. | +| `A <+ B` 3.5.1 | `B` is a right immediate parent of `A`, i.e. `A` is a child of `B` and `A.i == B.i - 1` _(not in Semgrex)_. | +| `A <- B` 3.5.1 | `B` is a left immediate parent of `A`, i.e. `A` is a child of `B` and `A.i == B.i + 1` _(not in Semgrex)_. | +| `A <++ B` | `B` is a right parent of `A`, i.e. `A` is a child of `B` and `A.i < B.i`. | +| `A <-- B` | `B` is a left parent of `A`, i.e. `A` is a child of `B` and `A.i > B.i`. 
| ### Designing dependency matcher patterns {id="dependencymatcher-patterns"} From 6255e386954778ea745aef4cf9ae3c3051a643bd Mon Sep 17 00:00:00 2001 From: Sergiu Nisioi Date: Thu, 28 Sep 2023 16:06:50 +0700 Subject: [PATCH 35/36] Adding rolegal model to the spaCy universe (#13017) * adding rolegal model to the spaCy universe * Fix formatting * Use raw URL * update image url and example * fix pip and update url to raw * okay, let's add thumb instead of image :octopus: * Update website/meta/universe.json --------- Co-authored-by: Adriane Boyd --- website/meta/universe.json | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/website/meta/universe.json b/website/meta/universe.json index 46de8121c..b2868c084 100644 --- a/website/meta/universe.json +++ b/website/meta/universe.json @@ -4469,6 +4469,37 @@ }, "category": ["pipeline", "standalone"], "tags": ["spans", "rules", "ner"] + }, + { + "id": "rolegal", + "title": "A spaCy Package for Romanian Legal Document Processing", + "thumb": "https://raw.githubusercontent.com/senisioi/rolegal/main/img/paper200x200.jpeg", + "slogan": "rolegal: a spaCy Package for Noisy Romanian Legal Document Processing", + "description": "This is a spaCy language model for Romanian legal domain trained with floret 4-gram to 5-gram embeddings and `LEGAL` entity recognition. Useful for processing OCR-resulted noisy legal documents.", + "github": "senisioi/rolegal", + "pip": "ro-legal-fl", + "tags": ["legal", "floret", "ner", "romanian"], + "code_example": [ + "import spacy", + "nlp = spacy.load(\"ro_legal_fl\")", + "", + "doc = nlp(\"Titlul III din LEGEA nr. 255 din 19 iulie 2013, publicată în MONITORUL OFICIAL\")", + "# legal entity identification", + "for entity in doc.ents:", + " print('entity: ', entity, '; entity type: ', entity.label_)", + "", + "# floret n-gram embeddings robust to typos", + "print(nlp('achizit1e public@').similarity(nlp('achiziții publice')))", + "# 0.7393895566928835", + "print(nlp('achizitii publice').similarity(nlp('achiziții publice')))", + "# 0.8996480808279399" + ], + "author": "Sergiu Nisioi", + "author_links": { + "github": "senisioi", + "website": "https://nlp.unibuc.ro/people/snisioi.html" + }, + "category": ["pipeline", "training", "models"] } ], From beda27a91eadd70563dbaffd844d8c9d5e245928 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?= Date: Thu, 28 Sep 2023 11:36:44 +0200 Subject: [PATCH 36/36] Load the cli module lazily for spacy.info (#12962) * Load the cli module lazily for spacy.info This avoids that the `spacy` module cannot be imported when the users chooses not to install `typer`/`requests`. * Add test --------- Co-authored-by: Adriane Boyd --- spacy/__init__.py | 7 ++++++- spacy/tests/test_cli.py | 4 ++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/spacy/__init__.py b/spacy/__init__.py index 1a18ad0d5..8aa2eccd7 100644 --- a/spacy/__init__.py +++ b/spacy/__init__.py @@ -13,7 +13,6 @@ from thinc.api import Config, prefer_gpu, require_cpu, require_gpu # noqa: F401 from . import pipeline # noqa: F401 from . 
import util from .about import __version__ # noqa: F401 -from .cli.info import info # noqa: F401 from .errors import Errors from .glossary import explain # noqa: F401 from .language import Language @@ -77,3 +76,9 @@ def blank( # We should accept both dot notation and nested dict here for consistency config = util.dot_to_dict(config) return LangClass.from_config(config, vocab=vocab, meta=meta) + + +def info(*args, **kwargs): + from .cli.info import info as cli_info + + return cli_info(*args, **kwargs) diff --git a/spacy/tests/test_cli.py b/spacy/tests/test_cli.py index 8e1c9ca32..ebf2ec7da 100644 --- a/spacy/tests/test_cli.py +++ b/spacy/tests/test_cli.py @@ -14,6 +14,7 @@ from thinc.api import Config, ConfigValidationError import spacy from spacy import about +from spacy import info as spacy_info from spacy.cli import info from spacy.cli._util import ( download_file, @@ -225,6 +226,9 @@ def test_cli_info(): raw_data = info(tmp_dir, exclude=[""]) assert raw_data["lang"] == "nl" assert raw_data["components"] == ["textcat"] + raw_data = spacy_info(tmp_dir, exclude=[""]) + assert raw_data["lang"] == "nl" + assert raw_data["components"] == ["textcat"] def test_cli_converters_conllu_to_docs():
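
The fix in this last patch is an instance of the deferred-import pattern: the import moves from module scope into the function body, so the optional dependencies behind the CLI (`typer`, `requests`) are only pulled in when the function is actually called. A minimal, self-contained sketch of the pattern follows; `heavy_cli` and `mypackage` are hypothetical stand-in names, not part of spaCy.

```python
# mypackage/__init__.py: sketch of the deferred-import pattern used above

def info(*args, **kwargs):
    # Imported at call time rather than at package import time, so
    # "import mypackage" succeeds even when heavy_cli (or the optional
    # dependencies it pulls in) is not installed.
    from heavy_cli import info as cli_info

    return cli_info(*args, **kwargs)
```

A module-level `__getattr__` (PEP 562) would achieve a similar lazy resolution on first attribute access; the wrapper function chosen in the patch is simpler and keeps the attribute a plain callable with an explicit place for the deferred import.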