From 495e9e0751d542e7640407932febc769412176fd Mon Sep 17 00:00:00 2001 From: Dmitrii Magas Date: Mon, 24 Feb 2025 22:09:11 +0100 Subject: [PATCH] Add openreview helper to fetch papers from conferences (#879) Co-authored-by: pre-commit-ci-lite[bot] <117423508+pre-commit-ci-lite[bot]@users.noreply.github.com> Co-authored-by: James Braza --- .mailmap | 1 + CONTRIBUTING.md | 4 - README.md | 96 +----------- docs/tutorials/where_do_I_get_papers.md | 122 +++++++++++++++ paperqa/configs/openreview.json | 36 +++++ paperqa/contrib/openreview_paper_helper.py | 165 +++++++++++++++++++++ pyproject.toml | 4 + uv.lock | 75 +++++++++- 8 files changed, 402 insertions(+), 101 deletions(-) create mode 100644 docs/tutorials/where_do_I_get_papers.md create mode 100644 paperqa/configs/openreview.json create mode 100644 paperqa/contrib/openreview_paper_helper.py diff --git a/.mailmap b/.mailmap index 3a5f58824..bc4072974 100644 --- a/.mailmap +++ b/.mailmap @@ -1,5 +1,6 @@ Andrew White Anush008 Anush +Dmitrii Magas eamag Geemi Wellawatte Geemi Wellawatte <49410838+geemi725@users.noreply.github.com> Harry Vu diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 55c753623..eb9adc998 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -73,8 +73,4 @@ from responses to ensure sensitive information is excluded from the cassettes. Please ensure cassettes are less than 1 MB to keep tests loading quickly. -## Additional resources - -For more information on contributing, please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file in the repository. - Happy coding! diff --git a/README.md b/README.md index 32a04a9df..c51f8cd58 100644 --- a/README.md +++ b/README.md @@ -39,8 +39,6 @@ question answering, summarization, and contradiction detection. 
- [Using Clients Directly](#using-clients-directly) - [Settings Cheatsheet](#settings-cheatsheet) - [Where do I get papers?](#where-do-i-get-papers) - - [Zotero](#zotero) - - [Paper Scraper](#paper-scraper) - [Callbacks](#callbacks) - [Caching Embeddings](#caching-embeddings) - [Customizing Prompts](#customizing-prompts) @@ -836,99 +834,7 @@ will return much faster than the first query and we'll be certain the authors ma Well that's a really good question! It's probably best to just download PDFs of papers you think will help answer your question and start from there. -### Zotero - -_It's been a while since we've tested this - so let us know if it runs into issues!_ - -If you use [Zotero](https://www.zotero.org/) to organize your personal bibliography, -you can use the `paperqa.contrib.ZoteroDB` to query papers from your library, -which relies on [pyzotero](https://github.com/urschrei/pyzotero). - -Install `pyzotero` via the `zotero` extra for this feature: - -```bash -pip install paper-qa[zotero] -``` - -First, note that PaperQA2 parses the PDFs of papers to store in the database, -so all relevant papers should have PDFs stored inside your database. -You can get Zotero to automatically do this by highlighting the references -you wish to retrieve, right clicking, and selecting _"Find Available PDFs"_. -You can also manually drag-and-drop PDFs onto each reference. - -To download papers, you need to get an API key for your account. - -1. Get your library ID, and set it as the environment variable `ZOTERO_USER_ID`. - - For personal libraries, this ID is given [here](https://www.zotero.org/settings/keys) at the part "_Your userID for use in API calls is XXXXXX_". - - For group libraries, go to your group page `https://www.zotero.org/groups/groupname`, and hover over the settings link. The ID is the integer after /groups/. (_h/t pyzotero!_) -2. Create a new API key [here](https://www.zotero.org/settings/keys/new) and set it as the environment variable `ZOTERO_API_KEY`. 
- - The key will need read access to the library. - -With this, we can download papers from our library and add them to PaperQA2: - -```python -from paperqa import Docs -from paperqa.contrib import ZoteroDB - -docs = Docs() -zotero = ZoteroDB(library_type="user") # "group" if group library - -for item in zotero.iterate(limit=20): - if item.num_pages > 30: - continue # skip long papers - docs.add(item.pdf, docname=item.key) -``` - -which will download the first 20 papers in your Zotero database and add -them to the `Docs` object. - -We can also do specific queries of our Zotero library and iterate over the results: - -```python -for item in zotero.iterate( - q="large language models", - qmode="everything", - sort="date", - direction="desc", - limit=100, -): - print("Adding", item.title) - docs.add(item.pdf, docname=item.key) -``` - -You can read more about the search syntax by typing `zotero.iterate?` in IPython. - -### Paper Scraper - -If you want to search for papers outside of your own collection, I've found an unrelated project called [`paper-scraper`](https://github.com/blackadad/paper-scraper) that looks -like it might help. But beware, this project looks like it uses some scraping tools that may violate publisher's rights or be in a gray area of legality. - -First, install `paper-scraper`: - -```bash -pip install git+https://github.com/blackadad/paper-scraper.git -``` - -Then run with it: - -```python -import paperscraper -from paperqa import Docs - -keyword_search = "bispecific antibody manufacture" -papers = paperscraper.search_papers(keyword_search) -docs = Docs() -for path, data in papers.items(): - try: - docs.add(path) - except ValueError as e: - # sometimes this happens if PDFs aren't downloaded or readable - print("Could not read", path, e) -session = docs.query( - "What manufacturing challenges are unique to bispecific antibodies?" 
-) -print(session) -``` +See detailed docs [about zotero, openreview and parsing](docs/tutorials/where_do_I_get_papers.md) ## Callbacks diff --git a/docs/tutorials/where_do_I_get_papers.md b/docs/tutorials/where_do_I_get_papers.md new file mode 100644 index 000000000..f747c9d2c --- /dev/null +++ b/docs/tutorials/where_do_I_get_papers.md @@ -0,0 +1,122 @@ +# Where to get papers + +## OpenReview + +You can use papers from [https://openreview.net/](https://openreview.net/) as your database! +Here's a helper that fetches a list of all papers from a selected conference (like ICLR, ICML, NeurIPS), queries this list to find relevant papers using LLM, and downloads those relevant papers to a local directory which can be used with paper-qa on the next step. Install `openreview-py` with + +```bash +pip install paper-qa[openreview] +``` + +and get your username and password from the website. You can put them into `.env` file under `OPENREVIEW_USERNAME` and `OPENREVIEW_PASSWORD` variables, or pass them in the code directly. + +```python +from paperqa import Settings +from paperqa.contrib.openreview_paper_helper import OpenReviewPaperHelper + +# these settings require gemini api key you can get from https://aistudio.google.com/ +# import os; os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY") +# 1Mil context window helps to suggest papers. These settings are not required, but useful for an initial setup. +settings = Settings.from_name("openreview") +helper = OpenReviewPaperHelper(settings, venue_id="ICLR.cc/2025/Conference") +# if you don't know venue_id you can find it via +# helper.get_venues() + +# Now we can query LLM to select relevant papers and download PDFs +question = "What is the progress on brain activity research?" 
+
+submissions = await helper.fetch_relevant_papers(question)
+
+# There's also a function that saves tokens by using openreview metadata for citations
+docs = await helper.aadd_docs(submissions)
+
+# Now you can continue asking like in the [main tutorial](../../README.md)
+session = docs.query(question, settings=settings)
+print(session.answer)
+```
+
+## Zotero
+
+_It's been a while since we've tested this - so let us know if it runs into issues!_
+
+If you use [Zotero](https://www.zotero.org/) to organize your personal bibliography,
+you can use the `paperqa.contrib.ZoteroDB` to query papers from your library,
+which relies on [pyzotero](https://github.com/urschrei/pyzotero).
+
+Install `pyzotero` via the `zotero` extra for this feature:
+
+```bash
+pip install paper-qa[zotero]
+```
+
+First, note that PaperQA2 parses the PDFs of papers to store in the database,
+so all relevant papers should have PDFs stored inside your database.
+You can get Zotero to automatically do this by highlighting the references
+you wish to retrieve, right clicking, and selecting _"Find Available PDFs"_.
+You can also manually drag-and-drop PDFs onto each reference.
+
+To download papers, you need to get an API key for your account.
+
+1. Get your library ID, and set it as the environment variable `ZOTERO_USER_ID`.
+   - For personal libraries, this ID is given [here](https://www.zotero.org/settings/keys) at the part "_Your userID for use in API calls is XXXXXX_".
+   - For group libraries, go to your group page `https://www.zotero.org/groups/groupname`, and hover over the settings link. The ID is the integer after /groups/. (_h/t pyzotero!_)
+2. Create a new API key [here](https://www.zotero.org/settings/keys/new) and set it as the environment variable `ZOTERO_API_KEY`.
+   - The key will need read access to the library.
+ +With this, we can download papers from our library and add them to PaperQA2: + +```python +from paperqa import Docs +from paperqa.contrib import ZoteroDB + +docs = Docs() +zotero = ZoteroDB(library_type="user") # "group" if group library + +for item in zotero.iterate(limit=20): + if item.num_pages > 30: + continue # skip long papers + docs.add(item.pdf, docname=item.key) +``` + +which will download the first 20 papers in your Zotero database and add +them to the `Docs` object. + +We can also do specific queries of our Zotero library and iterate over the results: + +```python +for item in zotero.iterate( + q="large language models", + qmode="everything", + sort="date", + direction="desc", + limit=100, +): + print("Adding", item.title) + docs.add(item.pdf, docname=item.key) +``` + +You can read more about the search syntax by typing `zotero.iterate?` in IPython. + +## Paper Scraper + +If you want to search for papers outside of your own collection, I've found an unrelated project called [paper-scraper](https://github.com/blackadad/paper-scraper) that looks +like it might help. But beware, this project looks like it uses some scraping tools that may violate publisher's rights or be in a gray area of legality. + +```python +from paperqa import Docs + +keyword_search = "bispecific antibody manufacture" +papers = paperscraper.search_papers(keyword_search) +docs = Docs() +for path, data in papers.items(): + try: + docs.add(path) + except ValueError as e: + # sometimes this happens if PDFs aren't downloaded or readable + print("Could not read", path, e) +session = docs.query( + "What manufacturing challenges are unique to bispecific antibodies?" 
+) +print(session) +``` diff --git a/paperqa/configs/openreview.json b/paperqa/configs/openreview.json new file mode 100644 index 000000000..0bf1bc491 --- /dev/null +++ b/paperqa/configs/openreview.json @@ -0,0 +1,36 @@ +{ + "llm": "gemini/gemini-2.0-flash-exp", + "llm_config": { + "model_name": "gemini/gemini-2.0-flash-exp", + "litellm_params": { + "model": "gemini/gemini-2.0-flash-exp", + "api_key": null + } + }, + "summary_llm": "gemini/gemini-2.0-flash-exp", + "summary_llm_config": { + "model_name": "gemini/gemini-2.0-flash-exp", + "litellm_params": { + "model": "gemini/gemini-2.0-flash-exp", + "api_key": null + } + }, + "embedding": "ollama/granite3-dense", + "paper_directory": "my_papers", + "verbosity": 3, + "agent": { + "agent_llm": "gemini/gemini-2.0-flash-exp", + "agent_llm_config": { + "model_name": "gemini/gemini-2.0-flash-exp", + "litellm_params": { + "model": "gemini/gemini-2.0-flash-exp", + "api_key": null + } + }, + "return_paper_metadata": false + }, + "parsing": { + "chunk_size": 3000000, + "use_doc_details": false + } +} diff --git a/paperqa/contrib/openreview_paper_helper.py b/paperqa/contrib/openreview_paper_helper.py new file mode 100644 index 000000000..82a765535 --- /dev/null +++ b/paperqa/contrib/openreview_paper_helper.py @@ -0,0 +1,165 @@ +import json +import logging +import os +from pathlib import Path +from typing import Any + +import anyio +import httpx +from aviary.core import Message +from lmi import LiteLLMModel +from pydantic import BaseModel, Field + +from paperqa import Docs, Settings + +try: + import openreview +except ImportError: + openreview = None + +logger = logging.getLogger(__name__) + + +class PaperSuggestion(BaseModel): + submission_id: str = Field(description="The ID of the submission") + explanation: str = Field(description="Reasoning for why this paper is relevant") + + +class RelevantPapersResponse(BaseModel): + suggested_papers: list[PaperSuggestion] = Field( + description="List of suggested papers with their IDs 
and explanations" + ) + reasoning_step_by_step: str = Field( + description="Step-by-step reasoning for the selection" + ) + + +RELEVANT_PAPERS_SCHEMA = RelevantPapersResponse.model_json_schema() + + +class OpenReviewPaperHelper: + def __init__( + self, + settings: Settings, + venue_id: str | None = "ICLR.cc/2025/Conference", + username: str | None = None, + password: str | None = None, + ) -> None: + self.settings = settings + Path(settings.paper_directory).mkdir(parents=True, exist_ok=True) + if openreview is None: + raise ImportError( + "openreview requires the 'openreview-py' extra. Please run: `pip install paper-qa[openreview]`." + ) + self.client = openreview.api.OpenReviewClient( + baseurl="https://api2.openreview.net", + username=username or os.getenv("OPENREVIEW_USERNAME"), + password=password or os.getenv("OPENREVIEW_PASSWORD"), + ) + self.venue_id = venue_id + self.llm_model = LiteLLMModel( + name=self.settings.llm, config=self.settings.llm_config + ) + + def get_venues(self) -> list[str]: + """Get list of available venues.""" + return self.client.get_group(id="venues").members + + def get_submissions(self) -> list[Any]: + """Get all submissions for the current venue.""" + logger.info(f"Fetching submissions for venue {self.venue_id}") + return self.client.get_all_notes(content={"venueid": self.venue_id}) + + def create_submission_string(self, submissions: list[Any]) -> str: + """Creates a string containing the id, title, and abstract of all submissions.""" + submission_info_string = "" + for submission in submissions: + paper = { + "submission_id": submission.id, + "title": submission.content["title"]["value"], + "abstract": submission.content["abstract"]["value"], + } + submission_info_string += f"{paper}\n" + return submission_info_string + + async def fetch_relevant_papers(self, question: str) -> dict[str, Any]: + """Get relevant papers for a given question using LLM.""" + submissions = self.get_submissions() + submission_string = 
self.create_submission_string(submissions) + + if len(submission_string) > self.settings.parsing.chunk_size: + chunks = [ + submission_string[i : i + self.settings.parsing.chunk_size] + for i in range( + 0, len(submission_string), self.settings.parsing.chunk_size + ) + ] + else: + chunks = [submission_string] + relevant_papers = [] + for chunk in chunks: + logger.info(f"Fetching relevant papers for question: {question}") + relevant_papers += await self._get_relevant_papers_chunk(question, chunk) + subs = [s for s in submissions if s.id in set(relevant_papers)] + await self.download_papers(subs) + return {sub.id: sub for sub in subs} + + async def _get_relevant_papers_chunk(self, question: str, chunk: str) -> list[Any]: + prompt = ( + chunk + + "You are the helper model that aims to get up to 20 most relevant papers for the user's question. " + + "User's question:\n" + ) + + response = await self.llm_model.call_single( + messages=[Message(role="user", content=prompt + question)], + output_type=RELEVANT_PAPERS_SCHEMA, + ) + + content = json.loads(str(response.text)) + return [p["submission_id"] for p in content["suggested_papers"]] + + async def download_papers(self, submissions: list[Any]) -> None: + """Download PDFs for given submissions.""" + downloaded_papers = Path(self.settings.paper_directory).rglob("*.pdf") + downloaded_ids = [p.stem for p in downloaded_papers] + logger.info("Downloading PDFs for relevant papers.") + for submission in submissions: + if submission.id not in downloaded_ids: + await self._download_pdf(submission) + + async def _download_pdf(self, submission: Any) -> bool: + """Download a single PDF.""" + pdf_link = f"https://openreview.net/{submission.content['pdf']['value']}" + async with httpx.AsyncClient() as client: + response = await client.get(pdf_link) + if response.status_code == httpx.codes.OK.value: + async with await anyio.open_file( + f"{self.settings.paper_directory}/{submission.id}.pdf", "wb" + ) as f: + await 
f.write(response.content) + return True + logger.warning( + f"Failed to download the PDF. Status code: {response.status_code}, text: {response.text}" + ) + return False + + async def aadd_docs( + self, subs: dict[str, Any] | None = None, docs: Docs | None = None + ) -> Docs: + if docs is None: + docs = Docs() + for doc_path in Path(self.settings.paper_directory).rglob("*.pdf"): + sub = subs.get(doc_path.stem) if subs is not None else None + if sub: + await docs.aadd( + doc_path, + settings=self.settings, + citation=sub.content["_bibtex"]["value"], + title=sub.content["title"]["value"], + doi="None", + authors=sub.content["authors"]["value"], + ) + else: + await docs.aadd(doc_path, settings=self.settings) + return docs diff --git a/pyproject.toml b/pyproject.toml index 01b42cf3f..c73839f60 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,6 +79,9 @@ ldp = [ local = [ "sentence-transformers", ] +openreview = [ + "openreview-py", +] qdrant = [ "qdrant-client", ] @@ -164,6 +167,7 @@ warn_unused_ignores = true ignore_missing_imports = true # Per-module configuration options module = [ + "openreview", "pybtex.*", # SEE: https://bitbucket.org/pybtex-devs/pybtex/issues/141/type-annotations "pymupdf", # SEE: https://github.com/pymupdf/PyMuPDF/issues/2883 "pyzotero", # SEE: https://github.com/urschrei/pyzotero/issues/110 diff --git a/uv.lock b/uv.lock index ecfc40a1e..23ba6967d 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,4 @@ version = 1 -revision = 1 requires-python = ">=3.11" resolution-markers = [ "python_full_version >= '3.13' and platform_python_implementation == 'PyPy'", @@ -615,6 +614,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e2/94/758680531a00d06e471ef649e4ec2ed6bf185356a7f9fbfbb7368a40bd49/fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b", size = 184484 }, ] +[[package]] +name = "future" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { 
url = "https://files.pythonhosted.org/packages/a7/b2/4140c69c6a66432916b26158687e821ba631a4c9273c474343badf84d3ba/future-1.0.0.tar.gz", hash = "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05", size = 1228490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/71/ae30dadffc90b9006d77af76b393cb9dfbfc9629f339fc1574a1c52e6806/future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216", size = 491326 }, +] + [[package]] name = "grpcio" version = "1.70.0" @@ -1545,6 +1553,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9a/1a/e62718f311daa26d208800976d7944e5ee6d503e1ea474522b2a15a904bb/openai-1.64.0-py3-none-any.whl", hash = "sha256:20f85cde9e95e9fbb416e3cb5a6d3119c0b28308afd6e3cc47bf100623dac623", size = 472289 }, ] +[[package]] +name = "openreview-py" +version = "1.46.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "future" }, + { name = "pycryptodome" }, + { name = "pyjwt" }, + { name = "pylatexenc" }, + { name = "requests" }, + { name = "tld" }, + { name = "tqdm" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/90/2fe270b4c88e2400fc6330195b318498ae026c93db79f668bafbf56cac2c/openreview_py-1.46.3-py3-none-any.whl", hash = "sha256:03c8ca4ab9417ccd87ecbc1c43283ddc5b6423e2e49cba62d295539d7b464067", size = 717452 }, +] + [[package]] name = "packaging" version = "24.2" @@ -1649,6 +1675,9 @@ ldp = [ local = [ { name = "sentence-transformers" }, ] +openreview = [ + { name = "openreview-py" }, +] qdrant = [ { name = "qdrant-client" }, ] @@ -1677,6 +1706,7 @@ requires-dist = [ { name = "ldp", marker = "extra == 'ldp'", specifier = ">=0.25.0" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.8" }, { name = "numpy" }, + { name = "openreview-py", marker = "extra == 'openreview'" }, { name = "paper-qa", extras = ["ldp", "local", "qdrant", "typing", "zotero"], marker = "extra 
== 'dev'" }, { name = "pre-commit", marker = "extra == 'dev'", specifier = ">=3.4" }, { name = "pybtex" }, @@ -1708,7 +1738,6 @@ requires-dist = [ { name = "types-setuptools", marker = "extra == 'typing'" }, { name = "vcrpy", marker = "extra == 'dev'", specifier = ">=6" }, ] -provides-extras = ["dev", "ldp", "local", "qdrant", "typing", "zotero"] [package.metadata.requires-dev] dev = [{ name = "paper-qa", extras = ["dev"] }] @@ -1960,6 +1989,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ad/5f/40d8e90f985a05133a8895fc454c6127ecec3de8b095dd35bba91382f803/pybtex-0.24.0-py2.py3-none-any.whl", hash = "sha256:e1e0c8c69998452fea90e9179aa2a98ab103f3eed894405b7264e517cc2fcc0f", size = 561354 }, ] +[[package]] +name = "pycryptodome" +version = "3.21.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/52/13b9db4a913eee948152a079fe58d035bd3d1a519584155da8e786f767e6/pycryptodome-3.21.0.tar.gz", hash = "sha256:f7787e0d469bdae763b876174cf2e6c0f7be79808af26b1da96f1a64bcf47297", size = 4818071 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/88/5e83de10450027c96c79dc65ac45e9d0d7a7fef334f39d3789a191f33602/pycryptodome-3.21.0-cp36-abi3-macosx_10_9_universal2.whl", hash = "sha256:2480ec2c72438430da9f601ebc12c518c093c13111a5c1644c82cdfc2e50b1e4", size = 2495937 }, + { url = "https://files.pythonhosted.org/packages/66/e1/8f28cd8cf7f7563319819d1e172879ccce2333781ae38da61c28fe22d6ff/pycryptodome-3.21.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:de18954104667f565e2fbb4783b56667f30fb49c4d79b346f52a29cb198d5b6b", size = 1634629 }, + { url = "https://files.pythonhosted.org/packages/6a/c1/f75a1aaff0c20c11df8dc8e2bf8057e7f73296af7dfd8cbb40077d1c930d/pycryptodome-3.21.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de4b7263a33947ff440412339cb72b28a5a4c769b5c1ca19e33dd6cd1dcec6e", size = 2168708 }, + { url = 
"https://files.pythonhosted.org/packages/ea/66/6f2b7ddb457b19f73b82053ecc83ba768680609d56dd457dbc7e902c41aa/pycryptodome-3.21.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0714206d467fc911042d01ea3a1847c847bc10884cf674c82e12915cfe1649f8", size = 2254555 }, + { url = "https://files.pythonhosted.org/packages/2c/2b/152c330732a887a86cbf591ed69bd1b489439b5464806adb270f169ec139/pycryptodome-3.21.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d85c1b613121ed3dbaa5a97369b3b757909531a959d229406a75b912dd51dd1", size = 2294143 }, + { url = "https://files.pythonhosted.org/packages/55/92/517c5c498c2980c1b6d6b9965dffbe31f3cd7f20f40d00ec4069559c5902/pycryptodome-3.21.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:8898a66425a57bcf15e25fc19c12490b87bd939800f39a03ea2de2aea5e3611a", size = 2160509 }, + { url = "https://files.pythonhosted.org/packages/39/1f/c74288f54d80a20a78da87df1818c6464ac1041d10988bb7d982c4153fbc/pycryptodome-3.21.0-cp36-abi3-musllinux_1_2_i686.whl", hash = "sha256:932c905b71a56474bff8a9c014030bc3c882cee696b448af920399f730a650c2", size = 2329480 }, + { url = "https://files.pythonhosted.org/packages/39/1b/d0b013bf7d1af7cf0a6a4fce13f5fe5813ab225313755367b36e714a63f8/pycryptodome-3.21.0-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:18caa8cfbc676eaaf28613637a89980ad2fd96e00c564135bf90bc3f0b34dd93", size = 2254397 }, + { url = "https://files.pythonhosted.org/packages/14/71/4cbd3870d3e926c34706f705d6793159ac49d9a213e3ababcdade5864663/pycryptodome-3.21.0-cp36-abi3-win32.whl", hash = "sha256:280b67d20e33bb63171d55b1067f61fbd932e0b1ad976b3a184303a3dad22764", size = 1775641 }, + { url = "https://files.pythonhosted.org/packages/43/1d/81d59d228381576b92ecede5cd7239762c14001a828bdba30d64896e9778/pycryptodome-3.21.0-cp36-abi3-win_amd64.whl", hash = "sha256:b7aa25fc0baa5b1d95b7633af4f5f1838467f1815442b22487426f94e0d66c53", size = 1812863 }, +] + [[package]] name = "pydantic" 
version = "2.10.1" @@ -2049,6 +2096,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, ] +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997 }, +] + +[[package]] +name = "pylatexenc" +version = "2.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5d/ab/34ec41718af73c00119d0351b7a2531d2ebddb51833a36448fc7b862be60/pylatexenc-2.10.tar.gz", hash = "sha256:3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3", size = 162597 } + [[package]] name = "pylint" version = "3.3.4" @@ -2831,6 +2893,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/de/a8/8f499c179ec900783ffe133e9aab10044481679bb9aad78436d239eee716/tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95", size = 894669 }, ] +[[package]] +name = "tld" +version = "0.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/19/2b/678082222bc1d2823ea8384c6806085b85226ff73885c703fe0c7143ef64/tld-0.13.tar.gz", hash = "sha256:93dde5e1c04bdf1844976eae440706379d21f4ab235b73c05d7483e074fb5629", size = 446824 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ea/75/779ddeaf4d847ba0021ad99d1b615a853f2a5762bd5d118273c7f7673c38/tld-0.13-py2.py3-none-any.whl", hash = "sha256:f75b2be080f767ed17c2338a339eaa4fab5792586319ca819119da252f9f3749", size = 263789 }, +] + [[package]] name = "tokenizers" version = "0.21.0"