author     Reinier van der Leer <pwuts@agpt.co>  2024-01-02 22:23:09 +0100
committer  GitHub <noreply@github.com>  2024-01-02 22:23:09 +0100
commit     25cc6ad6ae651c3b925ab9447421337c2f4b68e1 (patch)
tree       493a22d9302f7b0fae461f5a51dc194027841788
parent     [Documentation Update] Updating Using and Creating Abilities to use Action An... (diff)
AGBenchmark codebase clean-up (#6650)
* refactor(benchmark): Deduplicate configuration loading logic
  - Move the configuration loading logic to a separate `load_agbenchmark_config` function in the `agbenchmark/config.py` module.
  - Replace the duplicate loading logic in `conftest.py`, `generate_test.py`, `ReportManager.py`, `reports.py`, and `__main__.py` with calls to the `load_agbenchmark_config` function.

* fix(benchmark): Fix type errors, linting errors, and clean up CLI validation in __main__.py
  - Fixed type errors and linting errors in `__main__.py`.
  - Improved the readability of CLI argument validation by introducing a separate function for it.

* refactor(benchmark): Lint and typefix app.py
  - Rearranged and cleaned up import statements.
  - Fixed type errors caused by improper use of `psutil` objects.
  - Simplified a number of `os.path` usages by converting to `pathlib`.
  - Use `Task` and `TaskRequestBody` classes from `agent_protocol_client` instead of `.schema`.

* refactor(benchmark): Replace `.agent_protocol_client` with `agent-protocol-client`, clean up schema.py
  - Remove `agbenchmark.agent_protocol_client` (an offline copy of `agent-protocol-client`).
  - Add `agent-protocol-client` as a dependency and change imports to `agent_protocol_client`.
  - Fix the type annotation on `agent_api_interface.py::upload_artifacts` (`ApiClient` -> `AgentApi`).
  - Remove all unused types from schema.py (= most of them).

* refactor(benchmark): Use pathlib in agent_interface.py and agent_api_interface.py

* refactor(benchmark): Improve typing, response validation, and readability in app.py
  - Simplified response generation by leveraging type checking and conversion by FastAPI.
  - Introduced use of `HTTPException` for error responses.
  - Improved naming, formatting, and typing in `app.py::create_evaluation`.
  - Updated the docstring on `app.py::create_agent_task`.
  - Fixed return type annotations of `create_single_test` and `create_challenge` in generate_test.py.
  - Added default values to optional attributes on models in report_types_v2.py.
  - Removed unused imports in `generate_test.py`.

* refactor(benchmark): Clean up logging and print statements
  - Introduced use of the `logging` library for unified logging and better readability (a sketch follows below).
  - Converted most print statements to use `logger.debug`, `logger.warning`, and `logger.error`.
  - Improved the descriptiveness of log statements.
  - Removed unnecessary print statements.
  - Added log statements to unspecific and non-verbose `except` blocks.
  - Added a `--debug` flag, which sets the log level to `DEBUG` and enables a more comprehensive log format.
  - Added the `.utils.logging` module with a `configure_logging` function to easily configure the logging library.
  - Converted raw escape sequences in `.utils.challenge` to use `colorama`.
  - Renamed `generate_test.py::generate_tests` to `load_challenges`.
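
A minimal sketch of the `configure_logging` helper described in the logging clean-up item above, assuming a `logging.basicConfig`-based implementation. Only the function name and the level-dependent format behavior come from this commit; the format strings themselves are illustrative:

    # agbenchmark/utils/logging.py -- illustrative sketch, not the actual module
    import logging

    def configure_logging(level: int = logging.INFO) -> None:
        # --debug sets the level to DEBUG, which enables a more comprehensive format
        fmt = (
            "%(asctime)s %(levelname)s %(name)s:%(lineno)d %(message)s"
            if level <= logging.DEBUG
            else "%(levelname)s %(message)s"
        )
        logging.basicConfig(level=level, format=fmt)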
* refactor(benchmark): Remove unused server.py and agent_interface.py::run_agent
  - Remove the unused server.py file.
  - Remove the unused run_agent function from agent_interface.py.

* refactor(benchmark): Clean up conftest.py
  - Fix and add type annotations.
  - Rewrite docstrings.
  - Disable or remove unused code.
  - Fix the definition of arguments and their types in `pytest_addoption`.

* refactor(benchmark): Clean up generate_test.py file
  - Refactored the `create_single_test` function for clarity and readability.
  - Removed unused variables.
  - Made creation of `Challenge` subclasses more straightforward.
  - Made a bare `except` more specific.
  - Renamed the `Challenge.setup_challenge` method to `run_challenge`.
  - Updated type hints and annotations.
  - Made minor code/readability improvements in `load_challenges`.
  - Added a helper function `_add_challenge_to_module` for attaching a Challenge class to the current module.

* fix(benchmark): Fix and add type annotations in execute_sub_process.py

* refactor(benchmark): Simplify const determination in agent_interface.py
  - Simplify the logic that determines the value of `HELICONE_GRAPHQL_LOGS`.

* fix(benchmark): Register category markers to prevent warnings
  - Use the `pytest_configure` hook to register the known challenge categories as markers; otherwise, Pytest raises "unknown marker" warnings at runtime (a sketch of this hook follows below).

* refactor(benchmark/challenges): Fix indentation in 4_revenue_retrieval_2/data.json

* refactor(benchmark): Update agent_api_interface.py
  - Add type annotations to the `copy_agent_artifacts_into_temp_folder` function.
  - Add a note about a broken endpoint in the `agent_protocol_client` library.
  - Remove an unused variable in the `run_api_agent` function.
  - Improve readability and resolve a linting error.

* feat(benchmark): Improve and centralize pathfinding
  - Search the path hierarchy for an applicable `agbenchmark_config`, rather than assuming it's in the current folder.
  - Create `agbenchmark.utils.path_manager` with `AGBenchmarkPathManager`, exporting a `PATH_MANAGER` const.
  - Replace the path constants defined in __main__.py with usages of `PATH_MANAGER`.

* feat(benchmark/cli): Clean up and improve CLI
  - Updated commands, options, and their descriptions to be more intuitive and consistent.
  - Moved slow imports into the entrypoints that use them, to speed up application startup.
  - Fixed type hints to match the output types of Click options.
  - Hid the deprecated `agbenchmark start` command.
  - Refactored code to improve readability and maintainability.
  - Moved the main entrypoint into the `run` subcommand.
  - Fixed the `version` and `serve` subcommands.
  - Added the `click-default-group` package to allow using `run` implicitly (for backwards compatibility).
  - Renamed `--no_dep` to `--no-dep` for consistency.
  - Fixed string formatting issues in log statements.

* refactor(benchmark/config): Move AgentBenchmarkConfig and related functions to config.py
  - Move the `AgentBenchmarkConfig` class from `utils/data_types.py` to `config.py`.
  - Extract the `calculate_info_test_path` function from `utils/data_types.py` and move it to `config.py` as a private helper function `_calculate_info_test_path`.
  - Move `load_agent_benchmark_config()` to `AgentBenchmarkConfig.load()`.
  - Changed simple getter methods on `AgentBenchmarkConfig` to calculated properties.
  - Update all code references according to the changes mentioned above.
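
The `pytest_configure` hook mentioned in the "Register category markers" item above is the standard way to register custom markers; a sketch with a placeholder category list (the real categories are collected from the challenges' data.json files):

    # conftest.py -- illustrative sketch; CATEGORIES is a placeholder
    import pytest

    CATEGORIES = ["code", "retrieval", "general"]  # assumption, not the real list

    def pytest_configure(config: pytest.Config) -> None:
        # Registering each category silences "unknown marker" warnings when
        # challenges are marked with their categories.
        for category in CATEGORIES:
            config.addinivalue_line("markers", f"{category}: challenge category")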
* refactor(benchmark): Fix ReportManager init parameter types and use pathlib
  - Fix the type annotation of the `benchmark_start_time` parameter in `ReportManager.__init__`, which was mistyped as `str` instead of `datetime`.
  - Change the type of the `filename` parameter in `ReportManager.__init__` from `str` to `Path`.
  - Rename `self.filename` to `self.report_file` in `ReportManager`.
  - Change the way the report file is created, opened, and saved to use the `Path` object.

* refactor(benchmark): Improve typing surrounding ChallengeData and clean up its implementation
  - Use `ChallengeData` objects instead of untyped `dict`s in app.py, generate_test.py, and reports.py.
  - Remove the unnecessary methods `serialize`, `get_data`, `get_json_from_path`, and `deserialize` from the `ChallengeData` class.
  - Remove the unused methods `challenge_from_datum` and `challenge_from_test_data` from the `ChallengeData` class.
  - Update the function signatures and annotations of the `create_challenge` and `generate_single_test` functions in generate_test.py.
  - Add types to the function signatures of `generate_single_call_report` and `finalize_reports` in reports.py.
  - Remove the unnecessary `challenge_data` parameter (in generate_test.py) and fixture (in conftest.py).

* refactor(benchmark): Clean up generate_test.py, conftest.py and __main__.py
  - Cleaned up generate_test.py and conftest.py.
  - Consolidated challenge creation logic in the `Challenge` class itself, most notably the new `Challenge.from_challenge_spec` method.
  - Moved challenge selection logic from generate_test.py to the `pytest_collection_modifyitems` hook in conftest.py.
  - Converted methods in the `Challenge` class to class methods where appropriate.
  - Improved argument handling in the `run_benchmark` function in `__main__.py`.

* refactor(benchmark/config): Merge AGBenchmarkPathManager into AgentBenchmarkConfig and reduce fragmented/global state
  - Merge the functionality of `AGBenchmarkPathManager` into `AgentBenchmarkConfig` to consolidate configuration management.
  - Remove the `.path_manager` module containing `AGBenchmarkPathManager`.
  - Pass the `AgentBenchmarkConfig` and its attributes through function arguments to reduce global state and improve code clarity.

* feat(benchmark/serve): Configurable port for `serve` subcommand
  - Added a `--port` option to the `serve` subcommand to allow specifying the port to run the API on (see the sketch below).
  - If no `--port` option is provided, the port defaults to the value of the `PORT` environment variable, or 8080 if that is not set.

* feat(benchmark/cli): Add `config` subcommand
  - Added a new subcommand `config` to the AGBenchmark CLI, to display information about the present AGBenchmark config.
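
The port resolution for `serve` described above follows a simple precedence: the `--port` option, then the `PORT` environment variable, then 8080. A self-contained sketch of that pattern (the echo body is a stand-in for starting uvicorn):

    import os
    from typing import Optional

    import click

    @click.command()
    @click.option("--port", type=int, help="Port to run the API on.")
    def serve(port: Optional[int] = None) -> None:
        # --port option > PORT env var > 8080
        port = port or int(os.getenv("PORT", 8080))
        click.echo(f"would serve on port {port}")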
* fix(benchmark): Gracefully handle incompatible challenge spec files in app.py
  - Added a check to skip deprecated challenges.
  - Added logging to allow debugging of the loading process.
  - Added handling of validation errors when parsing challenge spec files (sketched below).
  - Added the missing `spec_file` attribute to `ChallengeData`.

* refactor(benchmark): Move `run_benchmark` entrypoint to main.py, use it in `/reports` endpoint
  - Move `run_benchmark` and `validate_args` from __main__.py to main.py.
  - Replace the agbenchmark subprocess in `app.py:run_single_test` with `run_benchmark`.
  - Move `get_unique_categories` from __main__.py to challenges/__init__.py.
  - Move `OPTIONAL_CATEGORIES` from __main__.py to challenge.py.
  - Reduce operations on updates.json (including `initialize_updates_file`) outside of the API.

* refactor(benchmark): Remove unused `/updates` endpoint and all related code
  - Remove the `updates_json_file` attribute from `AgentBenchmarkConfig`.
  - Remove `get_updates` and `_initialize_updates_file` in app.py.
  - Remove the `append_updates_file` and `create_update_json` functions in agent_api_interface.py.
  - Remove the call to `append_updates_file` in challenge.py.

* refactor(benchmark/config): Clean up and update docstrings on `AgentBenchmarkConfig`
  - Add and update docstrings.
  - Change the base class from `BaseModel` to `BaseSettings`, allowing extras for backwards compatibility.
  - Make the naming of path attributes on `AgentBenchmarkConfig` more consistent.
  - Remove the unused `agent_home_directory` attribute.
  - Remove the unused `workspace` attribute.

* fix(benchmark): Restore mechanism to select (optional) categories in the agent benchmark config

* fix(benchmark): Update agent-protocol-client to v1.1.0
  - Fixes an issue with fetching task artifact listings.
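
A sketch of the "gracefully handle incompatible challenge spec files" behavior, assuming a pydantic-v1-style model as `ChallengeData` was at the time; the model fields and log wording are illustrative, only the skip-on-validation-error idea comes from the commit:

    import logging
    from pathlib import Path

    from pydantic import BaseModel, ValidationError

    logger = logging.getLogger(__name__)

    class ChallengeSpec(BaseModel):  # stand-in for the real ChallengeData model
        name: str
        category: list[str]

    def load_specs(spec_files: list[Path]) -> list[ChallengeSpec]:
        # Skip spec files that fail validation instead of aborting the whole run
        specs = []
        for spec_file in spec_files:
            logger.debug(f"Loading {spec_file}")
            try:
                specs.append(ChallengeSpec.parse_file(spec_file))
            except ValidationError as e:
                logger.warning(f"Skipping invalid spec file {spec_file}: {e}")
        return specs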
-rw-r--r--  .github/workflows/hackathon.yml | 2
-rw-r--r--  benchmark/agbenchmark/__main__.py | 341
-rw-r--r--  benchmark/agbenchmark/agent_api_interface.py | 77
-rw-r--r--  benchmark/agbenchmark/agent_interface.py | 36
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/__init__.py | 42
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/api/__init__.py | 4
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/api/agent_api.py | 1647
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/api_client.py | 838
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/api_response.py | 28
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/configuration.py | 447
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/docs/AgentApi.md | 615
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/exceptions.py | 154
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/__init__.py | 25
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/artifact.py | 72
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/artifacts.py | 77
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/pagination.py | 75
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/step.py | 146
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/step_all_of.py | 133
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/step_request_body.py | 77
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/step_result.py | 89
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/task.py | 99
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/task_all_of.py | 87
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/models/task_request_body.py | 77
-rw-r--r--  benchmark/agbenchmark/agent_protocol_client/rest.py | 311
-rw-r--r--  benchmark/agbenchmark/app.py | 569
-rw-r--r--  benchmark/agbenchmark/challenges/__init__.py | 32
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/scrape/4_revenue_retrieval_2/data.json | 30
-rw-r--r--  benchmark/agbenchmark/config.py | 119
-rw-r--r--  benchmark/agbenchmark/conftest.py | 434
-rw-r--r--  benchmark/agbenchmark/execute_sub_process.py | 79
-rw-r--r--  benchmark/agbenchmark/generate_test.py | 218
-rw-r--r--  benchmark/agbenchmark/main.py | 153
-rw-r--r--  benchmark/agbenchmark/reports/ReportManager.py | 36
-rw-r--r--  benchmark/agbenchmark/reports/agent_benchmark_config.py | 18
-rw-r--r--  benchmark/agbenchmark/reports/processing/process_report.py | 5
-rw-r--r--  benchmark/agbenchmark/reports/processing/report_types_v2.py | 31
-rw-r--r--  benchmark/agbenchmark/reports/reports.py | 72
-rw-r--r--  benchmark/agbenchmark/schema.py | 172
-rw-r--r--  benchmark/agbenchmark/utils/challenge.py | 170
-rw-r--r--  benchmark/agbenchmark/utils/data_types.py | 152
-rw-r--r--  benchmark/agbenchmark/utils/dependencies/graphs.py | 17
-rw-r--r--  benchmark/agbenchmark/utils/get_data_from_helicone.py | 35
-rw-r--r--  benchmark/agbenchmark/utils/logging.py | 74
-rw-r--r--  benchmark/agbenchmark/utils/utils.py | 43
-rw-r--r--  benchmark/poetry.lock | 1789
-rw-r--r--  benchmark/pyproject.toml | 2
-rw-r--r--  benchmark/server.py | 121
47 files changed, 2120 insertions(+), 7750 deletions(-)
diff --git a/.github/workflows/hackathon.yml b/.github/workflows/hackathon.yml
index a8b592eb9..3becee799 100644
--- a/.github/workflows/hackathon.yml
+++ b/.github/workflows/hackathon.yml
@@ -121,7 +121,7 @@ jobs:
./run agent start $AGENT_NAME
cd ../benchmark
poetry install
- poetry run agbenchmark --no_dep
+ poetry run agbenchmark --no-dep
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
diff --git a/benchmark/agbenchmark/__main__.py b/benchmark/agbenchmark/__main__.py
index 76ca7529a..a111dd9ff 100644
--- a/benchmark/agbenchmark/__main__.py
+++ b/benchmark/agbenchmark/__main__.py
@@ -1,5 +1,4 @@
-import glob
-import json
+import logging
import os
import sys
from datetime import datetime, timezone
@@ -7,205 +6,97 @@ from pathlib import Path
from typing import Any, Optional
import click
-import pytest
-import toml
+from click_default_group import DefaultGroup
from dotenv import load_dotenv
-from helicone.lock import HeliconeLockManager
-from agbenchmark.app import app
-from agbenchmark.reports.ReportManager import SingletonReportManager
-from agbenchmark.utils.data_types import AgentBenchmarkConfig
+from agbenchmark.config import AgentBenchmarkConfig
+from agbenchmark.utils.logging import configure_logging
load_dotenv()
+try:
+ if os.getenv("HELICONE_API_KEY"):
+ import helicone # noqa
+
+ helicone_enabled = True
+ else:
+ helicone_enabled = False
+except ImportError:
+ helicone_enabled = False
+
+
+class InvalidInvocationError(ValueError):
+ pass
+
+
+logger = logging.getLogger(__name__)
+
BENCHMARK_START_TIME_DT = datetime.now(timezone.utc)
BENCHMARK_START_TIME = BENCHMARK_START_TIME_DT.strftime("%Y-%m-%dT%H:%M:%S+00:00")
-TEMP_FOLDER_ABS_PATH = Path.cwd() / "agbenchmark_config" / "temp_folder"
-CHALLENGES_ALREADY_BEATEN = (
- Path.cwd() / "agbenchmark_config" / "challenges_already_beaten.json"
-)
-UPDATES_JSON_PATH = Path.cwd() / "agbenchmark_config" / "updates.json"
-if os.environ.get("HELICONE_API_KEY"):
+if helicone_enabled:
+ from helicone.lock import HeliconeLockManager
+
HeliconeLockManager.write_custom_property(
"benchmark_start_time", BENCHMARK_START_TIME
)
-with open(
- Path(__file__).resolve().parent / "challenges" / "optional_categories.json"
-) as f:
- OPTIONAL_CATEGORIES = json.load(f)["optional_categories"]
-
-
-def get_unique_categories() -> set[str]:
- """Find all data.json files in the directory relative to this file and its subdirectories,
- read the "category" field from each file, and return a set of unique categories."""
- categories = set()
-
- # Get the directory of this file
- this_dir = os.path.dirname(os.path.abspath(__file__))
-
- glob_path = os.path.join(this_dir, "./challenges/**/data.json")
- # Use it as the base for the glob pattern
- for data_file in glob.glob(glob_path, recursive=True):
- with open(data_file, "r") as f:
- try:
- data = json.load(f)
- categories.update(data.get("category", []))
- except json.JSONDecodeError:
- print(f"Error: {data_file} is not a valid JSON file.")
- continue
- except IOError:
- print(f"IOError: file could not be read: {data_file}")
- continue
-
- return categories
-
-
-def run_benchmark(
- maintain: bool = False,
- improve: bool = False,
- explore: bool = False,
- mock: bool = False,
- no_dep: bool = False,
- nc: bool = False,
- keep_answers: bool = False,
- category: Optional[tuple[str]] = None,
- skip_category: Optional[tuple[str]] = None,
- test: Optional[str] = None,
- cutoff: Optional[int] = None,
- server: bool = False,
-) -> int:
- """Start the benchmark tests. If a category flag is provided, run the categories with that mark."""
- # Check if configuration file exists and is not empty
-
- initialize_updates_file()
- SingletonReportManager()
- agent_benchmark_config_path = str(Path.cwd() / "agbenchmark_config" / "config.json")
- try:
- with open(agent_benchmark_config_path, "r") as f:
- agent_benchmark_config = AgentBenchmarkConfig(**json.load(f))
- agent_benchmark_config.agent_benchmark_config_path = (
- agent_benchmark_config_path
- )
- except json.JSONDecodeError:
- print("Error: benchmark_config.json is not a valid JSON file.")
- return 1
- if maintain and improve and explore:
- print(
- "Error: You can't use --maintain, --improve or --explore at the same time. Please choose one."
- )
- return 1
-
- if test and (category or skip_category or maintain or improve or explore):
- print(
- "Error: If you're running a specific test make sure no other options are selected. Please just pass the --test."
- )
- return 1
-
- assert agent_benchmark_config.host, "Error: host needs to be added to the config."
+@click.group(cls=DefaultGroup, default_if_no_args=True)
+@click.option("--debug", is_flag=True, help="Enable debug output")
+def cli(
+ debug: bool,
+) -> Any:
+ configure_logging(logging.DEBUG if debug else logging.INFO)
- print("Current configuration:")
- for key, value in vars(agent_benchmark_config).items():
- print(f"{key}: {value}")
- pytest_args = ["-vs"]
- if keep_answers:
- pytest_args.append("--keep-answers")
+@cli.command(hidden=True)
+def start():
+ raise DeprecationWarning(
+ "`agbenchmark start` is deprecated. Use `agbenchmark run` instead."
+ )
- if test:
- print("Running specific test:", test)
- else:
- # Categories that are used in the challenges
- categories = get_unique_categories()
- if category:
- invalid_categories = set(category) - categories
- assert (
- not invalid_categories
- ), f"Invalid categories: {invalid_categories}. Valid categories are: {categories}"
-
- if category:
- categories_to_run = set(category)
- if skip_category:
- categories_to_run = categories_to_run.difference(set(skip_category))
- assert categories_to_run, "Error: You can't skip all categories"
- pytest_args.extend(["-m", " or ".join(categories_to_run), "--category"])
- print("Running tests of category:", categories_to_run)
- elif skip_category:
- categories_to_run = categories - set(skip_category)
- assert categories_to_run, "Error: You can't skip all categories"
- pytest_args.extend(["-m", " or ".join(categories_to_run), "--category"])
- print("Running tests of category:", categories_to_run)
- else:
- print("Running all categories")
-
- if maintain:
- print("Running only regression tests")
- pytest_args.append("--maintain")
- elif improve:
- print("Running only non-regression tests")
- pytest_args.append("--improve")
- elif explore:
- print("Only attempt challenges that have never been beaten")
- pytest_args.append("--explore")
-
- if mock:
- pytest_args.append("--mock")
- os.environ[
- "IS_MOCK"
- ] = "True" # ugly hack to make the mock work when calling from API
-
- if no_dep:
- pytest_args.append("--no_dep")
-
- if nc and cutoff:
- print(
- "Error: You can't use both --nc and --cutoff at the same time. Please choose one."
- )
- return 1
- if nc:
- pytest_args.append("--nc")
- if cutoff:
- pytest_args.append("--cutoff")
- print(f"Setting cuttoff override to {cutoff} seconds.")
- current_dir = Path(__file__).resolve().parent
- print(f"Current directory: {current_dir}")
- pytest_args.extend((str(current_dir), "--cache-clear"))
- exit_code = pytest.main(pytest_args)
- SingletonReportManager().clear_instance()
-
-
-@click.group(invoke_without_command=True)
-@click.option("--backend", is_flag=True, help="If it's being run from the cli")
-@click.option("-c", "--category", multiple=True, help="Specific category to run")
+@cli.command(default=True)
+@click.option(
+ "-c",
+ "--category",
+ multiple=True,
+ help="(+) Select a category to run.",
+)
@click.option(
"-s",
"--skip-category",
multiple=True,
- help="Skips preventing the tests from this category from running",
+ help="(+) Exclude a category from running.",
)
-@click.option("--test", multiple=True, help="Specific test to run")
-@click.option("--maintain", is_flag=True, help="Runs only regression tests")
-@click.option("--improve", is_flag=True, help="Run only non-regression tests")
+@click.option("--test", multiple=True, help="(+) Select a test to run.")
+@click.option("--maintain", is_flag=True, help="Run only regression tests.")
+@click.option("--improve", is_flag=True, help="Run only non-regression tests.")
@click.option(
"--explore",
is_flag=True,
- help="Only attempt challenges that have never been beaten",
+ help="Run only challenges that have never been beaten.",
)
-@click.option("--mock", is_flag=True, help="Run with mock")
@click.option(
- "--no_dep",
+ "--no-dep",
is_flag=True,
- help="Run without dependencies",
+ help="Run all (selected) challenges, regardless of dependency success/failure.",
)
-@click.option("--nc", is_flag=True, help="Run without cutoff")
+@click.option("--cutoff", type=int, help="Override the challenge time limit (seconds).")
+@click.option("--nc", is_flag=True, help="Disable the challenge time limit.")
+@click.option("--mock", is_flag=True, help="Run with mock")
@click.option("--keep-answers", is_flag=True, help="Keep answers")
-@click.option("--cutoff", help="Set or override tests cutoff (seconds)")
-@click.argument("value", type=str, required=False)
-def cli(
+@click.option(
+ "--backend",
+ is_flag=True,
+ help="Write log output to a file instead of the terminal.",
+)
+# @click.argument(
+# "agent_path", type=click.Path(exists=True, file_okay=False), required=False
+# )
+def run(
maintain: bool,
improve: bool,
explore: bool,
@@ -213,18 +104,37 @@ def cli(
no_dep: bool,
nc: bool,
keep_answers: bool,
- category: Optional[list[str]] = None,
- skip_category: Optional[list[str]] = None,
- test: Optional[str] = None,
+ test: tuple[str],
+ category: tuple[str],
+ skip_category: tuple[str],
cutoff: Optional[int] = None,
backend: Optional[bool] = False,
- value: Optional[str] = None,
-) -> Any:
- # Redirect stdout if backend is True
- if value == "start":
- raise ("`agbenchmark start` is removed. Run `agbenchmark` instead.")
- if value == "serve":
- return serve()
+ # agent_path: Optional[Path] = None,
+) -> None:
+ """
+ Run the benchmark on the agent in the current directory.
+
+ Options marked with (+) can be specified multiple times, to select multiple items.
+ """
+ from agbenchmark.main import run_benchmark, validate_args
+
+ agbenchmark_config = AgentBenchmarkConfig.load()
+ logger.debug(f"agbenchmark_config: {agbenchmark_config.agbenchmark_config_dir}")
+ try:
+ validate_args(
+ maintain=maintain,
+ improve=improve,
+ explore=explore,
+ tests=test,
+ categories=category,
+ skip_categories=skip_category,
+ no_cutoff=nc,
+ cutoff=cutoff,
+ )
+ except InvalidInvocationError as e:
+ logger.error("Error: " + "\n".join(e.args))
+ sys.exit(1)
+
original_stdout = sys.stdout # Save the original standard output
exit_code = None
@@ -232,16 +142,17 @@ def cli(
with open("backend/backend_stdout.txt", "w") as f:
sys.stdout = f
exit_code = run_benchmark(
+ config=agbenchmark_config,
maintain=maintain,
improve=improve,
explore=explore,
mock=mock,
no_dep=no_dep,
- nc=nc,
+ no_cutoff=nc,
keep_answers=keep_answers,
- category=category,
- skip_category=skip_category,
- test=test,
+ tests=test,
+ categories=category,
+ skip_categories=skip_category,
cutoff=cutoff,
)
@@ -249,16 +160,17 @@ def cli(
else:
exit_code = run_benchmark(
+ config=agbenchmark_config,
maintain=maintain,
improve=improve,
explore=explore,
mock=mock,
no_dep=no_dep,
- nc=nc,
+ no_cutoff=nc,
keep_answers=keep_answers,
- category=category,
- skip_category=skip_category,
- test=test,
+ tests=test,
+ categories=category,
+ skip_categories=skip_category,
cutoff=cutoff,
)
@@ -266,33 +178,44 @@ def cli(
@cli.command()
-def version():
- """Print the version of the benchmark tool."""
- current_directory = Path(__file__).resolve().parent
- version = toml.load(current_directory / ".." / "pyproject.toml")["tool"]["poetry"][
- "version"
- ]
- print(f"Benchmark Tool Version {version}")
+@click.option("--port", type=int, help="Port to run the API on.")
+def serve(port: Optional[int] = None):
+ """Serve the benchmark frontend and API on port 8080."""
+ import uvicorn
+ from agbenchmark.app import setup_fastapi_app
-def serve():
- import uvicorn
+ config = AgentBenchmarkConfig.load()
+ app = setup_fastapi_app(config)
# Run the FastAPI application using uvicorn
- uvicorn.run(app, host="0.0.0.0", port=8080)
+ port = port or int(os.getenv("PORT", 8080))
+ uvicorn.run(app, host="0.0.0.0", port=port)
-def initialize_updates_file():
- if os.path.exists(UPDATES_JSON_PATH):
- # If the file already exists, overwrite it with an empty list
- with open(UPDATES_JSON_PATH, "w") as file:
- json.dump([], file, indent=2)
- print("Initialized updates.json by overwriting with an empty array")
- else:
- # If the file doesn't exist, create it and write an empty list
- with open(UPDATES_JSON_PATH, "w") as file:
- json.dump([], file, indent=2)
- print("Created updates.json and initialized it with an empty array")
+@cli.command()
+def config():
+ """Displays info regarding the present AGBenchmark config."""
+ try:
+ config = AgentBenchmarkConfig.load()
+ except FileNotFoundError as e:
+ click.echo(e, err=True)
+ return 1
+
+ k_col_width = max(len(k) for k in config.dict().keys())
+ for k, v in config.dict().items():
+ click.echo(f"{k: <{k_col_width}} = {v}")
+
+
+@cli.command()
+def version():
+ """Print version info for the AGBenchmark application."""
+ import toml
+
+ package_root = Path(__file__).resolve().parent.parent
+ pyproject = toml.load(package_root / "pyproject.toml")
+ version = pyproject["tool"]["poetry"]["version"]
+ click.echo(f"AGBenchmark version {version}")
if __name__ == "__main__":
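
The `DefaultGroup` usage in the diff above is what keeps the bare `agbenchmark` invocation working after the move to a `run` subcommand. A standalone sketch of the same click-default-group pattern (the command body is a placeholder):

    import click
    from click_default_group import DefaultGroup

    @click.group(cls=DefaultGroup, default_if_no_args=True)
    def cli() -> None:
        pass

    @cli.command(default=True)  # invoked when no subcommand is given
    def run() -> None:
        click.echo("running benchmark")  # placeholder for the real entrypoint

    if __name__ == "__main__":
        cli()  # `prog` and `prog run` now behave the same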
diff --git a/benchmark/agbenchmark/agent_api_interface.py b/benchmark/agbenchmark/agent_api_interface.py
index 18ff4520e..572471811 100644
--- a/benchmark/agbenchmark/agent_api_interface.py
+++ b/benchmark/agbenchmark/agent_api_interface.py
@@ -1,30 +1,25 @@
-import json
import logging
import os
-import pathlib
import time
-from typing import Any, Dict, Optional
+from pathlib import Path
+from typing import Optional
+
+from agent_protocol_client import AgentApi, ApiClient, Configuration, TaskRequestBody
-from agbenchmark.__main__ import TEMP_FOLDER_ABS_PATH, UPDATES_JSON_PATH
from agbenchmark.agent_interface import get_list_of_file_paths
-from agbenchmark.agent_protocol_client import (
- AgentApi,
- ApiClient,
- Configuration,
- TaskRequestBody,
-)
-from agbenchmark.agent_protocol_client.models.step import Step
+from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.utils.data_types import ChallengeData
LOG = logging.getLogger(__name__)
async def run_api_agent(
- task: ChallengeData, config: Dict[str, Any], artifacts_location: str, timeout: int
+ task: ChallengeData,
+ config: AgentBenchmarkConfig,
+ artifacts_location: str,
+ timeout: int,
) -> None:
- host_value = None
-
- configuration = Configuration(host=config["AgentBenchmarkConfig"].host + "/ap/v1")
+ configuration = Configuration(host=config.host)
async with ApiClient(configuration) as api_client:
api_instance = AgentApi(api_client)
task_request_body = TaskRequestBody(input=task.task)
@@ -45,7 +40,6 @@ async def run_api_agent(
# Read the existing JSON data from the file
step = await api_instance.execute_agent_task_step(task_id=task_id)
- await append_updates_file(step)
print(f"[{task.name}] - step {step.name} ({i}. request)")
i += 1
@@ -54,34 +48,38 @@ async def run_api_agent(
raise TimeoutError("Time limit exceeded")
if not step or step.is_last:
steps_remaining = False
- # if we're calling a mock agent, we "cheat" and give the correct artifacts to pass the tests
+
+ # In "mock" mode, we cheat by giving the correct artifacts to pass the challenge
if os.getenv("IS_MOCK"):
await upload_artifacts(
api_instance, artifacts_location, task_id, "artifacts_out"
)
- await copy_agent_artifacts_into_temp_folder(api_instance, task_id)
+ await copy_agent_artifacts_into_folder(
+ api_instance, task_id, config.temp_folder
+ )
-async def copy_agent_artifacts_into_temp_folder(api_instance, task_id):
+async def copy_agent_artifacts_into_folder(
+ api_instance: AgentApi, task_id: str, folder: Path
+):
artifacts = await api_instance.list_agent_task_artifacts(task_id=task_id)
+
for artifact in artifacts.artifacts:
# current absolute path of the directory of the file
- directory_location = pathlib.Path(TEMP_FOLDER_ABS_PATH)
if artifact.relative_path:
- path = (
+ path: str = (
artifact.relative_path
if not artifact.relative_path.startswith("/")
else artifact.relative_path[1:]
)
- directory_location = pathlib.Path(
- os.path.dirname(directory_location / path)
- )
- LOG.info(f"Creating directory {directory_location}")
+ folder = (folder / path).parent
- directory_location.mkdir(parents=True, exist_ok=True)
+ if not folder.exists():
+ LOG.info(f"Creating directory {folder}")
+ folder.mkdir(parents=True)
- file_path = directory_location / artifact.file_name
+ file_path = folder / artifact.file_name
LOG.info(f"Writing file {file_path}")
with open(file_path, "wb") as f:
content = await api_instance.download_agent_task_artifact(
@@ -91,35 +89,16 @@ async def copy_agent_artifacts_into_temp_folder(api_instance, task_id):
f.write(content)
-async def append_updates_file(step: Step):
- with open(UPDATES_JSON_PATH, "r") as file:
- existing_data = json.load(file)
- # Append the new update to the existing array
- new_update = create_update_json(step)
-
- existing_data.append(new_update)
- # Write the updated array back to the file
- with open(UPDATES_JSON_PATH, "w") as file:
- file.write(json.dumps(existing_data, indent=2))
-
-
async def upload_artifacts(
- api_instance: ApiClient, artifacts_location: str, task_id: str, type: str
+ api_instance: AgentApi, artifacts_location: str, task_id: str, type: str
) -> None:
for file_path in get_list_of_file_paths(artifacts_location, type):
relative_path: Optional[str] = "/".join(
- file_path.split(f"{type}/", 1)[-1].split("/")[:-1]
+ str(file_path).split(f"{type}/", 1)[-1].split("/")[:-1]
)
if not relative_path:
relative_path = None
await api_instance.upload_agent_task_artifacts(
- task_id=task_id, file=file_path, relative_path=relative_path
+ task_id=task_id, file=str(file_path), relative_path=relative_path
)
-
-
-def create_update_json(step: Step):
- now = int(time.time())
- content = {"content": step.to_dict(), "timestamp": now}
-
- return content
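
The rewritten `copy_agent_artifacts_into_folder` above centers on one pathlib pattern: strip a leading slash from the artifact's relative path, take the parent directory under the target folder, create it if needed, and write the file there. A standalone sketch, under the assumption that `relative_path` includes the file name as the `.parent` call in the diff suggests (names here are illustrative, not the benchmark's API):

    from pathlib import Path

    def write_artifact(
        folder: Path, relative_path: str, file_name: str, content: bytes
    ) -> Path:
        rel = relative_path.lstrip("/")  # keep the path relative to `folder`
        target_dir = (folder / rel).parent if rel else folder
        target_dir.mkdir(parents=True, exist_ok=True)
        file_path = target_dir / file_name
        file_path.write_bytes(content)
        return file_path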
diff --git a/benchmark/agbenchmark/agent_interface.py b/benchmark/agbenchmark/agent_interface.py
index 269e8f8ff..aa1a40a5c 100644
--- a/benchmark/agbenchmark/agent_interface.py
+++ b/benchmark/agbenchmark/agent_interface.py
@@ -1,45 +1,27 @@
import os
import shutil
-import sys
-from typing import List
+from pathlib import Path
from dotenv import load_dotenv
-from agbenchmark.execute_sub_process import execute_subprocess
-
load_dotenv()
-helicone_graphql_logs = os.getenv("HELICONE_GRAPHQL_LOGS")
-HELICONE_GRAPHQL_LOGS = (
- helicone_graphql_logs.lower() == "true" if helicone_graphql_logs else False
-)
-
-
-def run_agent(task: str, timeout: int) -> None:
- print(f"Running agbenchmark/benchmarks.py with timeout {timeout}")
-
- command = [sys.executable, "-m", "agbenchmark_config.benchmarks", str(task)]
-
- execute_subprocess(command, timeout)
+HELICONE_GRAPHQL_LOGS = os.getenv("HELICONE_GRAPHQL_LOGS", "").lower() == "true"
def get_list_of_file_paths(
- challenge_dir_path: str, artifact_folder_name: str
-) -> List[str]:
- # this file is at agbenchmark\agent_interface.py
- source_dir = os.path.join(
- challenge_dir_path,
- artifact_folder_name,
- )
- if not os.path.exists(source_dir):
+ challenge_dir_path: str | Path, artifact_folder_name: str
+) -> list[Path]:
+ source_dir = Path(challenge_dir_path) / artifact_folder_name
+ if not source_dir.exists():
return []
- return [os.path.join(source_dir, file_name) for file_name in os.listdir(source_dir)]
+ return list(source_dir.iterdir())
def copy_artifacts_into_temp_folder(
- workspace: str | dict[str, str], artifact_folder_name: str, challenge_dir_path: str
+ workspace: str | Path, artifact_folder_name: str, challenge_dir_path: str | Path
) -> None:
file_paths = get_list_of_file_paths(challenge_dir_path, artifact_folder_name)
for file_path in file_paths:
- if os.path.isfile(file_path):
+ if file_path.is_file():
shutil.copy(file_path, workspace)
diff --git a/benchmark/agbenchmark/agent_protocol_client/__init__.py b/benchmark/agbenchmark/agent_protocol_client/__init__.py
deleted file mode 100644
index c393f2f83..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# coding: utf-8
-
-# flake8: noqa
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-__version__ = "1.0.0"
-
-# import apis into sdk package
-from agbenchmark.agent_protocol_client.api.agent_api import AgentApi
-from agbenchmark.agent_protocol_client.api_client import ApiClient
-
-# import ApiClient
-from agbenchmark.agent_protocol_client.api_response import ApiResponse
-from agbenchmark.agent_protocol_client.configuration import Configuration
-from agbenchmark.agent_protocol_client.exceptions import (
- ApiAttributeError,
- ApiException,
- ApiKeyError,
- ApiTypeError,
- ApiValueError,
- OpenApiException,
-)
-
-# import models into sdk package
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-from agbenchmark.agent_protocol_client.models.step import Step
-from agbenchmark.agent_protocol_client.models.step_all_of import StepAllOf
-from agbenchmark.agent_protocol_client.models.step_request_body import StepRequestBody
-from agbenchmark.agent_protocol_client.models.task import Task
-from agbenchmark.agent_protocol_client.models.task_all_of import TaskAllOf
-from agbenchmark.agent_protocol_client.models.task_request_body import TaskRequestBody
diff --git a/benchmark/agbenchmark/agent_protocol_client/api/__init__.py b/benchmark/agbenchmark/agent_protocol_client/api/__init__.py
deleted file mode 100644
index 3b2e21069..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/api/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# flake8: noqa
-
-# import apis into api package
-from agbenchmark.agent_protocol_client.api.agent_api import AgentApi
diff --git a/benchmark/agbenchmark/agent_protocol_client/api/agent_api.py b/benchmark/agbenchmark/agent_protocol_client/api/agent_api.py
deleted file mode 100644
index 2597c880f..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/api/agent_api.py
+++ /dev/null
@@ -1,1647 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-import re # noqa: F401
-from typing import Any, Awaitable, List, Optional, Union, overload
-
-from pydantic import Field, StrictBytes, StrictStr, validate_arguments
-from typing_extensions import Annotated
-
-from agbenchmark.agent_protocol_client.api_client import ApiClient
-from agbenchmark.agent_protocol_client.api_response import ApiResponse
-from agbenchmark.agent_protocol_client.exceptions import ( # noqa: F401
- ApiTypeError,
- ApiValueError,
-)
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-from agbenchmark.agent_protocol_client.models.step import Step
-from agbenchmark.agent_protocol_client.models.step_request_body import StepRequestBody
-from agbenchmark.agent_protocol_client.models.task import Task
-from agbenchmark.agent_protocol_client.models.task_request_body import TaskRequestBody
-
-
-class AgentApi(object):
- """NOTE: This class is auto generated by OpenAPI Generator
- Ref: https://openapi-generator.tech
-
- Do not edit the class manually.
- """
-
- def __init__(self, api_client=None):
- if api_client is None:
- api_client = ApiClient.get_default()
- self.api_client = api_client
-
- @overload
- async def create_agent_task(
- self, task_request_body: Optional[TaskRequestBody] = None, **kwargs
- ) -> Task: # noqa: E501
- ...
-
- @overload
- def create_agent_task(
- self,
- task_request_body: Optional[TaskRequestBody] = None,
- async_req: Optional[bool] = True,
- **kwargs,
- ) -> Task: # noqa: E501
- ...
-
- @validate_arguments
- def create_agent_task(
- self,
- task_request_body: Optional[TaskRequestBody] = None,
- async_req: Optional[bool] = None,
- **kwargs,
- ) -> Union[Task, Awaitable[Task]]: # noqa: E501
- """Creates a task for the agent. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.create_agent_task(task_request_body, async_req=True)
- >>> result = thread.get()
-
- :param task_request_body:
- :type task_request_body: TaskRequestBody
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If one
- number provided, it will be total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: Task
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the create_agent_task_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.create_agent_task_with_http_info(
- task_request_body, **kwargs
- ) # noqa: E501
-
- @validate_arguments
- def create_agent_task_with_http_info(
- self, task_request_body: Optional[TaskRequestBody] = None, **kwargs
- ) -> ApiResponse: # noqa: E501
- """Creates a task for the agent. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.create_agent_task_with_http_info(task_request_body, async_req=True)
- >>> result = thread.get()
-
- :param task_request_body:
- :type task_request_body: TaskRequestBody
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, the ApiResponse.data will
- be set to none and raw_data will store the
- HTTP response body without reading/decoding.
- Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: response data instead of ApiResponse
- object with status code, headers, etc
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If one
- number provided, it will be total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for an a single
- request; this effectively ignores the authentication
- in the spec for a single request.
- :type _request_auth: dict, optional
- :type _content_type: string, optional: force content-type for the request
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(Task, status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = ["task_request_body"]
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method create_agent_task" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- # process the body parameter
- _body_params = None
- if _params["task_request_body"] is not None:
- _body_params = _params["task_request_body"]
-
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/json"]
- ) # noqa: E501
-
- # set the HTTP header `Content-Type`
- _content_types_list = _params.get(
- "_content_type",
- self.api_client.select_header_content_type(["application/json"]),
- )
- if _content_types_list:
- _header_params["Content-Type"] = _content_types_list
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "Task",
- }
-
- return self.api_client.call_api(
- "/agent/tasks",
- "POST",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
-
- @overload
- async def download_agent_task_artifact(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- artifact_id: Annotated[StrictStr, Field(..., description="ID of the artifact")],
- **kwargs,
- ) -> bytearray: # noqa: E501
- ...
-
- @overload
- def download_agent_task_artifact(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- artifact_id: Annotated[StrictStr, Field(..., description="ID of the artifact")],
- async_req: Optional[bool] = True,
- **kwargs,
- ) -> bytearray: # noqa: E501
- ...
-
- @validate_arguments
- def download_agent_task_artifact(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- artifact_id: Annotated[StrictStr, Field(..., description="ID of the artifact")],
- async_req: Optional[bool] = None,
- **kwargs,
- ) -> Union[bytearray, Awaitable[bytearray]]: # noqa: E501
- """Download a specified artifact. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.download_agent_task_artifact(task_id, artifact_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param artifact_id: ID of the artifact (required)
- :type artifact_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If one
- number provided, it will be total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: bytearray
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the download_agent_task_artifact_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.download_agent_task_artifact_with_http_info(
- task_id, artifact_id, **kwargs
- ) # noqa: E501
-
- @validate_arguments
- def download_agent_task_artifact_with_http_info(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- artifact_id: Annotated[StrictStr, Field(..., description="ID of the artifact")],
- **kwargs,
- ) -> ApiResponse: # noqa: E501
- """Download a specified artifact. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.download_agent_task_artifact_with_http_info(task_id, artifact_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param artifact_id: ID of the artifact (required)
- :type artifact_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, the ApiResponse.data will
- be set to none and raw_data will store the
- HTTP response body without reading/decoding.
- Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: response data instead of ApiResponse
- object with status code, headers, etc
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If one
- number provided, it will be total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for an a single
- request; this effectively ignores the authentication
- in the spec for a single request.
- :type _request_auth: dict, optional
- :type _content_type: string, optional: force content-type for the request
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(bytearray, status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = ["task_id", "artifact_id"]
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method download_agent_task_artifact" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
- if _params["task_id"]:
- _path_params["task_id"] = _params["task_id"]
-
- if _params["artifact_id"]:
- _path_params["artifact_id"] = _params["artifact_id"]
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- # process the body parameter
- _body_params = None
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/octet-stream"]
- ) # noqa: E501
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "bytearray",
- }
-
- return self.api_client.call_api(
- "/agent/tasks/{task_id}/artifacts/{artifact_id}",
- "GET",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
-
- @overload
- async def execute_agent_task_step(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- step_request_body: Optional[StepRequestBody] = None,
- **kwargs,
- ) -> Step: # noqa: E501
- ...
-
- @overload
- def execute_agent_task_step(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- step_request_body: Optional[StepRequestBody] = None,
- async_req: Optional[bool] = True,
- **kwargs,
- ) -> Step: # noqa: E501
- ...
-
- @validate_arguments
- def execute_agent_task_step(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- step_request_body: Optional[StepRequestBody] = None,
- async_req: Optional[bool] = None,
- **kwargs,
- ) -> Union[Step, Awaitable[Step]]: # noqa: E501
- """Execute a step in the specified agent task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.execute_agent_task_step(task_id, step_request_body, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param step_request_body:
- :type step_request_body: StepRequestBody
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If one
- number provided, it will be total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: Step
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the execute_agent_task_step_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.execute_agent_task_step_with_http_info(
- task_id, step_request_body, **kwargs
- ) # noqa: E501
-
- @validate_arguments
- def execute_agent_task_step_with_http_info(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- step_request_body: Optional[StepRequestBody] = None,
- **kwargs,
- ) -> ApiResponse: # noqa: E501
- """Execute a step in the specified agent task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.execute_agent_task_step_with_http_info(task_id, step_request_body, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param step_request_body:
- :type step_request_body: StepRequestBody
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, the ApiResponse.data will
- be set to none and raw_data will store the
- HTTP response body without reading/decoding.
- Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: response data instead of ApiResponse
- object with status code, headers, etc
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If one
- number provided, it will be total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for an a single
- request; this effectively ignores the authentication
- in the spec for a single request.
- :type _request_auth: dict, optional
- :type _content_type: string, optional: force content-type for the request
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(Step, status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = ["task_id", "step_request_body"]
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method execute_agent_task_step" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
- if _params["task_id"]:
- _path_params["task_id"] = _params["task_id"]
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- # process the body parameter
- _body_params = None
- if _params["step_request_body"] is not None:
- _body_params = _params["step_request_body"]
-
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/json"]
- ) # noqa: E501
-
- # set the HTTP header `Content-Type`
- _content_types_list = _params.get(
- "_content_type",
- self.api_client.select_header_content_type(["application/json"]),
- )
- if _content_types_list:
- _header_params["Content-Type"] = _content_types_list
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "Step",
- }
-
- return self.api_client.call_api(
- "/agent/tasks/{task_id}/steps",
- "POST",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
-
- @overload
- async def get_agent_task(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- **kwargs,
- ) -> Task: # noqa: E501
- ...
-
- @overload
- def get_agent_task(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- async_req: Optional[bool] = True,
- **kwargs,
- ) -> Task: # noqa: E501
- ...
-
- @validate_arguments
- def get_agent_task(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- async_req: Optional[bool] = None,
- **kwargs,
- ) -> Union[Task, Awaitable[Task]]: # noqa: E501
- """Get details about a specified agent task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.get_agent_task(task_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If one
- number provided, it will be total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: Task
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the get_agent_task_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.get_agent_task_with_http_info(task_id, **kwargs) # noqa: E501
-
- @validate_arguments
- def get_agent_task_with_http_info(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- **kwargs,
- ) -> ApiResponse: # noqa: E501
- """Get details about a specified agent task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.get_agent_task_with_http_info(task_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, ApiResponse.data will be set to
- None and raw_data will store the HTTP response
- body without reading/decoding. Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: return the response data only, instead of
- an ApiResponse object with status code, headers, etc.
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for a single
- request; this effectively ignores the authentication
- in the spec for that request.
- :type _request_auth: dict, optional
- :param _content_type: force content-type for the request
- :type _content_type: str, optional
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(Task, status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = ["task_id"]
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method get_agent_task" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
- if _params["task_id"]:
- _path_params["task_id"] = _params["task_id"]
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- # process the body parameter
- _body_params = None
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/json"]
- ) # noqa: E501
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "Task",
- }
-
- return self.api_client.call_api(
- "/agent/tasks/{task_id}",
- "GET",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
-
- @overload
- async def get_agent_task_step(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- step_id: Annotated[StrictStr, Field(..., description="ID of the step")],
- **kwargs,
- ) -> Step: # noqa: E501
- ...
-
- @overload
- def get_agent_task_step(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- step_id: Annotated[StrictStr, Field(..., description="ID of the step")],
- async_req: Optional[bool] = True,
- **kwargs,
- ) -> Step: # noqa: E501
- ...
-
- @validate_arguments
- def get_agent_task_step(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- step_id: Annotated[StrictStr, Field(..., description="ID of the step")],
- async_req: Optional[bool] = None,
- **kwargs,
- ) -> Union[Step, Awaitable[Step]]: # noqa: E501
- """Get details about a specified task step. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.get_agent_task_step(task_id, step_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param step_id: ID of the step (required)
- :type step_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: Step
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the get_agent_task_step_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.get_agent_task_step_with_http_info(
- task_id, step_id, **kwargs
- ) # noqa: E501
-
- @validate_arguments
- def get_agent_task_step_with_http_info(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- step_id: Annotated[StrictStr, Field(..., description="ID of the step")],
- **kwargs,
- ) -> ApiResponse: # noqa: E501
- """Get details about a specified task step. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.get_agent_task_step_with_http_info(task_id, step_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param step_id: ID of the step (required)
- :type step_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, ApiResponse.data will be set to
- None and raw_data will store the HTTP response
- body without reading/decoding. Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: return the response data only, instead of
- an ApiResponse object with status code, headers, etc.
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for a single
- request; this effectively ignores the authentication
- in the spec for that request.
- :type _request_auth: dict, optional
- :param _content_type: force content-type for the request
- :type _content_type: str, optional
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(Step, status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = ["task_id", "step_id"]
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method get_agent_task_step" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
- if _params["task_id"]:
- _path_params["task_id"] = _params["task_id"]
-
- if _params["step_id"]:
- _path_params["step_id"] = _params["step_id"]
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- # process the body parameter
- _body_params = None
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/json"]
- ) # noqa: E501
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "Step",
- }
-
- return self.api_client.call_api(
- "/agent/tasks/{task_id}/steps/{step_id}",
- "GET",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
-
- @overload
- async def list_agent_task_artifacts(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- **kwargs,
- ) -> Any: # noqa: E501
- ...
-
- @overload
- def list_agent_task_artifacts(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- async_req: Optional[bool] = True,
- **kwargs,
- ) -> Any: # noqa: E501
- ...
-
- @validate_arguments
- def list_agent_task_artifacts(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- async_req: Optional[bool] = None,
- **kwargs,
- ) -> Union[Any, Awaitable[Any]]: # noqa: E501
- """List all artifacts that have been created for the given task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.list_agent_task_artifacts(task_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: List[Artifact]
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the list_agent_task_artifacts_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.list_agent_task_artifacts_with_http_info(
- task_id, **kwargs
- ) # noqa: E501
-
- @validate_arguments
- def list_agent_task_artifacts_with_http_info(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- **kwargs,
- ) -> ApiResponse: # noqa: E501
- """List all artifacts that have been created for the given task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.list_agent_task_artifacts_with_http_info(task_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, ApiResponse.data will be set to
- None and raw_data will store the HTTP response
- body without reading/decoding. Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: return the response data only, instead of
- an ApiResponse object with status code, headers, etc.
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for a single
- request; this effectively ignores the authentication
- in the spec for that request.
- :type _request_auth: dict, optional
- :param _content_type: force content-type for the request
- :type _content_type: str, optional
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(List[Artifact], status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = ["task_id"]
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method list_agent_task_artifacts" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
- if _params["task_id"]:
- _path_params["task_id"] = _params["task_id"]
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- # process the body parameter
- _body_params = None
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/json"]
- ) # noqa: E501
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "Artifacts",
- }
-
- return self.api_client.call_api(
- "/agent/tasks/{task_id}/artifacts",
- "GET",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
-
- @overload
- async def list_agent_task_steps(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- **kwargs,
- ) -> List[str]: # noqa: E501
- ...
-
- @overload
- def list_agent_task_steps(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- async_req: Optional[bool] = True,
- **kwargs,
- ) -> List[str]: # noqa: E501
- ...
-
- @validate_arguments
- def list_agent_task_steps(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- async_req: Optional[bool] = None,
- **kwargs,
- ) -> Union[List[str], Awaitable[List[str]]]: # noqa: E501
- """List all steps for the specified task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.list_agent_task_steps(task_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: List[str]
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the list_agent_task_steps_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.list_agent_task_steps_with_http_info(
- task_id, **kwargs
- ) # noqa: E501
-
- @validate_arguments
- def list_agent_task_steps_with_http_info(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- **kwargs,
- ) -> ApiResponse: # noqa: E501
- """List all steps for the specified task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.list_agent_task_steps_with_http_info(task_id, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, ApiResponse.data will be set to
- None and raw_data will store the HTTP response
- body without reading/decoding. Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: return the response data only, instead of
- an ApiResponse object with status code, headers, etc.
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for a single
- request; this effectively ignores the authentication
- in the spec for that request.
- :type _request_auth: dict, optional
- :param _content_type: force content-type for the request
- :type _content_type: str, optional
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(List[str], status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = ["task_id"]
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method list_agent_task_steps" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
- if _params["task_id"]:
- _path_params["task_id"] = _params["task_id"]
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- # process the body parameter
- _body_params = None
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/json"]
- ) # noqa: E501
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "List[str]",
- }
-
- return self.api_client.call_api(
- "/agent/tasks/{task_id}/steps",
- "GET",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
-
- @overload
- async def list_agent_tasks_ids(self, **kwargs) -> List[str]: # noqa: E501
- ...
-
- @overload
- def list_agent_tasks_ids(
- self, async_req: Optional[bool] = True, **kwargs
- ) -> List[str]: # noqa: E501
- ...
-
- @validate_arguments
- def list_agent_tasks_ids(
- self, async_req: Optional[bool] = None, **kwargs
- ) -> Union[List[str], Awaitable[List[str]]]: # noqa: E501
- """List all tasks that have been created for the agent. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.list_agent_tasks_ids(async_req=True)
- >>> result = thread.get()
-
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: List[str]
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the list_agent_tasks_ids_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.list_agent_tasks_ids_with_http_info(**kwargs) # noqa: E501
-
- @validate_arguments
- def list_agent_tasks_ids_with_http_info(
- self, **kwargs
- ) -> ApiResponse: # noqa: E501
- """List all tasks that have been created for the agent. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.list_agent_tasks_ids_with_http_info(async_req=True)
- >>> result = thread.get()
-
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, ApiResponse.data will be set to
- None and raw_data will store the HTTP response
- body without reading/decoding. Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: return the response data only, instead of
- an ApiResponse object with status code, headers, etc.
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for a single
- request; this effectively ignores the authentication
- in the spec for that request.
- :type _request_auth: dict, optional
- :param _content_type: force content-type for the request
- :type _content_type: str, optional
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(List[str], status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = []
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method list_agent_tasks_ids" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- # process the body parameter
- _body_params = None
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/json"]
- ) # noqa: E501
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "List[str]",
- }
-
- return self.api_client.call_api(
- "/agent/tasks",
- "GET",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
-
- @overload
- async def upload_agent_task_artifacts(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- file: Annotated[
- Union[StrictBytes, StrictStr], Field(..., description="File to upload.")
- ],
- relative_path: Annotated[
- Optional[StrictStr],
- Field(
- description="Relative path of the artifact in the agent's workspace."
- ),
- ] = None,
- **kwargs,
- ) -> Artifact: # noqa: E501
- ...
-
- @overload
- def upload_agent_task_artifacts(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- file: Annotated[
- Union[StrictBytes, StrictStr], Field(..., description="File to upload.")
- ],
- relative_path: Annotated[
- Optional[StrictStr],
- Field(
- description="Relative path of the artifact in the agent's workspace."
- ),
- ] = None,
- async_req: Optional[bool] = True,
- **kwargs,
- ) -> Artifact: # noqa: E501
- ...
-
- @validate_arguments
- def upload_agent_task_artifacts(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- file: Annotated[
- Union[StrictBytes, StrictStr], Field(..., description="File to upload.")
- ],
- relative_path: Annotated[
- Optional[StrictStr],
- Field(
- description="Relative path of the artifact in the agent's workspace."
- ),
- ] = None,
- async_req: Optional[bool] = None,
- **kwargs,
- ) -> Union[Artifact, Awaitable[Artifact]]: # noqa: E501
- """Upload an artifact for the specified task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.upload_agent_task_artifacts(task_id, file, relative_path, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param file: File to upload. (required)
- :type file: bytearray
- :param relative_path: Relative path of the artifact in the agent's workspace.
- :type relative_path: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: Artifact
- """
- kwargs["_return_http_data_only"] = True
- if "_preload_content" in kwargs:
- raise ValueError(
- "Error! Please call the upload_agent_task_artifacts_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
- )
- if async_req is not None:
- kwargs["async_req"] = async_req
- return self.upload_agent_task_artifacts_with_http_info(
- task_id, file, relative_path, **kwargs
- ) # noqa: E501
-
- @validate_arguments
- def upload_agent_task_artifacts_with_http_info(
- self,
- task_id: Annotated[StrictStr, Field(..., description="ID of the task")],
- file: Annotated[
- Union[StrictBytes, StrictStr], Field(..., description="File to upload.")
- ],
- relative_path: Annotated[
- Optional[StrictStr],
- Field(
- description="Relative path of the artifact in the agent's workspace."
- ),
- ] = None,
- **kwargs,
- ) -> ApiResponse: # noqa: E501
- """Upload an artifact for the specified task. # noqa: E501
-
- This method makes a synchronous HTTP request by default. To make an
- asynchronous HTTP request, please pass async_req=True
-
- >>> thread = api.upload_agent_task_artifacts_with_http_info(task_id, file, relative_path, async_req=True)
- >>> result = thread.get()
-
- :param task_id: ID of the task (required)
- :type task_id: str
- :param file: File to upload. (required)
- :type file: bytearray
- :param relative_path: Relative path of the artifact in the agent's workspace.
- :type relative_path: str
- :param async_req: Whether to execute the request asynchronously.
- :type async_req: bool, optional
- :param _preload_content: if False, ApiResponse.data will be set to
- None and raw_data will store the HTTP response
- body without reading/decoding. Default is True.
- :type _preload_content: bool, optional
- :param _return_http_data_only: return the response data only, instead of
- an ApiResponse object with status code, headers, etc.
- :type _return_http_data_only: bool, optional
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for a single
- request; this effectively ignores the authentication
- in the spec for that request.
- :type _request_auth: dict, optional
- :param _content_type: force content-type for the request
- :type _content_type: str, optional
- :return: Returns the result object.
- If the method is called asynchronously,
- returns the request thread.
- :rtype: tuple(Artifact, status_code(int), headers(HTTPHeaderDict))
- """
-
- _params = locals()
-
- _all_params = ["task_id", "file", "relative_path"]
- _all_params.extend(
- [
- "async_req",
- "_return_http_data_only",
- "_preload_content",
- "_request_timeout",
- "_request_auth",
- "_content_type",
- "_headers",
- ]
- )
-
- # validate the arguments
- for _key, _val in _params["kwargs"].items():
- if _key not in _all_params:
- raise ApiTypeError(
- "Got an unexpected keyword argument '%s'"
- " to method upload_agent_task_artifacts" % _key
- )
- _params[_key] = _val
- del _params["kwargs"]
-
- _collection_formats = {}
-
- # process the path parameters
- _path_params = {}
- if _params["task_id"]:
- _path_params["task_id"] = _params["task_id"]
-
- # process the query parameters
- _query_params = []
- # process the header parameters
- _header_params = dict(_params.get("_headers", {}))
- # process the form parameters
- _form_params = []
- _files = {}
- if _params["file"]:
- _files["file"] = _params["file"]
-
- if _params["relative_path"]:
- _form_params.append(("relative_path", _params["relative_path"]))
-
- # process the body parameter
- _body_params = None
- # set the HTTP header `Accept`
- _header_params["Accept"] = self.api_client.select_header_accept(
- ["application/json"]
- ) # noqa: E501
-
- # set the HTTP header `Content-Type`
- _content_types_list = _params.get(
- "_content_type",
- self.api_client.select_header_content_type(["multipart/form-data"]),
- )
- if _content_types_list:
- _header_params["Content-Type"] = _content_types_list
-
- # authentication setting
- _auth_settings = [] # noqa: E501
-
- _response_types_map = {
- "200": "Artifact",
- }
-
- return self.api_client.call_api(
- "/agent/tasks/{task_id}/artifacts",
- "POST",
- _path_params,
- _query_params,
- _header_params,
- body=_body_params,
- post_params=_form_params,
- files=_files,
- response_types_map=_response_types_map,
- auth_settings=_auth_settings,
- async_req=_params.get("async_req"),
- _return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
- _preload_content=_params.get("_preload_content", True),
- _request_timeout=_params.get("_request_timeout"),
- collection_formats=_collection_formats,
- _request_auth=_params.get("_request_auth"),
- )
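The upload method above routes `file` into the multipart `_files` dict and
`relative_path` into the form parameters, with `multipart/form-data` selected
as the content type. A hedged sketch of driving it, assuming `AgentApi` wraps
these methods; the host, task ID, and file path are hypothetical:

    import asyncio

    from agbenchmark.agent_protocol_client import AgentApi, ApiClient, Configuration

    async def upload_artifact() -> None:
        config = Configuration(host="http://localhost:8000")  # assumed agent URL
        async with ApiClient(config) as client:
            api = AgentApi(client)
            artifact = await api.upload_agent_task_artifacts(
                task_id="task-123",             # hypothetical task
                file="./workspace/output.txt",  # sent as a multipart file part
                relative_path="output.txt",     # optional form field
            )
            print(artifact)

    asyncio.run(upload_artifact())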
diff --git a/benchmark/agbenchmark/agent_protocol_client/api_client.py b/benchmark/agbenchmark/agent_protocol_client/api_client.py
deleted file mode 100644
index ce178a96b..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/api_client.py
+++ /dev/null
@@ -1,838 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-import atexit
-import datetime
-import json
-import mimetypes
-import os
-import re
-import tempfile
-from multiprocessing.pool import ThreadPool
-from urllib.parse import quote
-
-from dateutil.parser import parse
-
-import agbenchmark.agent_protocol_client.models
-from agbenchmark.agent_protocol_client import rest
-from agbenchmark.agent_protocol_client.api_response import ApiResponse
-from agbenchmark.agent_protocol_client.configuration import Configuration
-from agbenchmark.agent_protocol_client.exceptions import ApiException, ApiValueError
-
-
-class ApiClient(object):
- """Generic API client for OpenAPI client library builds.
-
- OpenAPI generic API client. This client handles the client-
- server communication, and is invariant across implementations. Specifics of
- the methods and models for each application are generated from the OpenAPI
- templates.
-
- :param configuration: .Configuration object for this client
- :param header_name: a header to pass when making calls to the API.
- :param header_value: a header value to pass when making calls to
- the API.
- :param cookie: a cookie to include in the header when making calls
- to the API
- :param pool_threads: The number of threads to use for async requests
- to the API. More threads means more concurrent API requests.
- """
-
- PRIMITIVE_TYPES = (float, bool, bytes, str, int)
- NATIVE_TYPES_MAPPING = {
- "int": int,
- "long": int, # TODO remove as only py3 is supported?
- "float": float,
- "str": str,
- "bool": bool,
- "date": datetime.date,
- "datetime": datetime.datetime,
- "object": object,
- }
- _pool = None
-
- def __init__(
- self,
- configuration=None,
- header_name=None,
- header_value=None,
- cookie=None,
- pool_threads=1,
- ):
- # use default configuration if none is provided
- if configuration is None:
- configuration = Configuration.get_default()
- self.configuration = configuration
- self.pool_threads = pool_threads
-
- self.rest_client = rest.RESTClientObject(configuration)
- self.default_headers = {}
- if header_name is not None:
- self.default_headers[header_name] = header_value
- self.cookie = cookie
- # Set default User-Agent.
- self.user_agent = "OpenAPI-Generator/1.0.0/python"
- self.client_side_validation = configuration.client_side_validation
-
- async def __aenter__(self):
- return self
-
- async def __aexit__(self, exc_type, exc_value, traceback):
- await self.close()
-
- async def close(self):
- await self.rest_client.close()
- if self._pool:
- self._pool.close()
- self._pool.join()
- self._pool = None
- if hasattr(atexit, "unregister"):
- atexit.unregister(self.close)
-
- @property
- def pool(self):
- """Create thread pool on first request
- avoids instantiating unused threadpool for blocking clients.
- """
- if self._pool is None:
- atexit.register(self.close)
- self._pool = ThreadPool(self.pool_threads)
- return self._pool
-
- @property
- def user_agent(self):
- """User agent for this API client"""
- return self.default_headers["User-Agent"]
-
- @user_agent.setter
- def user_agent(self, value):
- self.default_headers["User-Agent"] = value
-
- def set_default_header(self, header_name, header_value):
- self.default_headers[header_name] = header_value
-
- _default = None
-
- @classmethod
- def get_default(cls):
- """Return new instance of ApiClient.
-
- This method returns a newly created ApiClient instance (using the
- default constructor) if no default has been set; otherwise it
- returns the stored default ApiClient.
-
- :return: The ApiClient object.
- """
- if cls._default is None:
- cls._default = ApiClient()
- return cls._default
-
- @classmethod
- def set_default(cls, default):
- """Set default instance of ApiClient.
-
- It stores default ApiClient.
-
- :param default: object of ApiClient.
- """
- cls._default = default
-
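The `get_default`/`set_default` pair above implements a lazily created,
module-wide default client. A minimal sketch (the host is an assumption):

    from agbenchmark.agent_protocol_client.api_client import ApiClient
    from agbenchmark.agent_protocol_client.configuration import Configuration

    # Register a shared default client once at startup:
    ApiClient.set_default(ApiClient(Configuration(host="http://localhost:8000")))

    # Anywhere else, get_default() returns that same stored instance:
    client = ApiClient.get_default()
    assert client is ApiClient.get_default()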
- async def __call_api(
- self,
- resource_path,
- method,
- path_params=None,
- query_params=None,
- header_params=None,
- body=None,
- post_params=None,
- files=None,
- response_types_map=None,
- auth_settings=None,
- _return_http_data_only=None,
- collection_formats=None,
- _preload_content=True,
- _request_timeout=None,
- _host=None,
- _request_auth=None,
- ):
- config = self.configuration
-
- # header parameters
- header_params = header_params or {}
- header_params.update(self.default_headers)
- if self.cookie:
- header_params["Cookie"] = self.cookie
- if header_params:
- header_params = self.sanitize_for_serialization(header_params)
- header_params = dict(
- self.parameters_to_tuples(header_params, collection_formats)
- )
-
- # path parameters
- if path_params:
- path_params = self.sanitize_for_serialization(path_params)
- path_params = self.parameters_to_tuples(path_params, collection_formats)
- for k, v in path_params:
- # specified safe chars, encode everything
- resource_path = resource_path.replace(
- "{%s}" % k, quote(str(v), safe=config.safe_chars_for_path_param)
- )
-
- # post parameters
- if post_params or files:
- post_params = post_params if post_params else []
- post_params = self.sanitize_for_serialization(post_params)
- post_params = self.parameters_to_tuples(post_params, collection_formats)
- post_params.extend(self.files_parameters(files))
-
- # auth setting
- self.update_params_for_auth(
- header_params,
- query_params,
- auth_settings,
- resource_path,
- method,
- body,
- request_auth=_request_auth,
- )
-
- # body
- if body:
- body = self.sanitize_for_serialization(body)
-
- # request url
- if _host is None:
- url = self.configuration.host + resource_path
- else:
- # use server/host defined in path or operation instead
- url = _host + resource_path
-
- # query parameters
- if query_params:
- query_params = self.sanitize_for_serialization(query_params)
- url_query = self.parameters_to_url_query(query_params, collection_formats)
- url += "?" + url_query
-
- try:
- # perform request and return response
- response_data = await self.request(
- method,
- url,
- query_params=query_params,
- headers=header_params,
- post_params=post_params,
- body=body,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- )
- except ApiException as e:
- if e.body:
- e.body = e.body.decode("utf-8")
- raise e
-
- self.last_response = response_data
-
- return_data = None # assuming deserialization is not needed
- # data needs deserialization or returns HTTP data (deserialized) only
- if _preload_content or _return_http_data_only:
- response_type = response_types_map.get(str(response_data.status), None)
-
- if response_type == "bytearray":
- response_data.data = response_data.data
- else:
- match = None
- content_type = response_data.getheader("content-type")
- if content_type is not None:
- match = re.search(r"charset=([a-zA-Z\-\d]+)[\s;]?", content_type)
- encoding = match.group(1) if match else "utf-8"
- response_data.data = response_data.data.decode(encoding)
-
- # deserialize response data
- if response_type == "bytearray":
- return_data = response_data.data
- elif response_type:
- return_data = self.deserialize(response_data, response_type)
- else:
- return_data = None
-
- if _return_http_data_only:
- return return_data
- else:
- return ApiResponse(
- status_code=response_data.status,
- data=return_data,
- headers=response_data.getheaders(),
- raw_data=response_data.data,
- )
-
- def sanitize_for_serialization(self, obj):
- """Builds a JSON POST object.
-
- If obj is None, return None.
- If obj is str, int, long, float, bool, return directly.
- If obj is datetime.datetime, datetime.date
- convert to string in iso8601 format.
- If obj is list, sanitize each element in the list.
- If obj is dict, return the dict.
- If obj is OpenAPI model, return the properties dict.
-
- :param obj: The data to serialize.
- :return: The serialized form of data.
- """
- if obj is None:
- return None
- elif isinstance(obj, self.PRIMITIVE_TYPES):
- return obj
- elif isinstance(obj, list):
- return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj]
- elif isinstance(obj, tuple):
- return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj)
- elif isinstance(obj, (datetime.datetime, datetime.date)):
- return obj.isoformat()
-
- if isinstance(obj, dict):
- obj_dict = obj
- else:
- # Convert a model object to a dict, mapping attribute names to
- # JSON keys as defined by the model, and omitting attributes
- # whose value is None.
- obj_dict = obj.to_dict()
-
- return {
- key: self.sanitize_for_serialization(val) for key, val in obj_dict.items()
- }
-
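`sanitize_for_serialization` recurses structurally: primitives pass through,
dates and datetimes become ISO 8601 strings, lists and tuples are sanitized
element-wise, and model objects are converted via `to_dict()`. A small
sketch, assuming an `ApiClient` can be constructed offline:

    import datetime

    from agbenchmark.agent_protocol_client.api_client import ApiClient

    payload = ApiClient().sanitize_for_serialization(
        {
            "when": datetime.date(2024, 1, 2),  # becomes "2024-01-02"
            "ids": ["a", "b"],                  # sanitized element-wise
            "count": 3,                         # primitive, passes through
        }
    )
    assert payload == {"when": "2024-01-02", "ids": ["a", "b"], "count": 3}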
- def deserialize(self, response, response_type):
- """Deserializes response into an object.
-
- :param response: RESTResponse object to be deserialized.
- :param response_type: class literal for
- deserialized object, or string of class name.
-
- :return: deserialized object.
- """
- # handle file downloading
- # save response body into a tmp file and return the instance
- if response_type == "file":
- return self.__deserialize_file(response)
-
- # fetch data from response object
- try:
- data = json.loads(response.data)
- except ValueError:
- data = response.data
-
- return self.__deserialize(data, response_type)
-
- def __deserialize(self, data, klass):
- """Deserializes dict, list, str into an object.
-
- :param data: dict, list or str.
- :param klass: class literal, or string of class name.
-
- :return: object.
- """
- if data is None:
- return None
-
- if isinstance(klass, str):
- if klass.startswith("List["):
- sub_kls = re.match(r"List\[(.*)]", klass).group(1)
- return [self.__deserialize(sub_data, sub_kls) for sub_data in data]
-
- if klass.startswith("Dict["):
- sub_kls = re.match(r"Dict\[([^,]*), (.*)]", klass).group(2)
- return {k: self.__deserialize(v, sub_kls) for k, v in data.items()}
-
- # convert str to class
- if klass in self.NATIVE_TYPES_MAPPING:
- klass = self.NATIVE_TYPES_MAPPING[klass]
- else:
- klass = getattr(agbenchmark.agent_protocol_client.models, klass)
-
- if klass in self.PRIMITIVE_TYPES:
- return self.__deserialize_primitive(data, klass)
- elif klass == object:
- return self.__deserialize_object(data)
- elif klass == datetime.date:
- return self.__deserialize_date(data)
- elif klass == datetime.datetime:
- return self.__deserialize_datetime(data)
- else:
- return self.__deserialize_model(data, klass)
-
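`__deserialize` resolves the type strings used in `_response_types_map`:
native names via `NATIVE_TYPES_MAPPING`, `List[...]` and `Dict[...]`
recursively, and anything else as a model class from the models module. A
sketch through the public `deserialize`, using `SimpleNamespace` as a
stand-in for the RESTResponse object (only `.data` is read here):

    import types

    from agbenchmark.agent_protocol_client.api_client import ApiClient

    fake_response = types.SimpleNamespace(data='["step-1", "step-2"]')
    steps = ApiClient().deserialize(fake_response, "List[str]")
    assert steps == ["step-1", "step-2"]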
- def call_api(
- self,
- resource_path,
- method,
- path_params=None,
- query_params=None,
- header_params=None,
- body=None,
- post_params=None,
- files=None,
- response_types_map=None,
- auth_settings=None,
- async_req=None,
- _return_http_data_only=None,
- collection_formats=None,
- _preload_content=True,
- _request_timeout=None,
- _host=None,
- _request_auth=None,
- ):
- """Makes the HTTP request (synchronous) and returns deserialized data.
-
- To make an async_req request, set the async_req parameter.
-
- :param resource_path: Path to method endpoint.
- :param method: Method to call.
- :param path_params: Path parameters in the url.
- :param query_params: Query parameters in the url.
- :param header_params: Header parameters to be
- placed in the request header.
- :param body: Request body.
- :param post_params dict: Request post form parameters,
- for `application/x-www-form-urlencoded`, `multipart/form-data`.
- :param auth_settings list: Auth Settings names for the request.
- :param response_types_map: dict mapping HTTP status codes to
- response data types.
- :param files dict: key -> filename, value -> filepath,
- for `multipart/form-data`.
- :param async_req bool: execute request asynchronously
- :param _return_http_data_only: return the response data only, instead of
- an ApiResponse object with status code, headers, etc.
- :param _preload_content: if False, ApiResponse.data will be set to
- None and raw_data will store the HTTP response
- body without reading/decoding. Default is True.
- :param collection_formats: dict of collection formats for path, query,
- header, and post parameters.
- :param _request_timeout: timeout setting for this request. If a single
- number is provided, it is used as the total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- :param _request_auth: set to override the auth_settings for a single
- request; this effectively ignores the authentication
- in the spec for that request.
- :type _request_auth: dict, optional
- :return:
- If async_req parameter is True,
- the request will be called asynchronously.
- The method will return the request thread.
- If parameter async_req is False or missing,
- then the method will return the response directly.
- """
- if not async_req:
- return self.__call_api(
- resource_path,
- method,
- path_params,
- query_params,
- header_params,
- body,
- post_params,
- files,
- response_types_map,
- auth_settings,
- _return_http_data_only,
- collection_formats,
- _preload_content,
- _request_timeout,
- _host,
- _request_auth,
- )
-
- return self.pool.apply_async(
- self.__call_api,
- (
- resource_path,
- method,
- path_params,
- query_params,
- header_params,
- body,
- post_params,
- files,
- response_types_map,
- auth_settings,
- _return_http_data_only,
- collection_formats,
- _preload_content,
- _request_timeout,
- _host,
- _request_auth,
- ),
- )
-
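For `async_req=True`, `call_api` hands the work to the lazily created thread
pool and returns an `AsyncResult` handle whose `.get()` blocks until the
worker finishes; otherwise the `__call_api` coroutine is returned for the
caller to await. A standalone sketch of the same `apply_async` handle pattern
(generic Python, not this client):

    from multiprocessing.pool import ThreadPool

    def slow_square(x: int) -> int:
        return x * x

    pool = ThreadPool(1)
    handle = pool.apply_async(slow_square, (7,))  # returns immediately
    print(handle.get())  # blocks until the worker finishes -> 49
    pool.close()
    pool.join()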
- def request(
- self,
- method,
- url,
- query_params=None,
- headers=None,
- post_params=None,
- body=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- """Makes the HTTP request using RESTClient."""
- if method == "GET":
- return self.rest_client.get_request(
- url,
- query_params=query_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- headers=headers,
- )
- elif method == "HEAD":
- return self.rest_client.head_request(
- url,
- query_params=query_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- headers=headers,
- )
- elif method == "OPTIONS":
- return self.rest_client.options_request(
- url,
- query_params=query_params,
- headers=headers,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- )
- elif method == "POST":
- return self.rest_client.post_request(
- url,
- query_params=query_params,
- headers=headers,
- post_params=post_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
- elif method == "PUT":
- return self.rest_client.put_request(
- url,
- query_params=query_params,
- headers=headers,
- post_params=post_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
- elif method == "PATCH":
- return self.rest_client.patch_request(
- url,
- query_params=query_params,
- headers=headers,
- post_params=post_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
- elif method == "DELETE":
- return self.rest_client.delete_request(
- url,
- query_params=query_params,
- headers=headers,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
- else:
- raise ApiValueError(
- "http method must be `GET`, `HEAD`, `OPTIONS`,"
- " `POST`, `PATCH`, `PUT` or `DELETE`."
- )
-
- def parameters_to_tuples(self, params, collection_formats):
- """Get parameters as list of tuples, formatting collections.
-
- :param params: Parameters as dict or list of two-tuples
- :param dict collection_formats: Parameter collection formats
- :return: Parameters as list of tuples, collections formatted
- """
- new_params = []
- if collection_formats is None:
- collection_formats = {}
- for k, v in (
- params.items() if isinstance(params, dict) else params
- ): # noqa: E501
- if k in collection_formats:
- collection_format = collection_formats[k]
- if collection_format == "multi":
- new_params.extend((k, value) for value in v)
- else:
- if collection_format == "ssv":
- delimiter = " "
- elif collection_format == "tsv":
- delimiter = "\t"
- elif collection_format == "pipes":
- delimiter = "|"
- else: # csv is the default
- delimiter = ","
- new_params.append((k, delimiter.join(str(value) for value in v)))
- else:
- new_params.append((k, v))
- return new_params
-
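`parameters_to_tuples` expands collection-typed values according to the
registered format: `multi` repeats the key per value, while `csv` (the
default), `ssv`, `tsv`, and `pipes` join the values with a delimiter. A quick
sketch:

    from agbenchmark.agent_protocol_client.api_client import ApiClient

    client = ApiClient()
    params = {"tags": ["a", "b", "c"]}

    assert client.parameters_to_tuples(params, {"tags": "multi"}) == [
        ("tags", "a"), ("tags", "b"), ("tags", "c"),
    ]
    assert client.parameters_to_tuples(params, {"tags": "csv"}) == [("tags", "a,b,c")]
    # With no format registered, the value is passed through untouched:
    assert client.parameters_to_tuples(params, None) == [("tags", ["a", "b", "c"])]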
- def parameters_to_url_query(self, params, collection_formats):
- """Get parameters as list of tuples, formatting collections.
-
- :param params: Parameters as dict or list of two-tuples
- :param dict collection_formats: Parameter collection formats
- :return: URL query string (e.g. a=Hello%20World&b=123)
- """
- new_params = []
- if collection_formats is None:
- collection_formats = {}
- for k, v in (
- params.items() if isinstance(params, dict) else params
- ): # noqa: E501
- if isinstance(v, (int, float)):
- v = str(v)
- if isinstance(v, bool):
- v = str(v).lower()
- if isinstance(v, dict):
- v = json.dumps(v)
-
- if k in collection_formats:
- collection_format = collection_formats[k]
- if collection_format == "multi":
- new_params.extend((k, value) for value in v)
- else:
- if collection_format == "ssv":
- delimiter = " "
- elif collection_format == "tsv":
- delimiter = "\t"
- elif collection_format == "pipes":
- delimiter = "|"
- else: # csv is the default
- delimiter = ","
- new_params.append(
- (k, delimiter.join(quote(str(value)) for value in v))
- )
- else:
- new_params.append((k, quote(str(v))))
-
- return "&".join(["=".join(item) for item in new_params])
-
- def files_parameters(self, files=None):
- """Builds form parameters.
-
- :param files: File parameters.
- :return: Form parameters with files.
- """
- params = []
-
- if files:
- for k, v in files.items():
- if not v:
- continue
- file_names = v if isinstance(v, list) else [v]
- for n in file_names:
- with open(n, "rb") as f:
- filename = os.path.basename(f.name)
- filedata = f.read()
- mimetype = (
- mimetypes.guess_type(filename)[0]
- or "application/octet-stream"
- )
- params.append(tuple([k, tuple([filename, filedata, mimetype])]))
-
- return params
-
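`files_parameters` turns a `{field: path}` mapping into
`(field, (filename, bytes, mimetype))` tuples for the multipart encoder,
guessing the mimetype from the filename. A sketch using a temporary file; the
guessed mimetype can vary by platform:

    import os
    import tempfile

    from agbenchmark.agent_protocol_client.api_client import ApiClient

    with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as tmp:
        tmp.write(b"hello")
        path = tmp.name

    params = ApiClient().files_parameters({"file": path})
    # -> [("file", ("<basename>.txt", b"hello", "text/plain"))] on most platforms
    print(params)
    os.remove(path)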
- def select_header_accept(self, accepts):
- """Returns `Accept` based on an array of accepts provided.
-
- :param accepts: List of acceptable media types.
- :return: Accept (e.g. application/json).
- """
- if not accepts:
- return
-
- for accept in accepts:
- if re.search("json", accept, re.IGNORECASE):
- return accept
-
- return accepts[0]
-
- def select_header_content_type(self, content_types):
- """Returns `Content-Type` based on an array of content_types provided.
-
- :param content_types: List of content-types.
- :return: Content-Type (e.g. application/json).
- """
- if not content_types:
- return None
-
- for content_type in content_types:
- if re.search("json", content_type, re.IGNORECASE):
- return content_type
-
- return content_types[0]
-
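Both header selectors above prefer any media type containing "json" and fall
back to the first entry otherwise. For example:

    from agbenchmark.agent_protocol_client.api_client import ApiClient

    client = ApiClient()
    # A JSON-ish entry wins regardless of its position in the list:
    assert (
        client.select_header_accept(["application/xml", "application/json"])
        == "application/json"
    )
    # When nothing matches "json", the first entry is used:
    assert (
        client.select_header_content_type(["multipart/form-data"])
        == "multipart/form-data"
    )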
- def update_params_for_auth(
- self,
- headers,
- queries,
- auth_settings,
- resource_path,
- method,
- body,
- request_auth=None,
- ):
- """Updates header and query params based on authentication setting.
-
- :param headers: Header parameters dict to be updated.
- :param queries: Query parameters tuple list to be updated.
- :param auth_settings: Authentication setting identifiers list.
- :param resource_path: A string representation of the HTTP request resource path.
- :param method: A string representation of the HTTP request method.
- :param body: An object representing the body of the HTTP request.
- The object type is the return value of sanitize_for_serialization().
- :param request_auth: if set, the provided settings will
- override the token in the configuration.
- """
- if not auth_settings:
- return
-
- if request_auth:
- self._apply_auth_params(
- headers, queries, resource_path, method, body, request_auth
- )
- return
-
- for auth in auth_settings:
- auth_setting = self.configuration.auth_settings().get(auth)
- if auth_setting:
- self._apply_auth_params(
- headers, queries, resource_path, method, body, auth_setting
- )
-
- def _apply_auth_params(
- self, headers, queries, resource_path, method, body, auth_setting
- ):
- """Updates the request parameters based on a single auth_setting
-
- :param headers: Header parameters dict to be updated.
- :param queries: Query parameters tuple list to be updated.
- :param resource_path: A string representation of the HTTP request resource path.
- :param method: A string representation of the HTTP request method.
- :param body: An object representing the body of the HTTP request.
- The object type is the return value of sanitize_for_serialization().
- :param auth_setting: auth settings for the endpoint
- """
- if auth_setting["in"] == "cookie":
- headers["Cookie"] = auth_setting["value"]
- elif auth_setting["in"] == "header":
- if auth_setting["type"] != "http-signature":
- headers[auth_setting["key"]] = auth_setting["value"]
- elif auth_setting["in"] == "query":
- queries.append((auth_setting["key"], auth_setting["value"]))
- else:
- raise ApiValueError("Authentication token must be in `cookie`, `query` or `header`")
-
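Each `auth_setting` consumed above is a dict describing where and how to
inject a credential. A sketch with a placeholder token; `_apply_auth_params`
is internal, so this is for illustration only:

    from agbenchmark.agent_protocol_client.api_client import ApiClient

    auth_setting = {
        "in": "header",             # or "cookie" / "query"
        "type": "apiKey",           # anything except "http-signature" is applied
        "key": "Authorization",     # header name or query parameter name
        "value": "Bearer <token>",  # placeholder credential
    }

    headers, queries = {}, []
    ApiClient()._apply_auth_params(
        headers, queries, "/agent/tasks", "GET", None, auth_setting
    )
    assert headers == {"Authorization": "Bearer <token>"}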
- def __deserialize_file(self, response):
- """Deserializes body to file
-
- Saves response body into a file in a temporary folder,
- using the filename from the `Content-Disposition` header if provided.
-
- :param response: RESTResponse.
- :return: file path.
- """
- fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
- os.close(fd)
- os.remove(path)
-
- content_disposition = response.getheader("Content-Disposition")
- if content_disposition:
- filename = re.search(
- r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition
- ).group(1)
- path = os.path.join(os.path.dirname(path), filename)
-
- with open(path, "wb") as f:
- f.write(response.data)
-
- return path
-
- def __deserialize_primitive(self, data, klass):
- """Deserializes string to primitive type.
-
- :param data: str.
- :param klass: class literal.
-
- :return: int, long, float, str, bool.
- """
- try:
- return klass(data)
- except UnicodeEncodeError:
- return str(data)
- except TypeError:
- return data
-
- def __deserialize_object(self, value):
- """Return an original value.
-
- :return: object.
- """
- return value
-
- def __deserialize_date(self, string):
- """Deserializes string to date.
-
- :param string: str.
- :return: date.
- """
- try:
- return parse(string).date()
- except ImportError:
- return string
- except ValueError:
- raise rest.ApiException(
- status=0, reason="Failed to parse `{0}` as date object".format(string)
- )
-
- def __deserialize_datetime(self, string):
- """Deserializes string to datetime.
-
- The string should be in iso8601 datetime format.
-
- :param string: str.
- :return: datetime.
- """
- try:
- return parse(string)
- except ImportError:
- return string
- except ValueError:
- raise rest.ApiException(
- status=0,
- reason=("Failed to parse `{0}` as datetime object".format(string)),
- )
-
- def __deserialize_model(self, data, klass):
- """Deserializes list or dict to model.
-
- :param data: dict, list.
- :param klass: class literal.
- :return: model object.
- """
-
- return klass.from_dict(data)
diff --git a/benchmark/agbenchmark/agent_protocol_client/api_response.py b/benchmark/agbenchmark/agent_protocol_client/api_response.py
deleted file mode 100644
index d1fdefc1e..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/api_response.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""API response object."""
-
-from __future__ import annotations
-
-from typing import Any, Dict, Optional
-
-from pydantic import Field, StrictInt, StrictStr
-
-
-class ApiResponse:
- """
- API response object
- """
-
- status_code: Optional[StrictInt] = Field(None, description="HTTP status code")
- headers: Optional[Dict[StrictStr, StrictStr]] = Field(
- None, description="HTTP headers"
- )
- data: Optional[Any] = Field(
- None, description="Deserialized data given the data type"
- )
- raw_data: Optional[Any] = Field(None, description="Raw data (HTTP response body)")
-
- def __init__(self, status_code=None, headers=None, data=None, raw_data=None):
- self.status_code = status_code
- self.headers = headers
- self.data = data
- self.raw_data = raw_data
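This container is what `call_api` returns when `_return_http_data_only` is
not set: the deserialized `data` alongside the undecoded `raw_data`. A
minimal sketch:

    from agbenchmark.agent_protocol_client.api_response import ApiResponse

    resp = ApiResponse(
        status_code=200,
        headers={"Content-Type": "application/json"},
        data=["task-1", "task-2"],         # deserialized payload
        raw_data=b'["task-1", "task-2"]',  # undecoded HTTP body
    )
    assert resp.status_code == 200 and resp.data == ["task-1", "task-2"]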
diff --git a/benchmark/agbenchmark/agent_protocol_client/configuration.py b/benchmark/agbenchmark/agent_protocol_client/configuration.py
deleted file mode 100644
index 8bbcec716..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/configuration.py
+++ /dev/null
@@ -1,447 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-import copy
-import http.client as httplib
-import logging
-import sys
-
-import urllib3
-
-JSON_SCHEMA_VALIDATION_KEYWORDS = {
- "multipleOf",
- "maximum",
- "exclusiveMaximum",
- "minimum",
- "exclusiveMinimum",
- "maxLength",
- "minLength",
- "pattern",
- "maxItems",
- "minItems",
-}
-
-
-class Configuration(object):
- """This class contains various settings of the API client.
-
- :param host: Base url.
- :param api_key: Dict to store API key(s).
- Each entry in the dict specifies an API key.
- The dict key is the name of the security scheme in the OAS specification.
- The dict value is the API key secret.
- :param api_key_prefix: Dict to store API prefix (e.g. Bearer).
- The dict key is the name of the security scheme in the OAS specification.
- The dict value is an API key prefix when generating the auth data.
- :param username: Username for HTTP basic authentication.
- :param password: Password for HTTP basic authentication.
- :param access_token: Access token.
- :param server_index: Index to servers configuration.
-    :param server_variables: Mapping of variable names to string values used to
-        fill templated server configuration. Variables with defined enum values
-        are validated before substitution.
- :param server_operation_index: Mapping from operation ID to an index to server
- configuration.
-    :param server_operation_variables: Mapping from operation ID to a mapping of
-        variable names to string values used to fill templated server configuration.
-        Variables with defined enum values are validated before substitution.
- :param ssl_ca_cert: str - the path to a file of concatenated CA certificates
- in PEM format.
-
- """
-
- _default = None
-
- def __init__(
- self,
- host=None,
- api_key=None,
- api_key_prefix=None,
- username=None,
- password=None,
- access_token=None,
- server_index=None,
- server_variables=None,
- server_operation_index=None,
- server_operation_variables=None,
- ssl_ca_cert=None,
- ):
- """Constructor"""
- self._base_path = "http://localhost" if host is None else host
- """Default Base url
- """
-        self.server_index = 0 if server_index is None and host is None else server_index
-        """Default server index
-        """
-        self.server_operation_index = server_operation_index or {}
-        self.server_variables = server_variables or {}
-        """Default server variables
-        """
-        self.server_operation_variables = server_operation_variables or {}
- self.temp_folder_path = None
- """Temp file folder for downloading files
- """
- # Authentication Settings
- self.api_key = {}
- if api_key:
- self.api_key = api_key
- """dict to store API key(s)
- """
- self.api_key_prefix = {}
- if api_key_prefix:
- self.api_key_prefix = api_key_prefix
- """dict to store API prefix (e.g. Bearer)
- """
- self.refresh_api_key_hook = None
- """function hook to refresh API key if expired
- """
- self.username = username
- """Username for HTTP basic authentication
- """
- self.password = password
- """Password for HTTP basic authentication
- """
- self.access_token = access_token
- """Access token
- """
- self.logger = {}
- """Logging Settings
- """
- self.logger["package_logger"] = logging.getLogger("agent_protocol_client")
- self.logger["urllib3_logger"] = logging.getLogger("urllib3")
- self.logger_format = "%(asctime)s %(levelname)s %(message)s"
- """Log format
- """
- self.logger_stream_handler = None
- """Log stream handler
- """
- self.logger_file_handler = None
- """Log file handler
- """
- self.logger_file = None
- """Debug file location
- """
- self.debug = False
- """Debug switch
- """
-
- self.verify_ssl = True
- """SSL/TLS verification
-        Set this to False to skip verifying the SSL certificate when calling
-        the API over HTTPS.
- """
- self.ssl_ca_cert = ssl_ca_cert
- """Set this to customize the certificate file to verify the peer.
- """
- self.cert_file = None
- """client certificate file
- """
- self.key_file = None
- """client key file
- """
- self.assert_hostname = None
- """Set this to True/False to enable/disable SSL hostname verification.
- """
- self.tls_server_name = None
- """SSL/TLS Server Name Indication (SNI)
- Set this to the SNI value expected by the server.
- """
-
- self.connection_pool_maxsize = 100
-        This value is passed to aiohttp to limit simultaneous connections.
-        Default value is 100; None means no limit.
- """
-
- self.proxy = None
- """Proxy URL
- """
- self.proxy_headers = None
- """Proxy headers
- """
- self.safe_chars_for_path_param = ""
- """Safe chars for path_param
- """
- self.retries = None
-        """Number of retries; overrides the urllib3 default of 3
- """
- # Enable client side validation
- self.client_side_validation = True
-
- self.socket_options = None
- """Options to pass down to the underlying urllib3 socket
- """
-
- self.datetime_format = "%Y-%m-%dT%H:%M:%S.%f%z"
- """datetime format
- """
-
- self.date_format = "%Y-%m-%d"
- """date format
- """
-
- def __deepcopy__(self, memo):
- cls = self.__class__
- result = cls.__new__(cls)
- memo[id(self)] = result
- for k, v in self.__dict__.items():
- if k not in ("logger", "logger_file_handler"):
- setattr(result, k, copy.deepcopy(v, memo))
- # shallow copy of loggers
- result.logger = copy.copy(self.logger)
- # use setters to configure loggers
- result.logger_file = self.logger_file
- result.debug = self.debug
- return result
-
- def __setattr__(self, name, value):
- object.__setattr__(self, name, value)
-
- @classmethod
- def set_default(cls, default):
- """Set default instance of configuration.
-
- It stores default configuration, which can be
- returned by get_default_copy method.
-
- :param default: object of Configuration
- """
- cls._default = default
-
- @classmethod
- def get_default_copy(cls):
- """Deprecated. Please use `get_default` instead.
-
- :return: The configuration object.
- """
- return cls.get_default()
-
- @classmethod
- def get_default(cls):
- """Return the default configuration.
-
-        If a default was set via `set_default`, that instance is returned;
-        otherwise a new `Configuration` is created with the default
-        constructor, stored as the default, and returned.
-
- :return: The configuration object.
- """
- if cls._default is None:
- cls._default = Configuration()
- return cls._default
-
- @property
- def logger_file(self):
- """The logger file.
-
-        Setting a path attaches a file handler to the configured loggers.
-
- :param value: The logger_file path.
- :type: str
- """
- return self.__logger_file
-
- @logger_file.setter
- def logger_file(self, value):
- """The logger file.
-
-        Setting a path attaches a file handler to the configured loggers.
-
- :param value: The logger_file path.
- :type: str
- """
- self.__logger_file = value
- if self.__logger_file:
- # If set logging file,
- # then add file handler and remove stream handler.
- self.logger_file_handler = logging.FileHandler(self.__logger_file)
- self.logger_file_handler.setFormatter(self.logger_formatter)
- for _, logger in self.logger.items():
- logger.addHandler(self.logger_file_handler)
-
- @property
- def debug(self):
- """Debug status
-
- :param value: The debug status, True or False.
- :type: bool
- """
- return self.__debug
-
- @debug.setter
- def debug(self, value):
- """Debug status
-
- :param value: The debug status, True or False.
- :type: bool
- """
- self.__debug = value
- if self.__debug:
- # if debug status is True, turn on debug logging
- for _, logger in self.logger.items():
- logger.setLevel(logging.DEBUG)
- # turn on httplib debug
- httplib.HTTPConnection.debuglevel = 1
- else:
- # if debug status is False, turn off debug logging,
- # setting log level to default `logging.WARNING`
- for _, logger in self.logger.items():
- logger.setLevel(logging.WARNING)
- # turn off httplib debug
- httplib.HTTPConnection.debuglevel = 0
-
- @property
- def logger_format(self):
- """The logger format.
-
-        The logger_formatter is updated whenever logger_format is set.
-
- :param value: The format string.
- :type: str
- """
- return self.__logger_format
-
- @logger_format.setter
- def logger_format(self, value):
- """The logger format.
-
-        The logger_formatter is updated whenever logger_format is set.
-
- :param value: The format string.
- :type: str
- """
- self.__logger_format = value
- self.logger_formatter = logging.Formatter(self.__logger_format)
-
- def get_api_key_with_prefix(self, identifier, alias=None):
- """Gets API key (with prefix if set).
-
- :param identifier: The identifier of apiKey.
- :param alias: The alternative identifier of apiKey.
- :return: The token for api key authentication.
- """
- if self.refresh_api_key_hook is not None:
- self.refresh_api_key_hook(self)
- key = self.api_key.get(
- identifier, self.api_key.get(alias) if alias is not None else None
- )
- if key:
- prefix = self.api_key_prefix.get(identifier)
- if prefix:
- return "%s %s" % (prefix, key)
- else:
- return key
-
- def get_basic_auth_token(self):
- """Gets HTTP basic authentication header (string).
-
- :return: The token for basic HTTP authentication.
- """
- username = ""
- if self.username is not None:
- username = self.username
- password = ""
- if self.password is not None:
- password = self.password
- return urllib3.util.make_headers(basic_auth=username + ":" + password).get(
- "authorization"
- )
-
- def auth_settings(self):
- """Gets Auth Settings dict for api client.
-
- :return: The Auth Settings information dict.
- """
- auth = {}
- return auth
-
- def to_debug_report(self):
- """Gets the essential information for debugging.
-
- :return: The report for debugging.
- """
- return (
- "Python SDK Debug Report:\n"
- "OS: {env}\n"
- "Python Version: {pyversion}\n"
- "Version of the API: v0.2\n"
- "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version)
- )
-
- def get_host_settings(self):
- """Gets an array of host settings
-
- :return: An array of host settings
- """
- return [
- {
- "url": "",
- "description": "No description provided",
- }
- ]
-
- def get_host_from_settings(self, index, variables=None, servers=None):
- """Gets host URL based on the index and variables
- :param index: array index of the host settings
- :param variables: hash of variable and the corresponding value
- :param servers: an array of host settings or None
- :return: URL based on host settings
- """
- if index is None:
- return self._base_path
-
- variables = {} if variables is None else variables
- servers = self.get_host_settings() if servers is None else servers
-
- try:
- server = servers[index]
- except IndexError:
- raise ValueError(
- "Invalid index {0} when selecting the host settings. "
- "Must be less than {1}".format(index, len(servers))
- )
-
- url = server["url"]
-
- # go through variables and replace placeholders
- for variable_name, variable in server.get("variables", {}).items():
- used_value = variables.get(variable_name, variable["default_value"])
-
- if "enum_values" in variable and used_value not in variable["enum_values"]:
- raise ValueError(
- "The variable `{0}` in the host URL has invalid value "
- "{1}. Must be {2}.".format(
- variable_name, variables[variable_name], variable["enum_values"]
- )
- )
-
- url = url.replace("{" + variable_name + "}", used_value)
-
- return url
-
- @property
- def host(self):
- """Return generated host."""
- return self.get_host_from_settings(
- self.server_index, variables=self.server_variables
- )
-
- @host.setter
- def host(self, value):
- """Fix base path."""
- self._base_path = value
- self.server_index = None
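
`get_host_from_settings` substitutes `{variable}` placeholders in templated server URLs and validates any declared enum values before substituting. A sketch against the class above, with purely illustrative server settings:

```python
config = Configuration()  # host defaults to http://localhost
servers = [
    {
        "url": "https://{environment}.example.com",
        "variables": {
            "environment": {
                "default_value": "api",
                "enum_values": ["api", "staging"],
            }
        },
    }
]
url = config.get_host_from_settings(
    0, variables={"environment": "staging"}, servers=servers
)
assert url == "https://staging.example.com"
```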
diff --git a/benchmark/agbenchmark/agent_protocol_client/docs/AgentApi.md b/benchmark/agbenchmark/agent_protocol_client/docs/AgentApi.md
deleted file mode 100644
index 7f0be780c..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/docs/AgentApi.md
+++ /dev/null
@@ -1,615 +0,0 @@
-# agbenchmark.agent_protocol_client.AgentApi
-
-All URIs are relative to _http://localhost_
-
-| Method | HTTP request | Description |
-| ---------------------------------------------------------------------------- | ------------------------------------------------------ | ------------------------------------------------------------- |
-| [**create_agent_task**](AgentApi.md#create_agent_task) | **POST** /agent/tasks | Creates a task for the agent. |
-| [**download_agent_task_artifact**](AgentApi.md#download_agent_task_artifact) | **GET** /agent/tasks/{task_id}/artifacts/{artifact_id} | Download a specified artifact. |
-| [**execute_agent_task_step**](AgentApi.md#execute_agent_task_step) | **POST** /agent/tasks/{task_id}/steps | Execute a step in the specified agent task. |
-| [**get_agent_task**](AgentApi.md#get_agent_task) | **GET** /agent/tasks/{task_id} | Get details about a specified agent task. |
-| [**get_agent_task_step**](AgentApi.md#get_agent_task_step) | **GET** /agent/tasks/{task_id}/steps/{step_id} | Get details about a specified task step. |
-| [**list_agent_task_artifacts**](AgentApi.md#list_agent_task_artifacts) | **GET** /agent/tasks/{task_id}/artifacts | List all artifacts that have been created for the given task. |
-| [**list_agent_task_steps**](AgentApi.md#list_agent_task_steps) | **GET** /agent/tasks/{task_id}/steps | List all steps for the specified task. |
-| [**list_agent_tasks_ids**](AgentApi.md#list_agent_tasks_ids) | **GET** /agent/tasks | List all tasks that have been created for the agent. |
-| [**upload_agent_task_artifacts**](AgentApi.md#upload_agent_task_artifacts) | **POST** /agent/tasks/{task_id}/artifacts | Upload an artifact for the specified task. |
-
-# **create_agent_task**
-
-> Task create_agent_task(task_request_body=task_request_body)
-
-Creates a task for the agent.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.models.task import Task
-from agbenchmark.agent_protocol_client.models.task_request_body import TaskRequestBody
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
- task_request_body = agbenchmark.agent_protocol_client.TaskRequestBody() # TaskRequestBody | (optional)
-
- try:
- # Creates a task for the agent.
- api_response = await api_instance.create_agent_task(task_request_body=task_request_body)
- print("The response of AgentApi->create_agent_task:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->create_agent_task: %s\n" % e)
-```
-
-### Parameters
-
-| Name | Type | Description | Notes |
-| --------------------- | ----------------------------------------- | ----------- | ---------- |
-| **task_request_body** | [**TaskRequestBody**](TaskRequestBody.md) | | [optional] |
-
-### Return type
-
-[**Task**](Task.md)
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: application/json
-- **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | ------------------------------------------ | ---------------- |
-| **200** | A new agent task was successfully created. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
-# **download_agent_task_artifact**
-
-> bytearray download_agent_task_artifact(task_id, artifact_id)
-
-Download a specified artifact.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
- task_id = 'task_id_example' # str | ID of the task
- artifact_id = 'artifact_id_example' # str | ID of the artifact
-
- try:
- # Download a specified artifact.
- api_response = await api_instance.download_agent_task_artifact(task_id, artifact_id)
- print("The response of AgentApi->download_agent_task_artifact:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->download_agent_task_artifact: %s\n" % e)
-```
-
-### Parameters
-
-| Name | Type | Description | Notes |
-| --------------- | ------- | ------------------ | ----- |
-| **task_id** | **str** | ID of the task |
-| **artifact_id** | **str** | ID of the artifact |
-
-### Return type
-
-**bytearray**
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: Not defined
-- **Accept**: application/octet-stream
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | ------------------------------------- | ---------------- |
-| **200** | Returned the content of the artifact. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
-# **execute_agent_task_step**
-
-> Step execute_agent_task_step(task_id, step_request_body=step_request_body)
-
-Execute a step in the specified agent task.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.models.step import Step
-from agbenchmark.agent_protocol_client.models.step_request_body import StepRequestBody
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
- task_id = 'task_id_example' # str | ID of the task
- step_request_body = agbenchmark.agent_protocol_client.StepRequestBody() # StepRequestBody | (optional)
-
- try:
- # Execute a step in the specified agent task.
- api_response = await api_instance.execute_agent_task_step(task_id, step_request_body=step_request_body)
- print("The response of AgentApi->execute_agent_task_step:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->execute_agent_task_step: %s\n" % e)
-```
-
-### Parameters
-
-| Name | Type | Description | Notes |
-| --------------------- | ----------------------------------------- | -------------- | ---------- |
-| **task_id** | **str** | ID of the task |
-| **step_request_body** | [**StepRequestBody**](StepRequestBody.md) | | [optional] |
-
-### Return type
-
-[**Step**](Step.md)
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: application/json
-- **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | --------------------------------- | ---------------- |
-| **200** | Executed step for the agent task. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
-# **get_agent_task**
-
-> Task get_agent_task(task_id)
-
-Get details about a specified agent task.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.models.task import Task
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
- task_id = 'task_id_example' # str | ID of the task
-
- try:
- # Get details about a specified agent task.
- api_response = await api_instance.get_agent_task(task_id)
- print("The response of AgentApi->get_agent_task:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->get_agent_task: %s\n" % e)
-```
-
-### Parameters
-
-| Name | Type | Description | Notes |
-| ----------- | ------- | -------------- | ----- |
-| **task_id** | **str** | ID of the task |
-
-### Return type
-
-[**Task**](Task.md)
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: Not defined
-- **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | ------------------------------------- | ---------------- |
-| **200** | Returned details about an agent task. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
-# **get_agent_task_step**
-
-> Step get_agent_task_step(task_id, step_id)
-
-Get details about a specified task step.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.models.step import Step
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
- task_id = 'task_id_example' # str | ID of the task
- step_id = 'step_id_example' # str | ID of the step
-
- try:
- # Get details about a specified task step.
- api_response = await api_instance.get_agent_task_step(task_id, step_id)
- print("The response of AgentApi->get_agent_task_step:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->get_agent_task_step: %s\n" % e)
-```
-
-### Parameters
-
-| Name | Type | Description | Notes |
-| ----------- | ------- | -------------- | ----- |
-| **task_id** | **str** | ID of the task |
-| **step_id** | **str** | ID of the step |
-
-### Return type
-
-[**Step**](Step.md)
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: Not defined
-- **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | ------------------------------------------ | ---------------- |
-| **200** | Returned details about an agent task step. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
-# **list_agent_task_artifacts**
-
-> List[Artifact] list_agent_task_artifacts(task_id)
-
-List all artifacts that have been created for the given task.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
- task_id = 'task_id_example' # str | ID of the task
-
- try:
- # List all artifacts that have been created for the given task.
- api_response = await api_instance.list_agent_task_artifacts(task_id)
- print("The response of AgentApi->list_agent_task_artifacts:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->list_agent_task_artifacts: %s\n" % e)
-```
-
-### Parameters
-
-| Name | Type | Description | Notes |
-| ----------- | ------- | -------------- | ----- |
-| **task_id** | **str** | ID of the task |
-
-### Return type
-
-[**List[Artifact]**](Artifact.md)
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: Not defined
-- **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | ------------------------------------- | ---------------- |
-| **200** | Returned the content of the artifact. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
-# **list_agent_task_steps**
-
-> List[str] list_agent_task_steps(task_id)
-
-List all steps for the specified task.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
- task_id = 'task_id_example' # str | ID of the task
-
- try:
- # List all steps for the specified task.
- api_response = await api_instance.list_agent_task_steps(task_id)
- print("The response of AgentApi->list_agent_task_steps:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->list_agent_task_steps: %s\n" % e)
-```
-
-### Parameters
-
-| Name | Type | Description | Notes |
-| ----------- | ------- | -------------- | ----- |
-| **task_id** | **str** | ID of the task |
-
-### Return type
-
-**List[str]**
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: Not defined
-- **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | ------------------------------------------------------------- | ---------------- |
-| **200**     | Returned list of agent's step IDs for the specified task. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
-# **list_agent_tasks_ids**
-
-> List[str] list_agent_tasks_ids()
-
-List all tasks that have been created for the agent.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
-
- try:
- # List all tasks that have been created for the agent.
- api_response = await api_instance.list_agent_tasks_ids()
- print("The response of AgentApi->list_agent_tasks_ids:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->list_agent_tasks_ids: %s\n" % e)
-```
-
-### Parameters
-
-This endpoint does not need any parameters.
-
-### Return type
-
-**List[str]**
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: Not defined
-- **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | -------------------------------------- | ---------------- |
-| **200**     | Returned list of agent's task IDs. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
-
-# **upload_agent_task_artifacts**
-
-> Artifact upload_agent_task_artifacts(task_id, file, relative_path=relative_path)
-
-Upload an artifact for the specified task.
-
-### Example
-
-```python
-import time
-import os
-import agbenchmark.agent_protocol_client
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-from agbenchmark.agent_protocol_client.rest import ApiException
-from pprint import pprint
-
-# Defining the host is optional and defaults to http://localhost
-# See configuration.py for a list of all supported configuration parameters.
-configuration = agbenchmark.agent_protocol_client.Configuration(
- host = "http://localhost"
-)
-
-
-# Enter a context with an instance of the API client
-async with agbenchmark.agent_protocol_client.ApiClient(configuration) as api_client:
- # Create an instance of the API class
- api_instance = agbenchmark.agent_protocol_client.AgentApi(api_client)
- task_id = 'task_id_example' # str | ID of the task
- file = None # bytearray | File to upload.
- relative_path = 'relative_path_example' # str | Relative path of the artifact in the agent's workspace. (optional)
-
- try:
- # Upload an artifact for the specified task.
- api_response = await api_instance.upload_agent_task_artifacts(task_id, file, relative_path=relative_path)
- print("The response of AgentApi->upload_agent_task_artifacts:\n")
- pprint(api_response)
- except Exception as e:
- print("Exception when calling AgentApi->upload_agent_task_artifacts: %s\n" % e)
-```
-
-### Parameters
-
-| Name | Type | Description | Notes |
-| ----------------- | ------------- | ----------------------------------------------------------- | ---------- |
-| **task_id** | **str** | ID of the task |
-| **file** | **bytearray** | File to upload. |
-| **relative_path** | **str**       | Relative path of the artifact in the agent's workspace. | [optional] |
-
-### Return type
-
-[**Artifact**](Artifact.md)
-
-### Authorization
-
-No authorization required
-
-### HTTP request headers
-
-- **Content-Type**: multipart/form-data
-- **Accept**: application/json
-
-### HTTP response details
-
-| Status code | Description | Response headers |
-| ----------- | ------------------------------------- | ---------------- |
-| **200** | Returned the content of the artifact. | - |
-| **0** | Internal Server Error | - |
-
-[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
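
With this vendored copy removed in favour of the `agent-protocol-client` dependency, callers migrate by switching the import root. A sketch of the new-style usage, assuming the PyPI package exposes the same top-level names and an agent is serving the protocol locally:

```python
import asyncio

# Previously: from agbenchmark.agent_protocol_client import ...
from agent_protocol_client import AgentApi, ApiClient, Configuration

async def main():
    configuration = Configuration(host="http://localhost")
    async with ApiClient(configuration) as api_client:
        agent = AgentApi(api_client)
        # List all tasks that have been created for the agent.
        task_ids = await agent.list_agent_tasks_ids()
        print(task_ids)

if __name__ == "__main__":
    asyncio.run(main())  # requires a running agent at http://localhost
```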
diff --git a/benchmark/agbenchmark/agent_protocol_client/exceptions.py b/benchmark/agbenchmark/agent_protocol_client/exceptions.py
deleted file mode 100644
index bd26eac3c..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/exceptions.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-class OpenApiException(Exception):
- """The base exception class for all OpenAPIExceptions"""
-
-
-class ApiTypeError(OpenApiException, TypeError):
- def __init__(self, msg, path_to_item=None, valid_classes=None, key_type=None):
- """Raises an exception for TypeErrors
-
- Args:
- msg (str): the exception message
-
- Keyword Args:
-            path_to_item (list): a list of keys and indices to get to the
-                current_item
- None if unset
- valid_classes (tuple): the primitive classes that current item
- should be an instance of
- None if unset
- key_type (bool): False if our value is a value in a dict
- True if it is a key in a dict
- False if our item is an item in a list
- None if unset
- """
- self.path_to_item = path_to_item
- self.valid_classes = valid_classes
- self.key_type = key_type
- full_msg = msg
- if path_to_item:
- full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
- super(ApiTypeError, self).__init__(full_msg)
-
-
-class ApiValueError(OpenApiException, ValueError):
- def __init__(self, msg, path_to_item=None):
- """
- Args:
- msg (str): the exception message
-
- Keyword Args:
-            path_to_item (list): the path to the exception in the
- received_data dict. None if unset
- """
-
- self.path_to_item = path_to_item
- full_msg = msg
- if path_to_item:
- full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
- super(ApiValueError, self).__init__(full_msg)
-
-
-class ApiAttributeError(OpenApiException, AttributeError):
- def __init__(self, msg, path_to_item=None):
- """
- Raised when an attribute reference or assignment fails.
-
- Args:
- msg (str): the exception message
-
- Keyword Args:
-            path_to_item (None/list): the path to the exception in the
- received_data dict
- """
- self.path_to_item = path_to_item
- full_msg = msg
- if path_to_item:
- full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
- super(ApiAttributeError, self).__init__(full_msg)
-
-
-class ApiKeyError(OpenApiException, KeyError):
- def __init__(self, msg, path_to_item=None):
- """
- Args:
- msg (str): the exception message
-
- Keyword Args:
-            path_to_item (None/list): the path to the exception in the
- received_data dict
- """
- self.path_to_item = path_to_item
- full_msg = msg
- if path_to_item:
- full_msg = "{0} at {1}".format(msg, render_path(path_to_item))
- super(ApiKeyError, self).__init__(full_msg)
-
-
-class ApiException(OpenApiException):
- def __init__(self, status=None, reason=None, http_resp=None):
- if http_resp:
- self.status = http_resp.status
- self.reason = http_resp.reason
- self.body = http_resp.data
- self.headers = http_resp.getheaders()
- else:
- self.status = status
- self.reason = reason
- self.body = None
- self.headers = None
-
- def __str__(self):
- """Custom error messages for exception"""
- error_message = "({0})\n" "Reason: {1}\n".format(self.status, self.reason)
- if self.headers:
- error_message += "HTTP response headers: {0}\n".format(self.headers)
-
- if self.body:
- error_message += "HTTP response body: {0}\n".format(self.body)
-
- return error_message
-
-
-class NotFoundException(ApiException):
- def __init__(self, status=None, reason=None, http_resp=None):
- super(NotFoundException, self).__init__(status, reason, http_resp)
-
-
-class UnauthorizedException(ApiException):
- def __init__(self, status=None, reason=None, http_resp=None):
- super(UnauthorizedException, self).__init__(status, reason, http_resp)
-
-
-class ForbiddenException(ApiException):
- def __init__(self, status=None, reason=None, http_resp=None):
- super(ForbiddenException, self).__init__(status, reason, http_resp)
-
-
-class ServiceException(ApiException):
- def __init__(self, status=None, reason=None, http_resp=None):
- super(ServiceException, self).__init__(status, reason, http_resp)
-
-
-def render_path(path_to_item):
- """Returns a string representation of a path"""
- result = ""
- for pth in path_to_item:
- if isinstance(pth, int):
- result += "[{0}]".format(pth)
- else:
- result += "['{0}']".format(pth)
- return result
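
`render_path` renders a path of keys and indices in subscript notation for error messages, e.g.:

```python
# String keys render as ['key'], integer indices as [0]:
assert render_path(["tasks", 0, "artifacts"]) == "['tasks'][0]['artifacts']"
```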
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/__init__.py b/benchmark/agbenchmark/agent_protocol_client/models/__init__.py
deleted file mode 100644
index c7bd82dcd..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# coding: utf-8
-
-# flake8: noqa
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-# import models into model package
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-from agbenchmark.agent_protocol_client.models.artifacts import Artifacts
-from agbenchmark.agent_protocol_client.models.pagination import Pagination
-from agbenchmark.agent_protocol_client.models.step import Step
-from agbenchmark.agent_protocol_client.models.step_all_of import StepAllOf
-from agbenchmark.agent_protocol_client.models.step_request_body import StepRequestBody
-from agbenchmark.agent_protocol_client.models.task import Task
-from agbenchmark.agent_protocol_client.models.task_all_of import TaskAllOf
-from agbenchmark.agent_protocol_client.models.task_request_body import TaskRequestBody
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/artifact.py b/benchmark/agbenchmark/agent_protocol_client/models/artifact.py
deleted file mode 100644
index d2e7c101b..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/artifact.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# coding: utf-8
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-from typing import Optional
-
-from pydantic import BaseModel, Field, StrictStr
-
-
-class Artifact(BaseModel):
- """
- Artifact that the task has produced.
- """
-
- artifact_id: StrictStr = Field(..., description="ID of the artifact.")
- file_name: StrictStr = Field(..., description="Filename of the artifact.")
- relative_path: Optional[StrictStr] = Field(
- None, description="Relative path of the artifact in the agent's workspace."
- )
-    created_at: StrictStr = Field(..., description="Creation date of the artifact.")
-    # modified_at: StrictStr = Field(..., description="Modification date of the artifact.")
-    agent_created: bool = Field(..., description="True if created by the agent.")
-    __properties = [
-        "artifact_id",
-        "file_name",
-        "relative_path",
-        "created_at",
-        "agent_created",
-    ]
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> Artifact:
- """Create an instance of Artifact from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> Artifact:
- """Create an instance of Artifact from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return Artifact.parse_obj(obj)
-
- _obj = Artifact.parse_obj(
- {
- "artifact_id": obj.get("artifact_id"),
- "file_name": obj.get("file_name"),
- "relative_path": obj.get("relative_path"),
- "created_at": obj.get("created_at"),
- "agent_created": obj.get("agent_created"),
- }
- )
- return _obj
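
A round-trip sketch for the `Artifact` model above (illustrative field values; reuses the module's `json` import):

```python
payload = {
    "artifact_id": "af-1",
    "file_name": "output.txt",
    "relative_path": "results/output.txt",
    "created_at": "2024-01-02T22:23:09Z",
    "agent_created": True,
}
artifact = Artifact.from_dict(payload)
assert artifact.file_name == "output.txt"
# to_json() serializes via to_dict(), which drops unset optional fields:
assert json.loads(artifact.to_json())["artifact_id"] == "af-1"
```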
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/artifacts.py b/benchmark/agbenchmark/agent_protocol_client/models/artifacts.py
deleted file mode 100644
index 1d61a0558..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/artifacts.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-
-from pydantic import BaseModel
-
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-from agbenchmark.agent_protocol_client.models.pagination import Pagination
-
-
-class Artifacts(BaseModel):
- """
- Artifacts that the task has produced.
- """
-
- artifacts: list[Artifact]
- pagination: Pagination
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> Artifacts:
- """Create an instance of Artifacts from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> Artifacts:
- """Create an instance of Artifacts from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return Artifacts.parse_obj(obj)
-
- _obj = Artifacts.parse_obj(
- {
- "artifacts": obj.get("artifacts"),
- "pagination": obj.get("pagination"),
- }
- )
- return _obj
-
-
-Artifacts.update_forward_refs()
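
The paginated wrapper coerces the nested `pagination` dict into a `Pagination` model; an empty page, for brevity:

```python
page = Artifacts.from_dict(
    {
        "artifacts": [],
        "pagination": {
            "total_items": 0,
            "total_pages": 0,
            "current_page": 1,
            "page_size": 10,
        },
    }
)
assert page.artifacts == [] and page.pagination.page_size == 10
```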
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/pagination.py b/benchmark/agbenchmark/agent_protocol_client/models/pagination.py
deleted file mode 100644
index 433de1f28..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/pagination.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-
-from pydantic import BaseModel
-
-
-class Pagination(BaseModel):
- """
-    Pagination metadata for a paginated list response.
- """
-
- total_items: int
- total_pages: int
- current_page: int
- page_size: int
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> Pagination:
- """Create an instance of Pagination from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> Pagination:
- """Create an instance of Pagination from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return Pagination.parse_obj(obj)
-
- _obj = Pagination.parse_obj(
- {
- "total_items": obj.get("total_items"),
- "total_pages": obj.get("total_pages"),
- "current_page": obj.get("current_page"),
- "page_size": obj.get("page_size"),
- }
- )
- return _obj
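
`from_json` is a thin wrapper over `from_dict`; a sketch with illustrative numbers:

```python
page = Pagination.from_json(
    '{"total_items": 42, "total_pages": 5, "current_page": 1, "page_size": 10}'
)
assert page.total_pages == 5
```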
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/step.py b/benchmark/agbenchmark/agent_protocol_client/models/step.py
deleted file mode 100644
index befef1abc..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/step.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field, StrictBool, StrictStr, conlist, validator
-
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-
-
-class Step(BaseModel):
- """
- Step
- """
-
- input: Optional[StrictStr] = Field(None, description="Input prompt for the step.")
- additional_input: Optional[Any] = Field(
- None, description="Input parameters for the task step. Any value is allowed."
- )
- task_id: StrictStr = Field(
- ..., description="The ID of the task this step belongs to."
- )
- step_id: StrictStr = Field(..., description="The ID of the task step.")
- name: Optional[StrictStr] = Field(None, description="The name of the task step.")
- status: StrictStr = Field(..., description="The status of the task step.")
- output: Optional[StrictStr] = Field(None, description="Output of the task step.")
- additional_output: Optional[Any] = Field(
- None,
- description="Output that the task step has produced. Any value is allowed.",
- )
- artifacts: conlist(Artifact) = Field(
- ..., description="A list of artifacts that the step has produced."
- )
- is_last: Optional[StrictBool] = Field(
- False, description="Whether this is the last step in the task."
- )
- __properties = [
- "input",
- "additional_input",
- "task_id",
- "step_id",
- "name",
- "status",
- "output",
- "additional_output",
- "artifacts",
- "is_last",
- ]
-
- @validator("status")
- def status_validate_enum(cls, value):
- """Validates the enum"""
- if value not in ("created", "completed"):
- raise ValueError("must be one of enum values ('created', 'completed')")
- return value
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> Step:
- """Create an instance of Step from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- # override the default output from pydantic by calling `to_dict()` of each item in artifacts (list)
- _items = []
- if self.artifacts:
- for _item in self.artifacts:
- if _item:
- _items.append(_item.to_dict())
- _dict["artifacts"] = _items
- # set to None if additional_input (nullable) is None
- # and __fields_set__ contains the field
- if self.additional_input is None and "additional_input" in self.__fields_set__:
- _dict["additional_input"] = None
-
- # set to None if additional_output (nullable) is None
- # and __fields_set__ contains the field
- if (
- self.additional_output is None
- and "additional_output" in self.__fields_set__
- ):
- _dict["additional_output"] = None
-
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> Step:
- """Create an instance of Step from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return Step.parse_obj(obj)
-
- _obj = Step.parse_obj(
- {
- "input": obj.get("input"),
- "additional_input": obj.get("additional_input"),
- "task_id": obj.get("task_id"),
- "step_id": obj.get("step_id"),
- "name": obj.get("name"),
- "status": obj.get("status"),
- "output": obj.get("output"),
- "additional_output": obj.get("additional_output"),
- "artifacts": [
- Artifact.from_dict(_item) for _item in obj.get("artifacts")
- ]
- if obj.get("artifacts") is not None
- else None,
- "is_last": obj.get("is_last")
- if obj.get("is_last") is not None
- else False,
- }
- )
- return _obj
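
Because `validate_assignment = True`, the `status` enum validator runs both at construction and on attribute assignment. A sketch (illustrative IDs):

```python
step = Step.from_dict(
    {
        "task_id": "t-1",
        "step_id": "s-1",
        "status": "created",
        "artifacts": [],
    }
)
assert step.is_last is False  # default applied when the key is absent

try:
    step.status = "running"  # re-validated on assignment
except ValueError as e:
    print(e)  # must be one of enum values ('created', 'completed')
```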
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/step_all_of.py b/benchmark/agbenchmark/agent_protocol_client/models/step_all_of.py
deleted file mode 100644
index 8770f3b31..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/step_all_of.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field, StrictBool, StrictStr, conlist, validator
-
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-
-
-class StepAllOf(BaseModel):
- """
- StepAllOf
- """
-
- task_id: StrictStr = Field(
- ..., description="The ID of the task this step belongs to."
- )
- step_id: StrictStr = Field(..., description="The ID of the task step.")
- name: Optional[StrictStr] = Field(None, description="The name of the task step.")
- status: StrictStr = Field(..., description="The status of the task step.")
- output: Optional[StrictStr] = Field(None, description="Output of the task step.")
- additional_output: Optional[Any] = Field(
- None,
- description="Output that the task step has produced. Any value is allowed.",
- )
- artifacts: conlist(Artifact) = Field(
- ..., description="A list of artifacts that the step has produced."
- )
- is_last: Optional[StrictBool] = Field(
- False, description="Whether this is the last step in the task."
- )
- __properties = [
- "task_id",
- "step_id",
- "name",
- "status",
- "output",
- "additional_output",
- "artifacts",
- "is_last",
- ]
-
- @validator("status")
- def status_validate_enum(cls, value):
- """Validates the enum"""
- if value not in ("created", "completed"):
- raise ValueError("must be one of enum values ('created', 'completed')")
- return value
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> StepAllOf:
- """Create an instance of StepAllOf from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- # override the default output from pydantic by calling `to_dict()` of each item in artifacts (list)
- _items = []
- if self.artifacts:
- for _item in self.artifacts:
- if _item:
- _items.append(_item.to_dict())
- _dict["artifacts"] = _items
- # set to None if additional_output (nullable) is None
- # and __fields_set__ contains the field
- if (
- self.additional_output is None
- and "additional_output" in self.__fields_set__
- ):
- _dict["additional_output"] = None
-
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> StepAllOf:
- """Create an instance of StepAllOf from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return StepAllOf.parse_obj(obj)
-
- _obj = StepAllOf.parse_obj(
- {
- "task_id": obj.get("task_id"),
- "step_id": obj.get("step_id"),
- "name": obj.get("name"),
- "status": obj.get("status"),
- "output": obj.get("output"),
- "additional_output": obj.get("additional_output"),
- "artifacts": [
- Artifact.from_dict(_item) for _item in obj.get("artifacts")
- ]
- if obj.get("artifacts") is not None
- else None,
- "is_last": obj.get("is_last")
- if obj.get("is_last") is not None
- else False,
- }
- )
- return _obj
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/step_request_body.py b/benchmark/agbenchmark/agent_protocol_client/models/step_request_body.py
deleted file mode 100644
index 47d642cfb..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/step_request_body.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field, StrictStr
-
-
-class StepRequestBody(BaseModel):
- """
-    Body of the task step request.
- """
-
- input: Optional[StrictStr] = Field(None, description="Input prompt for the step.")
- additional_input: Optional[Any] = Field(
- None, description="Input parameters for the task step. Any value is allowed."
- )
- __properties = ["input", "additional_input"]
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> StepRequestBody:
- """Create an instance of StepRequestBody from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- # set to None if additional_input (nullable) is None
- # and __fields_set__ contains the field
- if self.additional_input is None and "additional_input" in self.__fields_set__:
- _dict["additional_input"] = None
-
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> StepRequestBody:
- """Create an instance of StepRequestBody from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return StepRequestBody.parse_obj(obj)
-
- _obj = StepRequestBody.parse_obj(
- {"input": obj.get("input"), "additional_input": obj.get("additional_input")}
- )
- return _obj
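
The `__fields_set__` check above distinguishes "explicitly set to None" (the key is kept and serialized as `null`) from "never set" (the key is dropped by `exclude_none`). For example:

```python
body = StepRequestBody(input="Write a file", additional_input=None)
# additional_input was explicitly set, so to_dict() keeps it as None:
assert body.to_dict() == {"input": "Write a file", "additional_input": None}

body = StepRequestBody(input="Write a file")
# never set -> exclude_none drops it entirely:
assert body.to_dict() == {"input": "Write a file"}
```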
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/step_result.py b/benchmark/agbenchmark/agent_protocol_client/models/step_result.py
deleted file mode 100644
index 99bf8f820..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/step_result.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v1
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field, StrictBool, conlist
-
-
-class StepResult(BaseModel):
- """
- Result of the task step.
- """
-
- output: Optional[Any] = Field(
- None,
- description="Output that the task step has produced. Any value is allowed.",
- )
- artifacts: conlist(Any) = Field(
- ..., description="A list of artifacts that the step has produced."
- )
- is_last: Optional[StrictBool] = Field(
- False, description="Whether this is the last step in the task."
- )
- __properties = ["output", "artifacts", "is_last"]
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> StepResult:
- """Create an instance of StepResult from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- # set to None if output (nullable) is None
- # and __fields_set__ contains the field
- if self.output is None and "output" in self.__fields_set__:
- _dict["output"] = None
-
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> StepResult:
- """Create an instance of StepResult from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return StepResult.parse_obj(obj)
-
- _obj = StepResult.parse_obj(
- {
- "output": obj.get("output"),
- "artifacts": obj.get("artifacts"),
- "is_last": obj.get("is_last")
- if obj.get("is_last") is not None
- else False,
- }
- )
- return _obj
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/task.py b/benchmark/agbenchmark/agent_protocol_client/models/task.py
deleted file mode 100644
index 90329f25f..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/task.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field, StrictStr, conlist
-
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-
-
-class Task(BaseModel):
- """
- Task
- """
-
- input: Optional[StrictStr] = Field(None, description="Input prompt for the task.")
- additional_input: Optional[Any] = Field(
- None, description="Input parameters for the task. Any value is allowed."
- )
- task_id: StrictStr = Field(..., description="The ID of the task.")
- artifacts: conlist(Artifact) = Field(
- ..., description="A list of artifacts that the task has produced."
- )
- __properties = ["input", "additional_input", "task_id", "artifacts"]
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> Task:
- """Create an instance of Task from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- # override the default output from pydantic by calling `to_dict()` of each item in artifacts (list)
- _items = []
- if self.artifacts:
- for _item in self.artifacts:
- if _item:
- _items.append(_item.to_dict())
- _dict["artifacts"] = _items
- # set to None if additional_input (nullable) is None
- # and __fields_set__ contains the field
- if self.additional_input is None and "additional_input" in self.__fields_set__:
- _dict["additional_input"] = None
-
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> Task:
- """Create an instance of Task from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return Task.parse_obj(obj)
-
- _obj = Task.parse_obj(
- {
- "input": obj.get("input"),
- "additional_input": obj.get("additional_input"),
- "task_id": obj.get("task_id"),
- "artifacts": [
- Artifact.from_dict(_item) for _item in obj.get("artifacts")
- ]
- if obj.get("artifacts") is not None
- else None,
- }
- )
- return _obj
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/task_all_of.py b/benchmark/agbenchmark/agent_protocol_client/models/task_all_of.py
deleted file mode 100644
index 710377de6..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/task_all_of.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-
-from pydantic import BaseModel, Field, StrictStr, conlist
-
-from agbenchmark.agent_protocol_client.models.artifact import Artifact
-
-
-class TaskAllOf(BaseModel):
- """
- Definition of an agent task.
- """
-
- task_id: StrictStr = Field(..., description="The ID of the task.")
- artifacts: conlist(Artifact) = Field(
- ..., description="A list of artifacts that the task has produced."
- )
- __properties = ["task_id", "artifacts"]
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> TaskAllOf:
- """Create an instance of TaskAllOf from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- # override the default output from pydantic by calling `to_dict()` of each item in artifacts (list)
- _items = []
- if self.artifacts:
- for _item in self.artifacts:
- if _item:
- _items.append(_item.to_dict())
- _dict["artifacts"] = _items
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> TaskAllOf:
- """Create an instance of TaskAllOf from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return TaskAllOf.parse_obj(obj)
-
- _obj = TaskAllOf.parse_obj(
- {
- "task_id": obj.get("task_id"),
- "artifacts": [
- Artifact.from_dict(_item) for _item in obj.get("artifacts")
- ]
- if obj.get("artifacts") is not None
- else None,
- }
- )
- return _obj
diff --git a/benchmark/agbenchmark/agent_protocol_client/models/task_request_body.py b/benchmark/agbenchmark/agent_protocol_client/models/task_request_body.py
deleted file mode 100644
index 40a5d4ecb..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/models/task_request_body.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-from __future__ import annotations
-
-import json
-import pprint
-import re # noqa: F401
-from typing import Any, Optional
-
-from pydantic import BaseModel, Field, StrictStr
-
-
-class TaskRequestBody(BaseModel):
- """
- Body of the task request.
- """
-
- input: Optional[StrictStr] = Field(None, description="Input prompt for the task.")
- additional_input: Optional[Any] = Field(
- None, description="Input parameters for the task. Any value is allowed."
- )
- __properties = ["input", "additional_input"]
-
- class Config:
- """Pydantic configuration"""
-
- allow_population_by_field_name = True
- validate_assignment = True
-
- def to_str(self) -> str:
- """Returns the string representation of the model using alias"""
- return pprint.pformat(self.dict(by_alias=True))
-
- def to_json(self) -> str:
- """Returns the JSON representation of the model using alias"""
- return json.dumps(self.to_dict())
-
- @classmethod
- def from_json(cls, json_str: str) -> TaskRequestBody:
- """Create an instance of TaskRequestBody from a JSON string"""
- return cls.from_dict(json.loads(json_str))
-
- def to_dict(self):
- """Returns the dictionary representation of the model using alias"""
- _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
- # set to None if additional_input (nullable) is None
- # and __fields_set__ contains the field
- if self.additional_input is None and "additional_input" in self.__fields_set__:
- _dict["additional_input"] = None
-
- return _dict
-
- @classmethod
- def from_dict(cls, obj: dict) -> TaskRequestBody:
- """Create an instance of TaskRequestBody from a dict"""
- if obj is None:
- return None
-
- if not isinstance(obj, dict):
- return TaskRequestBody.parse_obj(obj)
-
- _obj = TaskRequestBody.parse_obj(
- {"input": obj.get("input"), "additional_input": obj.get("additional_input")}
- )
- return _obj
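The model modules deleted above now come from the published `agent-protocol-client` package instead. A minimal sketch of the replacement imports, assuming the published, generator-produced models keep the field names and helper methods shown above (per the commit message, the deleted code was an offline copy of that package):

    # Sketch: the deleted offline model copies are replaced by the published client library.
    from agent_protocol_client.models import StepRequestBody, TaskRequestBody

    # Construction works the same way as with the deleted local models.
    step = StepRequestBody(input="Run the next step")
    task = TaskRequestBody(input="Write the word 'Washington' to a .txt file")
    print(task.to_json())  # generated serialization helpers carry over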
diff --git a/benchmark/agbenchmark/agent_protocol_client/rest.py b/benchmark/agbenchmark/agent_protocol_client/rest.py
deleted file mode 100644
index 7c29a9598..000000000
--- a/benchmark/agbenchmark/agent_protocol_client/rest.py
+++ /dev/null
@@ -1,311 +0,0 @@
-# coding: utf-8
-
-"""
- Agent Communication Protocol
-
- Specification of the API protocol for communication with an agent. # noqa: E501
-
- The version of the OpenAPI document: v0.2
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
-"""
-
-
-import io
-import json
-import logging
-import re
-import ssl
-from urllib.parse import urlencode
-
-import aiohttp
-
-from agbenchmark.agent_protocol_client.exceptions import ApiException, ApiValueError
-
-logger = logging.getLogger(__name__)
-
-
-class RESTResponse(io.IOBase):
- def __init__(self, resp, data):
- self.aiohttp_response = resp
- self.status = resp.status
- self.reason = resp.reason
- self.data = data
-
- def getheaders(self):
- """Returns a CIMultiDictProxy of the response headers."""
- return self.aiohttp_response.headers
-
- def getheader(self, name, default=None):
- """Returns a given response header."""
- return self.aiohttp_response.headers.get(name, default)
-
-
-class RESTClientObject(object):
- def __init__(self, configuration, pools_size=4, maxsize=None):
- # maxsize is number of requests to host that are allowed in parallel
- if maxsize is None:
- maxsize = configuration.connection_pool_maxsize
-
- ssl_context = ssl.create_default_context(cafile=configuration.ssl_ca_cert)
- if configuration.cert_file:
- ssl_context.load_cert_chain(
- configuration.cert_file, keyfile=configuration.key_file
- )
-
- if not configuration.verify_ssl:
- ssl_context.check_hostname = False
- ssl_context.verify_mode = ssl.CERT_NONE
-
- connector = aiohttp.TCPConnector(limit=maxsize, ssl=ssl_context)
-
- self.proxy = configuration.proxy
- self.proxy_headers = configuration.proxy_headers
-
- # https pool manager
- self.pool_manager = aiohttp.ClientSession(connector=connector, trust_env=True)
-
- async def close(self):
- await self.pool_manager.close()
-
- async def request(
- self,
- method,
- url,
- query_params=None,
- headers=None,
- body=None,
- post_params=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- """Execute request
-
- :param method: http request method
- :param url: http request url
- :param query_params: query parameters in the url
- :param headers: http request headers
- :param body: request json body, for `application/json`
- :param post_params: request post parameters,
- `application/x-www-form-urlencoded`
- and `multipart/form-data`
- :param _preload_content: this is a non-applicable field for
- the AiohttpClient.
- :param _request_timeout: timeout setting for this request. If one
- number provided, it will be total request
- timeout. It can also be a pair (tuple) of
- (connection, read) timeouts.
- """
- method = method.upper()
- assert method in ["GET", "HEAD", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"]
-
- if post_params and body:
- raise ApiValueError(
- "body parameter cannot be used with post_params parameter."
- )
-
- post_params = post_params or {}
- headers = headers or {}
- # url already contains the URL query string
- # so reset query_params to empty dict
- query_params = {}
- timeout = _request_timeout or 5 * 60
-
- if "Content-Type" not in headers:
- headers["Content-Type"] = "application/json"
-
- args = {"method": method, "url": url, "timeout": timeout, "headers": headers}
-
- if self.proxy:
- args["proxy"] = self.proxy
- if self.proxy_headers:
- args["proxy_headers"] = self.proxy_headers
-
- if query_params:
- args["url"] += "?" + urlencode(query_params)
-
- # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
- if method in ["POST", "PUT", "PATCH", "OPTIONS", "DELETE"]:
- if re.search("json", headers["Content-Type"], re.IGNORECASE):
- if body is not None:
- body = json.dumps(body)
- args["data"] = body
- elif (
- headers["Content-Type"] == "application/x-www-form-urlencoded"
- ): # noqa: E501
- args["data"] = aiohttp.FormData(post_params)
- elif headers["Content-Type"] == "multipart/form-data":
- # must del headers['Content-Type'], or the correct
- # Content-Type which generated by aiohttp
- del headers["Content-Type"]
- data = aiohttp.FormData()
- for param in post_params:
- k, v = param
- if isinstance(v, tuple) and len(v) == 3:
- data.add_field(k, value=v[1], filename=v[0], content_type=v[2])
- else:
- data.add_field(k, v)
- args["data"] = data
-
- # Pass a `bytes` parameter directly in the body to support
- # other content types than Json when `body` argument is provided
- # in serialized form
- elif isinstance(body, bytes):
- args["data"] = body
- else:
- # Cannot generate the request from given parameters
- msg = """Cannot prepare a request message for provided
- arguments. Please check that your arguments match
- declared content type."""
- raise ApiException(status=0, reason=msg)
-
- r = await self.pool_manager.request(**args)
- if _preload_content:
- data = await r.read()
- r = RESTResponse(r, data)
-
- # log response body
- logger.debug("response body: %s", r.data)
-
- if not 200 <= r.status <= 299:
- raise ApiException(http_resp=r)
-
- return r
-
- async def get_request(
- self,
- url,
- headers=None,
- query_params=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- return await self.request(
- "GET",
- url,
- headers=headers,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- query_params=query_params,
- )
-
- async def head_request(
- self,
- url,
- headers=None,
- query_params=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- return await self.request(
- "HEAD",
- url,
- headers=headers,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- query_params=query_params,
- )
-
- async def options_request(
- self,
- url,
- headers=None,
- query_params=None,
- post_params=None,
- body=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- return await self.request(
- "OPTIONS",
- url,
- headers=headers,
- query_params=query_params,
- post_params=post_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
-
- async def delete_request(
- self,
- url,
- headers=None,
- query_params=None,
- body=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- return await self.request(
- "DELETE",
- url,
- headers=headers,
- query_params=query_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
-
- async def post_request(
- self,
- url,
- headers=None,
- query_params=None,
- post_params=None,
- body=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- return await self.request(
- "POST",
- url,
- headers=headers,
- query_params=query_params,
- post_params=post_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
-
- async def put_request(
- self,
- url,
- headers=None,
- query_params=None,
- post_params=None,
- body=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- return await self.request(
- "PUT",
- url,
- headers=headers,
- query_params=query_params,
- post_params=post_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
-
- async def patch_request(
- self,
- url,
- headers=None,
- query_params=None,
- post_params=None,
- body=None,
- _preload_content=True,
- _request_timeout=None,
- ):
- return await self.request(
- "PATCH",
- url,
- headers=headers,
- query_params=query_params,
- post_params=post_params,
- _preload_content=_preload_content,
- _request_timeout=_request_timeout,
- body=body,
- )
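Likewise, the aiohttp-based `RESTClientObject` deleted here ships with the published package, so callers only touch the high-level API classes. A hedged sketch of creating a task through the async client, mirroring the usage in `app.py` below (host and task input are illustrative; judging by the `app.py` changes, the published client accounts for the `/ap/v1` prefix itself, so it is no longer appended to the host):

    import asyncio

    from agent_protocol_client import AgentApi, ApiClient, Configuration
    from agent_protocol_client.models import TaskRequestBody

    async def main() -> None:
        configuration = Configuration(host="http://localhost:8000")
        async with ApiClient(configuration) as api_client:
            api_instance = AgentApi(api_client)
            task = await api_instance.create_agent_task(
                task_request_body=TaskRequestBody(input="Say hello")
            )
            print(task.task_id)

    asyncio.run(main())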
diff --git a/benchmark/agbenchmark/app.py b/benchmark/agbenchmark/app.py
index ad14cb692..0ea002996 100644
--- a/benchmark/agbenchmark/app.py
+++ b/benchmark/agbenchmark/app.py
@@ -1,78 +1,74 @@
import datetime
+import glob
+import json
+import logging
+import sys
+import time
import uuid
from collections import defaultdict, deque
+from multiprocessing import Process
from pathlib import Path
+from typing import Any, Optional
import httpx
-
-from agbenchmark.agent_protocol_client import (
- AgentApi,
- ApiClient,
- ApiException,
- Configuration,
+import psutil
+from agent_protocol_client import AgentApi, ApiClient, ApiException, Configuration
+from agent_protocol_client.models import Task, TaskRequestBody
+from fastapi import APIRouter, FastAPI, HTTPException, Request, Response
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, Extra, ValidationError
+
+from agbenchmark.config import AgentBenchmarkConfig
+from agbenchmark.reports.processing.report_types_v2 import (
+ BenchmarkRun,
+ Metrics,
+ RepositoryInfo,
+ RunDetails,
+ TaskInfo,
)
-from agbenchmark.reports.processing.report_types_v2 import BenchmarkRun
from agbenchmark.schema import TaskEvalRequestBody
+from agbenchmark.utils.data_types import ChallengeData
from agbenchmark.utils.utils import write_pretty_json
-configuration = Configuration(host="http://localhost:8000" + "/ap/v1")
-
-import json
-import os
-import sys
-from typing import Any, Optional
-
-import psutil
-from fastapi import APIRouter, FastAPI
-from fastapi import (
- HTTPException as FastAPIHTTPException, # Import HTTPException from FastAPI
-)
-from fastapi import Request, Response
-from fastapi.middleware.cors import CORSMiddleware
-
-from agbenchmark.execute_sub_process import execute_subprocess
-from agbenchmark.schema import Task, TaskRequestBody
-
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from fastapi import FastAPI
-from pydantic import BaseModel, Extra
-
-router = APIRouter()
-import glob
+sys.path.append(str(Path(__file__).parent.parent))
-# Change the current working directory to the benchmark path
-# home_path = find_absolute_benchmark_path()
-# os.chdir(home_path)
+logger = logging.getLogger(__name__)
-general_command = ["poetry", "run", "agbenchmark", "start", "--backend"]
-
-import psutil
-
-challenges_path = os.path.join(os.path.dirname(__file__), "challenges")
-
-json_files = deque(
+CHALLENGES: dict[str, ChallengeData] = {}
+challenges_path = Path(__file__).parent / "challenges"
+challenge_spec_files = deque(
glob.glob(
f"{challenges_path}/**/data.json",
recursive=True,
)
)
-CHALLENGES = {}
-task_informations = defaultdict(dict)
+logger.debug("Loading challenges...")
+while challenge_spec_files:
+ challenge_spec_file = Path(challenge_spec_files.popleft())
+ challenge_relpath = challenge_spec_file.relative_to(challenges_path.parent)
+ if challenge_relpath.is_relative_to("challenges/deprecated"):
+ continue
-while json_files:
- json_file = json_files.popleft()
+ logger.debug(f"Loading {challenge_relpath}...")
+ try:
+ challenge_info = ChallengeData.parse_file(challenge_spec_file)
+ except ValidationError as e:
+ if logging.getLogger().level == logging.DEBUG:
+ logger.warning(f"Spec file {challenge_relpath} failed to load:\n{e}")
+ logger.debug(f"Invalid challenge spec: {challenge_spec_file.read_text()}")
+ continue
+ challenge_info.spec_file = challenge_spec_file
- with open(json_file, "r") as file:
- data = json.load(file)
+ if not challenge_info.eval_id:
+ challenge_info.eval_id = str(uuid.uuid4())
+ # this will sort all the keys of the JSON systematically
+ # so that the order is always the same
+ write_pretty_json(challenge_info.dict(), challenge_spec_file)
- if "eval_id" not in data:
- data["eval_id"] = str(uuid.uuid4())
- # this will sort all the keys of the JSON systematically so that the order is always the same
- write_pretty_json(data, json_file)
- # ok
- CHALLENGES[data["eval_id"]] = data
- CHALLENGES[data["eval_id"]]["path"] = json_file
+ CHALLENGES[challenge_info.eval_id] = challenge_info
+
+task_informations = defaultdict(dict[str, Any])
def find_agbenchmark_without_uvicorn():
@@ -93,10 +89,10 @@ def find_agbenchmark_without_uvicorn():
):
try:
# Convert the process.info dictionary values to strings and concatenate them
- full_info = " ".join([str(v) for k, v in process.info.items()])
+ full_info = " ".join([str(v) for k, v in process.as_dict().items()])
if "agbenchmark" in full_info and "uvicorn" not in full_info:
- pids.append(process.info["pid"])
+ pids.append(process.pid)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return pids
@@ -114,24 +110,12 @@ class CreateReportRequest(BaseModel):
updates_list = []
-updates_list = []
-
-import json
-
origins = [
"http://localhost:8000",
"http://localhost:8080",
"http://127.0.0.1:5000",
"http://localhost:5000",
]
-app = FastAPI()
-app.add_middleware(
- CORSMiddleware,
- allow_origins=origins,
- allow_credentials=True,
- allow_methods=["*"],
- allow_headers=["*"],
-)
def stream_output(pipe):
@@ -139,275 +123,210 @@ def stream_output(pipe):
print(line, end="")
-@router.post("/reports")
-def run_single_test(body: CreateReportRequest) -> Any:
- pids = find_agbenchmark_without_uvicorn()
- print(f"pids already running with agbenchmark: {pids}")
- print(body.dict())
- # it's a hack because other parts of the code are using sys.argv
- print(os.getcwd())
- command_options = ["agbenchmark"]
- # if body.category:
- # sys.argv.append(f"--category={body.category}")
- command_options.append(f"--test={body.test}")
- if body.mock:
- command_options.append("--mock")
-
- execute_subprocess(command_options, 200)
- import json
- from pathlib import Path
-
- print("finished running")
- # List all folders in the current working directory
- path_reports = Path.cwd() / "agbenchmark_config" / "reports"
- folders = [folder for folder in path_reports.iterdir() if folder.is_dir()]
-
- # Sort the folders based on their names
- sorted_folders = sorted(folders, key=lambda x: x.name)
-
- # Get the last folder
- last_folder = sorted_folders[-1] if sorted_folders else None
-
- # Read report.json from this folder
- if last_folder:
- report_path = last_folder / "report.json"
- print(report_path)
- if report_path.exists():
- with report_path.open() as file:
- data = json.load(file)
- print(data)
- else:
- print(f"'report.json' does not exist in '{last_folder}'")
- else:
- print("No folders found.")
-
- return Response(
- content=json.dumps(data),
- status_code=200,
- media_type="application/json",
+def setup_fastapi_app(agbenchmark_config: AgentBenchmarkConfig) -> FastAPI:
+ from agbenchmark.agent_api_interface import (
+ copy_agent_artifacts_into_folder,
+ upload_artifacts,
)
+ from agbenchmark.agent_interface import copy_artifacts_into_temp_folder
+ from agbenchmark.generate_test import create_challenge_from_spec_file
+ from agbenchmark.main import run_benchmark
+ configuration = Configuration(
+ host=agbenchmark_config.host or "http://localhost:8000"
+ )
+ app = FastAPI()
+ app.add_middleware(
+ CORSMiddleware,
+ allow_origins=origins,
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+ )
+ router = APIRouter()
-import json
-from typing import Any
-
-from fastapi import FastAPI, Request, Response
-
+ @router.post("/reports")
+ def run_single_test(body: CreateReportRequest) -> dict:
+ pids = find_agbenchmark_without_uvicorn()
+ logger.info(f"pids already running with agbenchmark: {pids}")
-@router.get("/updates")
-def get_updates(request: Request) -> Any:
- from agbenchmark.__main__ import UPDATES_JSON_PATH
+ logger.debug(f"Request to /reports: {body.dict()}")
- try:
- # Read data from the "update.json" file (provide the correct file path)
- with open(UPDATES_JSON_PATH, "r") as file:
- data = json.load(file)
-
- # Get the last_update_time from the query parameter
- query_param = request.query_params.get("last_update_time")
-
- if query_param is None:
- # Handle the case when last_update_time is not provided
- print("ERROR: last_update_time parameter is missing")
- return Response(
- content=json.dumps({"error": "last_update_time parameter is missing"}),
- status_code=400,
- media_type="application/json",
- headers={"Content-Type": "application/json"},
+ # Start the benchmark in a separate process
+ benchmark_process = Process(
+ target=lambda: run_benchmark(
+ config=agbenchmark_config,
+ tests=(body.test,),
+ mock=body.mock or False,
)
-
- # Convert query_param to a Unix timestamp (assuming it's in seconds as a string)
- query_timestamp = int(query_param)
-
- # Filter the data based on the timestamp (keep timestamps before query_timestamp)
- filtered_data = [item for item in data if item["timestamp"] > query_timestamp]
-
- # Extract only the "content" field from each item
- filtered_data = [item["content"] for item in filtered_data]
-
- # Convert the filtered data to JSON
- filtered_json = json.dumps(filtered_data, indent=2)
-
- print("INFO: Returning filtered data to the client")
- return Response(
- content=filtered_json,
- status_code=200,
- media_type="application/json",
- headers={"Content-Type": "application/json"},
- )
- except FileNotFoundError:
- print("ERROR: File not found: updates.json")
- return Response(
- content=json.dumps({"error": "File not found"}),
- status_code=404,
- media_type="application/json",
- headers={"Content-Type": "application/json"},
)
-
-
-@router.post("/agent/tasks", tags=["agent"], response_model=Task)
-async def create_agent_task(task_eval_request: TaskEvalRequestBody) -> Task:
- """
- Creates a new task using the provided TaskRequestBody and returns a Task.
-
- Args:
- request (Request): FastAPI request object.
- task (TaskRequestBody): The task request containing input and additional input data.
-
- Returns:
- Task: A new task with task_id, input, additional_input, and empty lists for artifacts and steps.
-
- Example:
- Request (TaskRequestBody defined in schema.py):
- {
- "input": "Write the words you receive to the file 'output.txt'.",
- "additional_input": "python/code"
- }
-
- Response (Task defined in schema.py):
- {
- "task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
- "input": "Write the word 'Washington' to a .txt file",
- "additional_input": "python/code",
- "artifacts": [],
- }
- """
- from agbenchmark.agent_api_interface import upload_artifacts
-
- try:
- async with ApiClient(configuration) as api_client:
- api_instance = AgentApi(api_client)
- task_input = CHALLENGES[task_eval_request.eval_id]["task"]
-
- task_request_body = TaskRequestBody(input=task_input)
- task_response = await api_instance.create_agent_task(
- task_request_body=task_request_body
- )
- task_informations[task_response.task_id][
- "benchmark_start_time"
- ] = datetime.datetime.now(datetime.timezone.utc).strftime(
- "%Y-%m-%dT%H:%M:%S+00:00"
+ benchmark_process.start()
+
+ # Wait for the benchmark to finish, with a timeout of 200 seconds
+ timeout = 200
+ start_time = time.time()
+ while benchmark_process.is_alive():
+ if time.time() - start_time > timeout:
+ logger.warning(f"Benchmark run timed out after {timeout} seconds")
+ benchmark_process.terminate()
+ break
+ time.sleep(1)
+ else:
+ logger.debug(f"Benchmark finished running in {time.time() - start_time} s")
+
+ # List all folders in the current working directory
+ path_reports = agbenchmark_config.reports_folder
+ folders = [folder for folder in path_reports.iterdir() if folder.is_dir()]
+
+ # Sort the folders based on their names
+ sorted_folders = sorted(folders, key=lambda x: x.name)
+
+ # Get the last folder
+ latest_folder = sorted_folders[-1] if sorted_folders else None
+
+ # Read report.json from this folder
+ if latest_folder:
+ report_path = latest_folder / "report.json"
+ logger.debug(f"Getting latest report from {report_path}")
+ if report_path.exists():
+ with report_path.open() as file:
+ data = json.load(file)
+ logger.debug(f"Report data: {data}")
+ else:
+ logger.error(
+ "Could not get result after running benchmark: "
+ f"'report.json' does not exist in '{latest_folder}'"
+ )
+ else:
+ logger.error(
+ "Could not get result after running benchmark: no reports found"
)
- task_informations[task_response.task_id][
- "eval_id"
- ] = task_eval_request.eval_id
- await upload_artifacts(
- api_instance,
- str(Path(CHALLENGES[task_eval_request.eval_id]["path"]).parent),
- task_response.task_id,
- "artifacts_in",
+
+ return data
+
+ @router.post("/agent/tasks", tags=["agent"])
+ async def create_agent_task(task_eval_request: TaskEvalRequestBody) -> Task:
+ """
+ Creates a new task using the provided TaskEvalRequestBody and returns a Task.
+
+ Args:
+ task_eval_request: A `TaskRequestBody` extended with the `eval_id` of the challenge to run.
+
+ Returns:
+ Task: A new task with task_id, input, additional_input,
+ and an empty list of artifacts.
+
+ Example:
+ Request (TaskEvalRequestBody defined in schema.py):
+ {
+ ...,
+ "eval_id": "50da533e-3904-4401-8a07-c49adf88b5eb"
+ }
+
+ Response (Task defined in `agent_protocol_client.models`):
+ {
+ "task_id": "50da533e-3904-4401-8a07-c49adf88b5eb",
+ "input": "Write the word 'Washington' to a .txt file",
+ "artifacts": []
+ }
+ """
+ try:
+ async with ApiClient(configuration) as api_client:
+ api_instance = AgentApi(api_client)
+ task_input = CHALLENGES[task_eval_request.eval_id].task
+
+ task_request_body = TaskRequestBody(input=task_input)
+ task_response = await api_instance.create_agent_task(
+ task_request_body=task_request_body
+ )
+ task_informations[task_response.task_id][
+ "benchmark_start_time"
+ ] = datetime.datetime.now(datetime.timezone.utc).strftime(
+ "%Y-%m-%dT%H:%M:%S+00:00"
+ )
+ task_informations[task_response.task_id][
+ "eval_id"
+ ] = task_eval_request.eval_id
+ await upload_artifacts(
+ api_instance,
+ str(CHALLENGES[task_eval_request.eval_id].spec_file.parent),
+ task_response.task_id,
+ "artifacts_in",
+ )
+ return task_response
+ except ApiException as e:
+ logger.error(f"Error whilst trying to create a task:\n{e}")
+ logger.error(
+ "The above error was caused while processing request: "
+ f"{task_eval_request}"
)
- return Response(
- content=task_response.json(),
- status_code=200,
- media_type="application/json",
+ raise HTTPException(500)
+
+ @router.post("/agent/tasks/{task_id}/steps")
+ async def proxy(request: Request, task_id: str):
+ timeout = httpx.Timeout(300.0, read=300.0) # 5 minutes
+ async with httpx.AsyncClient(timeout=timeout) as client:
+ # Construct the new URL
+ new_url = f"{configuration.host}/ap/v1/agent/tasks/{task_id}/steps"
+
+ # Forward the request
+ response = await client.post(
+ new_url,
+ data=await request.body(),
+ headers=dict(request.headers),
)
- except ApiException as e:
- print(f"Error whilst trying to create a task: {task_eval_request}")
- return Response(
- content=json.dumps({"error": "Internal server error"}),
- status_code=500,
- media_type="application/json",
- )
-
-
-@router.post("/agent/tasks/{task_id}/steps")
-async def proxy(request: Request, task_id: str):
- timeout = httpx.Timeout(300.0, read=300.0) # 5 minutes
- async with httpx.AsyncClient(timeout=timeout) as client:
- # Construct the new URL
- new_url = f"http://localhost:8000/ap/v1/agent/tasks/{task_id}/steps"
-
- # Forward the request
- response = await client.post(
- new_url,
- data=await request.body(),
- headers=dict(request.headers),
- )
- # Return the response from the forwarded request
- return Response(content=response.content, status_code=response.status_code)
+ # Return the response from the forwarded request
+ return Response(content=response.content, status_code=response.status_code)
+ @router.post("/agent/tasks/{task_id}/evaluations")
+ async def create_evaluation(task_id: str) -> BenchmarkRun:
+ challenge_info = CHALLENGES[task_informations[task_id]["eval_id"]]
+ workspace = agbenchmark_config.temp_folder
+ try:
+ async with ApiClient(configuration) as api_client:
+ api_instance = AgentApi(api_client)
+ await copy_agent_artifacts_into_folder(api_instance, task_id, workspace)
+
+ artifact_path = challenge_info.spec_file.parent
+ copy_artifacts_into_temp_folder(workspace, "custom_python", artifact_path)
+
+ challenge = create_challenge_from_spec_file(challenge_info.spec_file)
+ scores = challenge.get_scores(workspace)
+ is_score_100 = 1 in scores["values"]
+
+ eval_info = BenchmarkRun(
+ repository_info=RepositoryInfo(),
+ run_details=RunDetails(
+ command=f"agbenchmark --test={challenge_info.name}",
+ benchmark_start_time=(
+ task_informations[task_id]["benchmark_start_time"]
+ ),
+ test_name=challenge_info.name,
+ ),
+ task_info=TaskInfo(
+ data_path=str(
+ challenge_info.spec_file.relative_to(challenges_path.parent)
+ ),
+ is_regression=None,
+ category=[c.value for c in challenge_info.category],
+ task=challenge_info.task,
+ answer=challenge_info.ground.answer,
+ description=challenge_info.info.description,
+ ),
+ metrics=Metrics(
+ success=is_score_100,
+ attempted=True,
+ ),
+ config={},
+ )
-@router.post("/agent/tasks/{task_id}/evaluations")
-async def create_evaluation(task_id: str) -> deque:
- from agbenchmark.__main__ import TEMP_FOLDER_ABS_PATH
- from agbenchmark.agent_api_interface import copy_agent_artifacts_into_temp_folder
- from agbenchmark.agent_interface import copy_artifacts_into_temp_folder
- from agbenchmark.generate_test import create_challenge
-
- try:
- async with ApiClient(configuration) as api_client:
- api_instance = AgentApi(api_client)
- await copy_agent_artifacts_into_temp_folder(api_instance, task_id)
- # add custom python
- data = CHALLENGES[task_informations[task_id]["eval_id"]]
-
- artifact_path = str(Path(data["path"]).parent)
- copy_artifacts_into_temp_folder(
- TEMP_FOLDER_ABS_PATH, "custom_python", artifact_path
- )
- json_file = CHALLENGES[task_informations[task_id]["eval_id"]]["path"]
- json_files = deque()
-
- _, challenge_class = create_challenge(data, json_file, json_files)
- challenge_instance = challenge_class()
- scores = challenge_instance.get_scores(config={})
- test_name = "Test" + data["name"]
- is_score_100 = 1 in scores["values"]
-
- info_details = {
- "repository_info": {
- "repo_url": None,
- "team_name": None,
- "benchmark_git_commit_sha": None,
- "agent_git_commit_sha": None,
- },
- "run_details": {
- "run_id": None,
- "command": "agbenchmark" + " --test=" + test_name,
- "completion_time": None,
- "benchmark_start_time": task_informations[task_id][
- "benchmark_start_time"
- ],
- "test_name": data["name"],
- },
- "task_info": {
- "data_path": data["path"].split("benchmark/", 1)[-1],
- "is_regression": None,
- "category": data["category"],
- "task": data["task"],
- "answer": data["ground"]["answer"],
- "description": data["info"]["description"],
- },
- "metrics": {
- "difficulty": None,
- "success": is_score_100,
- "attempted": True,
- "success_percentage": None,
- "cost": None,
- "run_time": None,
- },
- "reached_cutoff": None,
- "config": {},
- }
-
- BenchmarkRun.parse_obj(info_details)
-
- print(json.dumps(info_details, indent=4))
- return Response(
- content=json.dumps(info_details),
- status_code=200,
- media_type="application/json",
- )
- except ApiException as e:
- print(f"Error whilst trying to evaluate the task: {task_id}")
- return Response(
- content=json.dumps({"error": "Internal server error"}),
- status_code=500,
- media_type="application/json",
- )
- # path = Path(json_file).resolve()
+ logger.debug(f"Returning evaluation data:\n{eval_info.json(indent=4)}")
+ return eval_info
+ except ApiException as e:
+ logger.error(f"Error {e} whilst trying to evaluate task: {task_id}")
+ raise HTTPException(500)
+ app.include_router(router, prefix="/ap/v1")
-app.include_router(router, prefix="/ap/v1")
+ return app
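With `app.py` refactored into a factory, a server is built per `AgentBenchmarkConfig` rather than at import time. A minimal sketch of wiring it up (port is illustrative; presumably this is roughly what the `serve` subcommand mentioned in the commit message does):

    import uvicorn

    from agbenchmark.app import setup_fastapi_app
    from agbenchmark.config import AgentBenchmarkConfig

    config = AgentBenchmarkConfig.load()  # searches the path hierarchy for agbenchmark_config/
    app = setup_fastapi_app(config)       # routes are mounted under /ap/v1
    uvicorn.run(app, host="0.0.0.0", port=8080)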
diff --git a/benchmark/agbenchmark/challenges/__init__.py b/benchmark/agbenchmark/challenges/__init__.py
index e69de29bb..85d6aa86d 100644
--- a/benchmark/agbenchmark/challenges/__init__.py
+++ b/benchmark/agbenchmark/challenges/__init__.py
@@ -0,0 +1,32 @@
+import glob
+import json
+import logging
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+
+def get_unique_categories() -> set[str]:
+ """
+ Finds all data.json files in this module's directory and its subdirectories,
+ reads the "category" field from each file, and returns the set of unique
+ categories.
+ """
+ categories = set()
+
+ challenges_dir = Path(__file__).parent
+ glob_path = f"{challenges_dir}/**/data.json"
+
+ for data_file in glob.glob(glob_path, recursive=True):
+ with open(data_file, "r") as f:
+ try:
+ challenge_data = json.load(f)
+ categories.update(challenge_data.get("category", []))
+ except json.JSONDecodeError:
+ logger.error(f"Error: {data_file} is not a valid JSON file.")
+ continue
+ except IOError:
+ logger.error(f"IOError: file could not be read: {data_file}")
+ continue
+
+ return categories
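Illustrative usage of the new helper, e.g. to list all known challenge categories:

    from agbenchmark.challenges import get_unique_categories

    # Categories are read from every challenge's data.json and deduplicated via a set.
    for category in sorted(get_unique_categories()):
        print(category)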
diff --git a/benchmark/agbenchmark/challenges/verticals/scrape/4_revenue_retrieval_2/data.json b/benchmark/agbenchmark/challenges/verticals/scrape/4_revenue_retrieval_2/data.json
index 8ca61b9e7..ea1230326 100644
--- a/benchmark/agbenchmark/challenges/verticals/scrape/4_revenue_retrieval_2/data.json
+++ b/benchmark/agbenchmark/challenges/verticals/scrape/4_revenue_retrieval_2/data.json
@@ -16,21 +16,21 @@
".txt"
],
"should_contain": [
- "15",
- "112",
- "117",
- "204",
- "413",
- "2,0",
- "3,198",
- "4,046",
- "7,000",
- "11,759",
- "21,461",
- "24,578",
- "31,536",
- "53,823",
- "81,462"
+ "15",
+ "112",
+ "117",
+ "204",
+ "413",
+ "2,0",
+ "3,198",
+ "4,046",
+ "7,000",
+ "11,759",
+ "21,461",
+ "24,578",
+ "31,536",
+ "53,823",
+ "81,462"
],
"should_not_contain": []
},
diff --git a/benchmark/agbenchmark/config.py b/benchmark/agbenchmark/config.py
new file mode 100644
index 000000000..a1002bf73
--- /dev/null
+++ b/benchmark/agbenchmark/config.py
@@ -0,0 +1,119 @@
+import json
+import sys
+from datetime import datetime
+from pathlib import Path
+from typing import Optional
+
+from pydantic import BaseSettings
+
+
+def _calculate_info_test_path(base_path: Path, benchmark_start_time: datetime) -> Path:
+ """
+ Calculates the path to the directory where the test report will be saved.
+ """
+ # Ensure the reports path exists
+ base_path.mkdir(parents=True, exist_ok=True)
+
+ # Get current UTC date-time stamp
+ date_stamp = benchmark_start_time.strftime("%Y%m%dT%H%M%S")
+
+ # Default run name
+ run_name = "full_run"
+
+ # Map command-line arguments to their respective labels
+ arg_labels = {
+ "--test": None,
+ "--category": None,
+ "--maintain": "maintain",
+ "--improve": "improve",
+ "--explore": "explore",
+ }
+
+ # Identify the relevant command-line argument
+ for arg, label in arg_labels.items():
+ if arg in sys.argv:
+ test_arg = sys.argv[sys.argv.index(arg) + 1] if label is None else None
+ run_name = arg.strip("--")
+ if test_arg:
+ run_name = f"{run_name}_{test_arg}"
+ break
+
+ # Create the full new directory path with ISO standard UTC date-time stamp
+ report_path = base_path / f"{date_stamp}_{run_name}"
+
+ # Ensure the new directory is created
+ # FIXME: this is not a desirable side-effect of loading the config
+ report_path.mkdir(exist_ok=True)
+
+ return report_path
+
+
+class AgentBenchmarkConfig(BaseSettings, extra="allow"):
+ """
+ Configuration model and loader for the AGBenchmark.
+
+ Projects that want to use AGBenchmark should contain an agbenchmark_config folder
+ with a config.json file that - at minimum - specifies the `host` at which the
+ subject application exposes an Agent Protocol compliant API.
+ """
+
+ agbenchmark_config_dir: Path
+ """Path to the agbenchmark_config folder of the subject agent application."""
+
+ categories: list[str] | None = None
+ """Categories to benchmark the agent for. If omitted, all categories are assumed."""
+
+ host: str
+ """Host (scheme://address:port) of the subject agent application."""
+
+ @classmethod
+ def load(cls, config_dir: Optional[Path] = None) -> "AgentBenchmarkConfig":
+ config_dir = config_dir or cls.find_config_folder()
+ with (config_dir / "config.json").open("r") as f:
+ return cls(
+ agbenchmark_config_dir=config_dir,
+ **json.load(f),
+ )
+
+ @staticmethod
+ def find_config_folder(for_dir: Path = Path.cwd()) -> Path:
+ """
+ Finds the closest ancestor folder containing an agbenchmark_config folder,
+ and returns the path of that agbenchmark_config folder.
+ """
+ current_directory = for_dir
+ while current_directory != Path("/"):
+ if (path := current_directory / "agbenchmark_config").exists():
+ if (path / "config.json").is_file():
+ return path
+ current_directory = current_directory.parent
+ raise FileNotFoundError(
+ "No 'agbenchmark_config' directory found in the path hierarchy."
+ )
+
+ @property
+ def config_file(self) -> Path:
+ return self.agbenchmark_config_dir / "config.json"
+
+ @property
+ def reports_folder(self) -> Path:
+ return self.agbenchmark_config_dir / "reports"
+
+ def get_report_dir(self, benchmark_start_time: datetime) -> Path:
+ return _calculate_info_test_path(self.reports_folder, benchmark_start_time)
+
+ @property
+ def regression_tests_file(self) -> Path:
+ return self.reports_folder / "regression_tests.json"
+
+ @property
+ def success_rate_file(self) -> Path:
+ return self.reports_folder / "success_rate.json"
+
+ @property
+ def challenges_already_beaten_file(self) -> Path:
+ return self.agbenchmark_config_dir / "challenges_already_beaten.json"
+
+ @property
+ def temp_folder(self) -> Path:
+ return self.agbenchmark_config_dir / "temp_folder"
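A minimal config for this model needs only the required `host` field; `agbenchmark_config_dir` is filled in by `load()`, and extra keys are tolerated (`extra="allow"`). A sketch, assuming an `agbenchmark_config/config.json` containing just `{"host": "http://localhost:8000"}`:

    from agbenchmark.config import AgentBenchmarkConfig

    config = AgentBenchmarkConfig.load()  # walks up from cwd to find agbenchmark_config/
    print(config.host)            # "http://localhost:8000"
    print(config.reports_folder)  # <agbenchmark_config>/reports
    print(config.temp_folder)     # <agbenchmark_config>/temp_folder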
diff --git a/benchmark/agbenchmark/conftest.py b/benchmark/agbenchmark/conftest.py
index 71db82e01..e54746e56 100644
--- a/benchmark/agbenchmark/conftest.py
+++ b/benchmark/agbenchmark/conftest.py
@@ -1,167 +1,127 @@
import contextlib
import json
+import logging
import os
import shutil
-import sys
import threading
import time
-from pathlib import Path # noqa
+from pathlib import Path
from typing import Any, Generator
import pytest
-from agbenchmark.__main__ import TEMP_FOLDER_ABS_PATH
+from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.reports.reports import (
finalize_reports,
generate_single_call_report,
session_finish,
)
-from agbenchmark.utils.data_types import AgentBenchmarkConfig
+from agbenchmark.utils.challenge import Challenge
+from agbenchmark.utils.data_types import Category
GLOBAL_TIMEOUT = (
1500 # The tests will stop after 25 minutes so we can send the reports.
)
+agbenchmark_config = AgentBenchmarkConfig.load()
+logger = logging.getLogger(__name__)
+
pytest_plugins = ["agbenchmark.utils.dependencies"]
collect_ignore = ["challenges"]
suite_reports: dict[str, list] = {}
-def load_config_from_request(request: Any) -> AgentBenchmarkConfig:
- """
- This function loads the configuration for the agent benchmark from a given request.
-
- Args:
- request (Any): The request object from which the agent benchmark configuration is to be loaded.
-
- Returns:
- AgentBenchmarkConfig: The loaded agent benchmark configuration.
-
- Raises:
- json.JSONDecodeError: If the benchmark configuration file is not a valid JSON file.
- """
- agent_benchmark_config_path = Path.cwd() / "agbenchmark_config" / "config.json"
- try:
- with open(agent_benchmark_config_path, "r") as f:
- agent_benchmark_config = AgentBenchmarkConfig(**json.load(f))
- agent_benchmark_config.agent_benchmark_config_path = (
- agent_benchmark_config_path
- )
- return agent_benchmark_config
- except json.JSONDecodeError:
- print("Error: benchmark_config.json is not a valid JSON file.")
- raise
-
-
@pytest.fixture(scope="module")
-def config(request: Any) -> Any:
- """
- This pytest fixture is responsible for loading the agent benchmark configuration from a given request.
- This fixture is scoped to the module level, meaning it's invoked once per test module.
-
- Args:
- request (Any): The request object from which the agent benchmark configuration is to be loaded.
-
- Returns:
- Any: The loaded configuration dictionary.
-
- Raises:
- json.JSONDecodeError: If the benchmark configuration file is not a valid JSON file.
- """
- config = {}
- agent_benchmark_config_path = Path.cwd() / "agbenchmark_config" / "config.json"
- try:
- with open(agent_benchmark_config_path, "r") as f:
- agent_benchmark_config = AgentBenchmarkConfig(**json.load(f))
- agent_benchmark_config.agent_benchmark_config_path = (
- agent_benchmark_config_path
- )
- except json.JSONDecodeError:
- print("Error: benchmark_config.json is not a valid JSON file.")
- raise
-
- config["AgentBenchmarkConfig"] = agent_benchmark_config
-
- return config
+def config() -> AgentBenchmarkConfig:
+ return agbenchmark_config
@pytest.fixture(autouse=True)
-def temp_folder() -> Generator[str, None, None]:
+def temp_folder() -> Generator[Path, None, None]:
"""
- This pytest fixture is responsible for setting up and tearing down the temporary folder for each test.
+ Pytest fixture that sets up and tears down the temporary folder for each test.
It is automatically used in every test due to the 'autouse=True' parameter.
- It is used in order to let agbenchmark store files so they can then be evaluated.
"""
# create output directory if it doesn't exist
- if not os.path.exists(TEMP_FOLDER_ABS_PATH):
- os.makedirs(TEMP_FOLDER_ABS_PATH, exist_ok=True)
+ if not os.path.exists(agbenchmark_config.temp_folder):
+ os.makedirs(agbenchmark_config.temp_folder, exist_ok=True)
- yield
+ yield agbenchmark_config.temp_folder
# teardown after test function completes
if not os.getenv("KEEP_TEMP_FOLDER_FILES"):
- for filename in os.listdir(TEMP_FOLDER_ABS_PATH):
- file_path = os.path.join(TEMP_FOLDER_ABS_PATH, filename)
+ for filename in os.listdir(agbenchmark_config.temp_folder):
+ file_path = os.path.join(agbenchmark_config.temp_folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
- print(f"Failed to delete {file_path}. Reason: {e}")
+ logger.warning(f"Failed to delete {file_path}. Reason: {e}")
-def pytest_addoption(parser: Any) -> None:
+def pytest_addoption(parser: pytest.Parser) -> None:
"""
- This function is a pytest hook that is called to add command-line options.
- It is used to add custom command-line options that are specific to the agent benchmark tests.
- These options can be used to control the behavior of the tests.
- The "--mock" option is used to run the tests in mock mode.
- The "--host" option is used to specify the host for the tests.
- The "--category" option is used to run only tests of a specific category.
- The "--nc" option is used to run the tests without caching.
- The "--cutoff" option is used to specify a cutoff time for the tests.
- The "--improve" option is used to run only the tests that are marked for improvement.
- The "--maintain" option is used to run only the tests that are marked for maintenance.
- The "--explore" option is used to run the tests in exploration mode.
- The "--test" option is used to run a specific test.
- The "--no_dep" option is used to run the tests without dependencies.
- The "--keep_answers" option is used to keep the answers of the tests.
+ Pytest hook that adds command-line options to the `pytest` command.
+ The added options are specific to agbenchmark and control its behavior:
+ * `--mock` is used to run the tests in mock mode.
+ * `--host` is used to specify the host for the tests.
+ * `--category` is used to run only tests of a specific category.
+ * `--nc` is used to run the tests without caching.
+ * `--cutoff` is used to specify a cutoff time for the tests.
+ * `--improve` is used to run only the tests that are marked for improvement.
+ * `--maintain` is used to run only the tests that are marked for maintenance.
+ * `--explore` is used to run the tests in exploration mode.
+ * `--test` is used to run a specific test.
+ * `--no-dep` is used to run the tests without dependencies.
+ * `--keep-answers` is used to keep the answers of the tests.
Args:
- parser (Any): The parser object to which the command-line options are added.
+ parser: The Pytest CLI parser to which the command-line options are added.
"""
- parser.addoption("--no_dep", action="store_true", default=False)
- parser.addoption("--mock", action="store_true", default=False)
- parser.addoption("--host", action="store_true", default=None)
- parser.addoption("--nc", action="store_true", default=False)
- parser.addoption("--cutoff", action="store_true", default=False)
- parser.addoption("--category", action="store_true", default=False)
- parser.addoption("--test", action="store_true", default=None)
- parser.addoption("--improve", action="store_true", default=False)
- parser.addoption("--maintain", action="store_true", default=False)
- parser.addoption("--explore", action="store_true", default=False)
- parser.addoption("--keep-answers", action="store_true", default=False)
+ parser.addoption("--no-dep", action="store_true")
+ parser.addoption("--mock", action="store_true")
+ parser.addoption("--host", default=None)
+ parser.addoption("--nc", action="store_true")
+ parser.addoption("--cutoff", action="store")
+ parser.addoption("--category", action="append")
+ parser.addoption("--test", action="append")
+ parser.addoption("--improve", action="store_true")
+ parser.addoption("--maintain", action="store_true")
+ parser.addoption("--explore", action="store_true")
+ parser.addoption("--keep-answers", action="store_true")
+
+
+def pytest_configure(config: pytest.Config) -> None:
+ # Register category markers to prevent "unknown marker" warnings
+ for category in Category:
+ config.addinivalue_line("markers", f"{category.value}: {category}")
@pytest.fixture(autouse=True)
-def check_regression(request: Any) -> None:
+def check_regression(request: pytest.FixtureRequest) -> None:
"""
- This pytest fixture is responsible for checking if a test is a regression test.
- It is automatically used in every test due to the 'autouse=True' parameter.
- The test name and the agent benchmark configuration are retrieved from the request object.
- The regression reports are loaded from the path specified in the agent benchmark configuration.
- If the "--improve" option is used and the test name exists in the regression tests, the test is skipped.
- If the "--maintain" option is used and the test name does not exist in the regression tests, the test is also skipped.
+ Fixture that checks, for each test, whether it should be treated as a regression
+ test and whether it should be skipped on that basis.
+
+ The test name is retrieved from the `request` object. Regression reports are loaded
+ from the path specified in the benchmark configuration.
+
+ Effect:
+ * If the `--improve` option is used and the current test is considered a regression
+ test, it is skipped.
+ * If the `--maintain` option is used and the current test is not considered a
+ regression test, it is also skipped.
Args:
- request (Any): The request object from which the test name and the agent benchmark configuration are retrieved.
+ request: The request object from which the test name and the benchmark
+ configuration are retrieved.
"""
test_name = request.node.parent.name
- agent_benchmark_config = load_config_from_request(request)
- with contextlib.suppress(Exception):
- test = agent_benchmark_config.get_regression_reports_path()
- data = json.loads(test)
+ with contextlib.suppress(FileNotFoundError):
+ regression_report = agbenchmark_config.regression_tests_file
+ data = json.loads(regression_report.read_bytes())
challenge_location = getattr(request.node.parent.cls, "CHALLENGE_LOCATION", "")
skip_string = f"Skipping {test_name} at {challenge_location}"
@@ -173,55 +133,33 @@ def check_regression(request: Any) -> None:
pytest.skip(f"{skip_string} because it's not a regression test")
-# this is to get the challenge_data from every test
-@pytest.fixture(autouse=True)
-def challenge_data(request: Any) -> None:
- """
- This pytest fixture is responsible for providing the challenge data for each test.
- It is automatically used in every test due to the 'autouse=True' parameter.
- The challenge data is retrieved from the request object's parameters.
- This fixture is essential for the pytest system as it provides the necessary data for each test.
-
- Args:
- request (Any): The request object from which the challenge data is retrieved.
-
- Returns:
- None: The challenge data is directly passed to the test function and does not need to be returned.
- """
- return request.param
-
-
@pytest.fixture(autouse=True, scope="session")
-def mock(request: Any) -> None:
+def mock(request: pytest.FixtureRequest) -> bool:
"""
- This pytest fixture is responsible for retrieving the value of the "--mock" command-line option.
- It is automatically used in every test session due to the 'autouse=True' parameter and 'session' scope.
- The "--mock" option is used to run the tests in mock mode.
- This fixture is essential for the pytest system as it provides the necessary command-line option value for each test session.
+ Pytest fixture that retrieves the value of the `--mock` command-line option.
+ The `--mock` option is used to run the tests in mock mode.
Args:
- request (Any): The request object from which the "--mock" option value is retrieved.
+ request: The `pytest.FixtureRequest` from which the `--mock` option value
+ is retrieved.
Returns:
- None: The "--mock" option value is directly passed to the test session and does not need to be returned.
+ bool: Whether `--mock` is set for this session.
"""
return request.config.getoption("--mock")
@pytest.fixture(autouse=True, scope="function")
-def timer(request: Any) -> Any:
+def timer(request: pytest.FixtureRequest) -> Generator[None, None, None]:
"""
- This pytest fixture is responsible for timing the execution of each test.
- It is automatically used in every test due to the 'autouse=True' parameter and 'function' scope.
+ Pytest fixture that times the execution of each test.
At the start of each test, it records the current time.
- After the test function completes, it calculates the run time and appends it to the test node's user properties.
- This allows the run time of each test to be accessed later for reporting or analysis.
+ After the test function completes, it calculates the run time and adds it to
+ the test node's `user_properties`.
Args:
- request (Any): The request object from which the test node is retrieved.
-
- Yields:
- None: Control is yielded back to the test function.
+ request: The `pytest.FixtureRequest` object through which the run time is stored
+ in the test node's `user_properties`.
"""
start_time = time.time()
yield
@@ -229,33 +167,21 @@ def timer(request: Any) -> Any:
request.node.user_properties.append(("run_time", run_time))
-def pytest_runtest_makereport(item: Any, call: Any) -> None:
+def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo) -> None:
"""
- This function is a pytest hook that is called when a test report is being generated.
+ Pytest hook that is called when a test report is being generated.
It is used to generate and finalize reports for each test.
Args:
- item (Any): The test item for which the report is being generated.
- call (Any): The call object from which the test result is retrieved.
+ item: The test item for which the report is being generated.
+ call: The call object from which the test result is retrieved.
"""
- challenge_data = item.funcargs.get("challenge_data", None)
-
- if not challenge_data:
- # this will only happen for dummy dependency setup tests
- return
-
- challenge_location: str = getattr(item.cls, "CHALLENGE_LOCATION", "")
-
- flags = (
- "--test" in sys.argv
- or "--maintain" in sys.argv
- or "--improve" in sys.argv
- or "--explore" in sys.argv
- )
+ challenge: type[Challenge] = item.cls # type: ignore
+ challenge_data = challenge.data
+ challenge_location = challenge.CHALLENGE_LOCATION
if call.when == "call":
answers = getattr(item, "answers", None)
- challenge_location: str = getattr(item.cls, "CHALLENGE_LOCATION", "")
test_name = item.nodeid.split("::")[1]
item.test_name = test_name
@@ -264,14 +190,14 @@ def pytest_runtest_makereport(item: Any, call: Any) -> None:
)
if call.when == "teardown":
- finalize_reports(item, challenge_data)
+ finalize_reports(agbenchmark_config, item, challenge_data)
def timeout_monitor(start_time: int) -> None:
"""
- This function is responsible for monitoring the total execution time of the test suite.
- It runs in a separate thread and checks every second if the total execution time has exceeded the global timeout.
- If the global timeout is exceeded, it terminates the pytest session with a specific return code.
+    Function that limits the total execution time of the test suite.
+    It is meant to run in a separate thread, and calls `pytest.exit`
+    when the total execution time exceeds the global timeout.
Args:
start_time (int): The start time of the test suite.
@@ -282,14 +208,11 @@ def timeout_monitor(start_time: int) -> None:
pytest.exit("Test suite exceeded the global timeout", returncode=1)
-def pytest_sessionstart(session: Any) -> None:
+def pytest_sessionstart(session: pytest.Session) -> None:
"""
- This function is a pytest hook that is called at the start of the test session.
- It starts the timeout monitor in a separate thread.
- The timeout monitor checks if the total execution time of the test suite has exceeded the global timeout.
+ Pytest hook that is called at the start of a test session.
- Args:
- session (Any): The pytest session object.
+ Sets up and runs a `timeout_monitor` in a separate thread.
"""
start_time = time.time()
t = threading.Thread(target=timeout_monitor, args=(start_time,))
@@ -297,94 +220,125 @@ def pytest_sessionstart(session: Any) -> None:
t.start()
-def pytest_sessionfinish(session: Any) -> None:
+def pytest_sessionfinish(session: pytest.Session) -> None:
"""
- This function is a pytest hook that is called at the end of the test session.
- It is used to finalize and save the test reports.
- The reports are saved in a specific location defined in the suite reports.
+ Pytest hook that is called at the end of a test session.
- Args:
- session (Any): The pytest session object.
+ Finalizes and saves the test reports.
"""
- session_finish(suite_reports)
+ session_finish(agbenchmark_config, suite_reports)
@pytest.fixture
-def scores(request: Any) -> None:
+def scores(request: pytest.FixtureRequest) -> dict | None:
"""
- This pytest fixture is responsible for retrieving the scores of the test class.
- The scores are retrieved from the test class's 'scores' attribute using the test class name.
- This fixture is essential for the pytest system as it provides the necessary scores for each test.
+ Pytest fixture that retrieves the scores of the test class.
+ The scores are retrieved from the `Challenge.scores` attribute
+ using the test class name.
Args:
- request (Any): The request object from which the test class is retrieved.
-
- Returns:
- None: The scores are directly passed to the test function and do not need to be returned.
+ request: The request object.
"""
- test_class_name = request.node.cls.__name__
- return request.node.cls.scores.get(test_class_name)
+ challenge: type[Challenge] = request.node.cls
+ return challenge.scores.get(challenge.__name__)
-# this is adding the dependency marker and category markers automatically from the json
-def pytest_collection_modifyitems(items: Any, config: Any) -> None:
+def pytest_collection_modifyitems(
+ items: list[pytest.Item], config: pytest.Config
+) -> None:
"""
- This function is a pytest hook that is called after the test collection has been performed.
- It is used to modify the collected test items based on the agent benchmark configuration.
- The function loads the agent benchmark configuration from the specified path and retrieves the regression reports.
- For each test item, it checks if the test method exists and retrieves the dependencies and categories from the test class instance.
- If the "--improve" or "--category" options are used, the dependencies are filtered based on the regression data.
- If the "--test", "--no_dep", or "--maintain" options are used, the dependencies are cleared.
- The function then dynamically adds the 'depends' and 'category' markers to the test item.
- This function is essential for the pytest system as it provides the necessary modification of the test items based on the agent benchmark configuration.
+ Pytest hook that is called after initial test collection has been performed.
+ Modifies the collected test items based on the agent benchmark configuration,
+ adding the dependency marker and category markers.
Args:
- items (Any): The collected test items to be modified.
- config (Any): The pytest configuration object from which the agent benchmark configuration path is retrieved.
+ items: The collected test items to be modified.
+ config: The active pytest configuration.
"""
- agent_benchmark_config_path = str(Path.cwd() / "agbenchmark_config" / "config.json")
- try:
- with open(agent_benchmark_config_path) as f:
- agent_benchmark_config = AgentBenchmarkConfig(**json.load(f))
- except json.JSONDecodeError:
- print("Error: benchmark_config.json is not a valid JSON file.")
- raise
-
- regression_file = agent_benchmark_config.get_regression_reports_path()
- data = (
- json.loads(open(regression_file, "r").read())
- if os.path.exists(regression_file)
- else {}
+ regression_file = agbenchmark_config.regression_tests_file
+ regression_tests: dict[str, Any] = (
+ json.loads(regression_file.read_bytes()) if regression_file.is_file() else {}
)
- for item in items:
- # Assuming item.cls is your test class
- test_class_instance = item.cls()
+ try:
+ challenges_beaten_in_the_past = json.loads(
+ agbenchmark_config.challenges_already_beaten_file.read_bytes()
+ )
+ except FileNotFoundError:
+ challenges_beaten_in_the_past = {}
+
+    selected_tests: tuple[str, ...] = config.getoption("--test")  # type: ignore
+    selected_categories: tuple[str, ...] = config.getoption("--category")  # type: ignore
+
+ # Can't use a for-loop to remove items in-place
+ i = 0
+ while i < len(items):
+ item = items[i]
+ challenge = item.cls
+ challenge_name = item.cls.__name__
+
+ if not issubclass(challenge, Challenge):
+ item.warn(
+ pytest.PytestCollectionWarning(
+ f"Non-challenge item collected: {challenge}"
+ )
+ )
+ i += 1
+ continue
+
+ # --test: remove the test from the set if it's not specifically selected
+ if selected_tests and challenge.data.name not in selected_tests:
+ items.remove(item)
+ continue
- if "test_method" not in item.name:
+ # Filter challenges for --maintain, --improve, and --explore:
+ # --maintain -> only challenges expected to be passed (= regression tests)
+ # --improve -> only challenges that so far are not passed (reliably)
+ # --explore -> only challenges that have never been passed
+ is_regression_test = regression_tests.get(challenge.data.name, None)
+ has_been_passed = challenges_beaten_in_the_past.get(challenge.data.name, False)
+ if (
+ (config.getoption("--maintain") and not is_regression_test)
+ or (config.getoption("--improve") and is_regression_test)
+ or (config.getoption("--explore") and has_been_passed)
+ ):
+ items.remove(item)
continue
- # Then you can access your properties
- name = item.parent.cls.__name__
- # dependencies = test_class_instance.data.dependencies
-
- # Filter dependencies if they exist in regression data if its an improvement test
- # if config.getoption("--improve") or config.getoption(
- # "--category"
- # ):
- # dependencies = [dep for dep in dependencies if not data.get(dep, None)]
- # if (
- # config.getoption("--test")
- # or config.getoption("--no_dep")
- # or config.getoption("--maintain")
- # ):
- dependencies = test_class_instance.dependencies
-
- # Add depends marker dynamically
- item.add_marker(pytest.mark.depends(on=dependencies, name=name))
-
- categories = test_class_instance.data.category
-
- # Add category marker dynamically
- for category in categories:
- item.add_marker(getattr(pytest.mark, category))
+ dependencies = challenge.data.dependencies
+ if (
+ config.getoption("--test")
+ or config.getoption("--no-dep")
+ or config.getoption("--maintain")
+ ):
+ # Ignore dependencies:
+ # --test -> user selected specific tests to run, don't care about deps
+ # --no-dep -> ignore dependency relations regardless of test selection
+ # --maintain -> all "regression" tests must pass, so run all of them
+ dependencies = []
+ elif config.getoption("--improve"):
+ # Filter dependencies, keep only deps that are not "regression" tests
+ dependencies = [
+ d for d in dependencies if not regression_tests.get(d, None)
+ ]
+
+ # Set category markers
+ challenge_categories = [c.value for c in challenge.data.category]
+ for category in challenge_categories:
+ item.add_marker(category)
+
+ # Enforce category selection
+ if selected_categories:
+ if not set(challenge_categories).intersection(set(selected_categories)):
+ items.remove(item)
+ continue
+ # # Filter dependencies, keep only deps from selected categories
+ # dependencies = [
+ # d for d in dependencies
+ # if not set(d.categories).intersection(set(selected_categories))
+ # ]
+
+ # Add marker for the DependencyManager
+ item.add_marker(pytest.mark.depends(on=dependencies, name=challenge_name))
+
+ i += 1
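
Review note: the index-based while-loop above exists because removing elements
from a Python list while iterating it with a plain for-loop silently skips the
element that follows each removal. A minimal standalone illustration (not part
of the commit):

    items = ["a", "b", "c", "d"]
    for item in items:
        if item in ("a", "b"):
            items.remove(item)
    print(items)  # ['b', 'd'] -- "b" survives because it was skipped

    # Advancing an explicit index, as the hook does, keeps the scan correct:
    items = ["a", "b", "c", "d"]
    i = 0
    while i < len(items):
        if items[i] in ("a", "b"):
            items.remove(items[i])
            continue
        i += 1
    print(items)  # ['c', 'd']
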
diff --git a/benchmark/agbenchmark/execute_sub_process.py b/benchmark/agbenchmark/execute_sub_process.py
deleted file mode 100644
index b981e6be5..000000000
--- a/benchmark/agbenchmark/execute_sub_process.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import platform
-import queue
-import select
-import subprocess
-import time
-from threading import Thread
-from typing import Any
-
-import psutil
-
-
-def run_linux_env(process: Any, start_time: float, timeout: float) -> None:
- while True:
- try:
- # This checks if there's data to be read from stdout without blocking.
- if process.stdout and select.select([process.stdout], [], [], 0)[0]:
- output = process.stdout.readline()
- print(output.strip())
- except Exception as e:
- continue
-
- # Check if process has ended, has no more output, or exceeded timeout
- if process.poll() is not None or (time.time() - start_time > timeout):
- break
-
- if time.time() - start_time > timeout:
- print("The Python function has exceeded the time limit and was terminated.")
- parent = psutil.Process(process.pid)
- for child in parent.children(recursive=True):
- child.kill()
- parent.kill()
-
- else:
- print("The Python function has finished running.")
-
-
-def enqueue_output(out: Any, my_queue: Any) -> None:
- for line in iter(out.readline, b""):
- my_queue.put(line)
- out.close()
-
-
-def run_windows_env(process: Any, start_time: float, timeout: float) -> None:
- my_queue: Any = queue.Queue()
- thread = Thread(target=enqueue_output, args=(process.stdout, my_queue))
- thread.daemon = True
- thread.start()
-
- while True:
- try:
- output = my_queue.get_nowait().strip()
- print(output)
- except queue.Empty:
- pass
-
- if process.poll() is not None or (time.time() - start_time > timeout):
- break
-
- if time.time() - start_time > timeout:
- print("The Python function has exceeded the time limit and was terminated.")
- process.terminate()
-
-
-def execute_subprocess(command, timeout):
- process = subprocess.Popen(
- command,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- universal_newlines=True,
- bufsize=1,
- )
- start_time = time.time()
- if platform.system() == "Windows":
- run_windows_env(process, start_time, timeout)
- else:
- run_linux_env(process, start_time, timeout)
- process.wait()
- if process.returncode != 0:
- print(f"The agent timed out")
diff --git a/benchmark/agbenchmark/generate_test.py b/benchmark/agbenchmark/generate_test.py
index 363d53697..82f9a09e1 100644
--- a/benchmark/agbenchmark/generate_test.py
+++ b/benchmark/agbenchmark/generate_test.py
@@ -1,147 +1,34 @@
import glob
import importlib
-import json
+import logging
import os
-import sys
-import types
from collections import deque
from pathlib import Path
-from typing import Any, Dict, Optional, Union
-import pytest
-
-from agbenchmark.__main__ import CHALLENGES_ALREADY_BEATEN
-from agbenchmark.agent_api_interface import append_updates_file
-from agbenchmark.agent_protocol_client.models.step import Step
from agbenchmark.utils.challenge import Challenge
-from agbenchmark.utils.data_types import AgentBenchmarkConfig, ChallengeData
+from agbenchmark.utils.data_types import ChallengeData
DATA_CATEGORY = {}
+logger = logging.getLogger(__name__)
-def create_single_test(
- data: Dict[str, Any] | ChallengeData,
- challenge_location: str,
- file_datum: Optional[list[dict[str, Any]]] = None,
-) -> None:
- challenge_data = None
- artifacts_location = None
- if isinstance(data, ChallengeData):
- challenge_data = data
- data = data.get_data()
-
- DATA_CATEGORY[data["name"]] = data["category"][0]
-
- # Define test class dynamically
- challenge_class = types.new_class(f"Test{data['name']}", (Challenge,))
- print(challenge_location)
- # clean_challenge_location = get_test_path(challenge_location)
- setattr(challenge_class, "CHALLENGE_LOCATION", challenge_location)
-
- setattr(
- challenge_class,
- "ARTIFACTS_LOCATION",
- artifacts_location or str(Path(challenge_location).resolve().parent),
- )
-
- # Define test method within the dynamically created class
- @pytest.mark.asyncio
- async def test_method(self, config: Dict[str, Any], request) -> None: # type: ignore
- # create a random number between 0 and 1
- test_name = self.data.name
-
- try:
- with open(CHALLENGES_ALREADY_BEATEN, "r") as f:
- challenges_beaten_in_the_past = json.load(f)
- except:
- challenges_beaten_in_the_past = {}
-
- if request.config.getoption("--explore") and challenges_beaten_in_the_past.get(
- test_name, False
- ):
- return None
-
- # skip optional categories
- self.skip_optional_categories(config)
-
- from helicone.lock import HeliconeLockManager
-
- if os.environ.get("HELICONE_API_KEY"):
- HeliconeLockManager.write_custom_property("challenge", self.data.name)
- cutoff = self.data.cutoff or 60
+def create_challenge_from_spec_file(spec_file: Path) -> type[Challenge]:
+ challenge = Challenge.from_challenge_spec(spec_file)
+ DATA_CATEGORY[challenge.data.name] = challenge.data.category[0].value
+ return challenge
- timeout = cutoff
- if "--nc" in sys.argv:
- timeout = 100000
- if "--cutoff" in sys.argv:
- timeout = int(sys.argv[sys.argv.index("--cutoff") + 1])
- await self.setup_challenge(config, timeout)
+def create_challenge_from_spec_file_path(spec_file_path: str) -> type[Challenge]:
+ spec_file = Path(spec_file_path).resolve()
+ return create_challenge_from_spec_file(spec_file)
- scores = self.get_scores(config)
- request.node.answers = (
- scores["answers"] if "--keep-answers" in sys.argv else None
- )
- del scores["answers"] # remove answers from scores
- request.node.scores = scores # store scores in request.node
- is_score_100 = 1 in scores["values"]
-
- evaluation = "Correct!" if is_score_100 else "Incorrect."
- eval_step = Step(
- input=evaluation,
- additional_input=None,
- task_id="irrelevant, this step is a hack",
- step_id="irrelevant, this step is a hack",
- name="",
- status="created",
- output=None,
- additional_output=None,
- artifacts=[],
- is_last=True,
- )
- await append_updates_file(eval_step)
-
- assert is_score_100
-
- # Parametrize the method here
- test_method = pytest.mark.parametrize(
- "challenge_data",
- [data],
- indirect=True,
- )(test_method)
-
- setattr(challenge_class, "test_method", test_method)
-
- # Attach the new class to a module so it can be discovered by pytest
- module = importlib.import_module(__name__)
- setattr(module, f"Test{data['name']}", challenge_class)
- return challenge_class
-
-def create_single_suite_challenge(challenge_data: ChallengeData, path: Path) -> None:
- create_single_test(challenge_data, str(path))
-
-
-def create_challenge(
- data: Dict[str, Any],
- json_file: str,
- json_files: deque,
-) -> Union[deque, Any]:
- path = Path(json_file).resolve()
- print("Creating challenge for", path)
-
- challenge_class = create_single_test(data, str(path))
- print("Creation complete for", path)
-
- return json_files, challenge_class
-
-
-def generate_tests() -> None: # sourcery skip: invert-any-all
- print("Generating tests...")
+def load_challenges() -> None:
+ logger.info("Loading challenges...")
challenges_path = os.path.join(os.path.dirname(__file__), "challenges")
- print(f"Looking for challenges in {challenges_path}...")
+ logger.debug(f"Looking for challenges in {challenges_path}...")
json_files = deque(
glob.glob(
@@ -150,74 +37,39 @@ def generate_tests() -> None: # sourcery skip: invert-any-all
)
)
- print(f"Found {len(json_files)} challenges.")
- print(f"Sample path: {json_files[0]}")
-
- agent_benchmark_config_path = str(Path.cwd() / "agbenchmark_config" / "config.json")
- try:
- with open(agent_benchmark_config_path, "r") as f:
- agent_benchmark_config = AgentBenchmarkConfig(**json.load(f))
- agent_benchmark_config.agent_benchmark_config_path = (
- agent_benchmark_config_path
- )
- except json.JSONDecodeError:
- print("Error: benchmark_config.json is not a valid JSON file.")
- raise
-
- regression_reports_path = agent_benchmark_config.get_regression_reports_path()
- if regression_reports_path and os.path.exists(regression_reports_path):
- with open(regression_reports_path, "r") as f:
- regression_tests = json.load(f)
- else:
- regression_tests = {}
+ logger.debug(f"Found {len(json_files)} challenges.")
+ logger.debug(f"Sample path: {json_files[0]}")
+ loaded, ignored = 0, 0
while json_files:
- json_file = (
- json_files.popleft()
- ) # Take and remove the first element from json_files
+ # Take and remove the first element from json_files
+ json_file = json_files.popleft()
if challenge_should_be_ignored(json_file):
+ ignored += 1
continue
- data = ChallengeData.get_json_from_path(json_file)
+ challenge_info = ChallengeData.parse_file(json_file)
- commands = sys.argv
- # --by flag
- if "--category" in commands:
- categories = data.get("category", [])
- commands_set = set(commands)
+ challenge_class = create_challenge_from_spec_file_path(json_file)
- # Convert the combined list to a set
- categories_set = set(categories)
+ logger.debug(f"Generated test for {challenge_info.name}")
+ _add_challenge_to_module(challenge_class)
+ loaded += 1
- # If there's no overlap with commands
- if not categories_set.intersection(commands_set):
- continue
+ logger.info(f"Loading challenges complete: loaded {loaded}, ignored {ignored}.")
- # --test flag, only run the test if it's the exact one specified
- tests = []
- for command in commands:
- if command.startswith("--test="):
- tests.append(command.split("=")[1])
- if tests and data["name"] not in tests:
- continue
-
- # --maintain and --improve flag
- in_regression = regression_tests.get(data["name"], None)
- improve_flag = in_regression and "--improve" in commands
- maintain_flag = not in_regression and "--maintain" in commands
- if "--maintain" in commands and maintain_flag:
- continue
- elif "--improve" in commands and improve_flag:
- continue
- json_files, challenge_class = create_challenge(data, json_file, json_files)
-
- print(f"Generated test for {data['name']}.")
- print("Test generation complete.")
+def challenge_should_be_ignored(json_file_path: str) -> bool:
+ return (
+ "challenges/deprecated" in json_file_path
+ or "challenges/library" in json_file_path
+ )
-def challenge_should_be_ignored(json_file):
- return "challenges/deprecated" in json_file or "challenges/library" in json_file
+def _add_challenge_to_module(challenge: type[Challenge]) -> None:
+ # Attach the Challenge class to this module so it can be discovered by pytest
+ module = importlib.import_module(__name__)
+    setattr(module, challenge.__name__, challenge)
-generate_tests()
+load_challenges()
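
Review note: `_add_challenge_to_module` works because pytest collects any
`Test*` class it finds as a module attribute. A minimal sketch of the same
trick, with hypothetical names:

    import importlib

    def make_challenge_class(name: str) -> type:
        def test_method(self) -> None:
            assert True

        return type(f"Test{name}", (), {"test_method": test_method})

    # Attaching the class to the current module lets pytest collect
    # TestExample.test_method as if it had been written out by hand.
    module = importlib.import_module(__name__)
    setattr(module, "TestExample", make_challenge_class("Example"))
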
diff --git a/benchmark/agbenchmark/main.py b/benchmark/agbenchmark/main.py
new file mode 100644
index 000000000..234bd2bb7
--- /dev/null
+++ b/benchmark/agbenchmark/main.py
@@ -0,0 +1,153 @@
+import logging
+import os
+from pathlib import Path
+from typing import Optional, Sequence
+
+from dotenv import load_dotenv
+
+from agbenchmark.challenges import get_unique_categories
+from agbenchmark.config import AgentBenchmarkConfig
+
+load_dotenv()
+
+logger = logging.getLogger(__name__)
+
+
+def run_benchmark(
+ config: AgentBenchmarkConfig,
+ maintain: bool = False,
+ improve: bool = False,
+ explore: bool = False,
+    tests: tuple[str, ...] = tuple(),
+    categories: tuple[str, ...] = tuple(),
+    skip_categories: tuple[str, ...] = tuple(),
+ mock: bool = False,
+ no_dep: bool = False,
+ no_cutoff: bool = False,
+ cutoff: Optional[int] = None,
+ keep_answers: bool = False,
+ server: bool = False,
+) -> int:
+ """
+ Starts the benchmark. If a category flag is provided, only challenges with the
+ corresponding mark will be run.
+ """
+ import pytest
+
+ from agbenchmark.reports.ReportManager import SingletonReportManager
+
+ validate_args(
+ maintain=maintain,
+ improve=improve,
+ explore=explore,
+ tests=tests,
+ categories=categories,
+ skip_categories=skip_categories,
+ no_cutoff=no_cutoff,
+ cutoff=cutoff,
+ )
+
+ SingletonReportManager()
+
+ for key, value in vars(config).items():
+ logger.debug(f"config.{key} = {repr(value)}")
+
+ pytest_args = ["-vs"]
+
+ if tests:
+ logger.info(f"Running specific test(s): {' '.join(tests)}")
+ pytest_args += [f"--test={t}" for t in tests]
+ else:
+ all_categories = get_unique_categories()
+
+ if categories or skip_categories:
+ categories_to_run = set(categories) or all_categories
+ if skip_categories:
+ categories_to_run = categories_to_run.difference(set(skip_categories))
+ assert categories_to_run, "Error: You can't skip all categories"
+ pytest_args += [f"--category={c}" for c in categories_to_run]
+ logger.info(f"Running tests of category: {categories_to_run}")
+ else:
+ logger.info("Running all categories")
+
+ if maintain:
+ logger.info("Running only regression tests")
+ elif improve:
+ logger.info("Running only non-regression tests")
+ elif explore:
+ logger.info("Only attempt challenges that have never been beaten")
+
+ if mock:
+        # TODO: unhack; ugly hack to make the mock work when calling from the API
+        os.environ["IS_MOCK"] = "True"
+
+ # Pass through flags
+ for flag, active in {
+ "--maintain": maintain,
+ "--improve": improve,
+ "--explore": explore,
+ "--no-dep": no_dep,
+ "--mock": mock,
+ "--nc": no_cutoff,
+ "--keep-answers": keep_answers,
+ }.items():
+ if active:
+ pytest_args.append(flag)
+
+ if cutoff:
+ pytest_args.append(f"--cutoff={cutoff}")
+ logger.debug(f"Setting cuttoff override to {cutoff} seconds.")
+
+ current_dir = Path(__file__).resolve().parent
+ pytest_args.append(str(current_dir / "generate_test.py"))
+
+ pytest_args.append("--cache-clear")
+ exit_code = pytest.main(pytest_args)
+
+ SingletonReportManager.clear_instance()
+ return exit_code
+
+
+class InvalidInvocationError(ValueError):
+ pass
+
+
+def validate_args(
+ maintain: bool,
+ improve: bool,
+ explore: bool,
+ tests: Sequence[str],
+ categories: Sequence[str],
+ skip_categories: Sequence[str],
+ no_cutoff: bool,
+ cutoff: Optional[int],
+) -> None:
+ if categories:
+ all_categories = get_unique_categories()
+ invalid_categories = set(categories) - all_categories
+ if invalid_categories:
+ raise InvalidInvocationError(
+ "One or more invalid categories were specified: "
+ f"{', '.join(invalid_categories)}.\n"
+ f"Valid categories are: {', '.join(all_categories)}."
+ )
+
+ if (maintain + improve + explore) > 1:
+ raise InvalidInvocationError(
+ "You can't use --maintain, --improve or --explore at the same time. "
+ "Please choose one."
+ )
+
+ if tests and (categories or skip_categories or maintain or improve or explore):
+ raise InvalidInvocationError(
+ "If you're running a specific test make sure no other options are "
+ "selected. Please just pass the --test."
+ )
+
+ if no_cutoff and cutoff:
+ raise InvalidInvocationError(
+ "You can't use both --nc and --cutoff at the same time. "
+ "Please choose one."
+ )
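
Review note: a hedged sketch of driving the new entrypoint programmatically;
`AgentBenchmarkConfig.load()` is called without arguments as elsewhere in this
commit, and the selected category is illustrative:

    from agbenchmark.config import AgentBenchmarkConfig
    from agbenchmark.main import run_benchmark

    config = AgentBenchmarkConfig.load()
    exit_code = run_benchmark(config, categories=("coding",), mock=True)
    raise SystemExit(exit_code)
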
diff --git a/benchmark/agbenchmark/reports/ReportManager.py b/benchmark/agbenchmark/reports/ReportManager.py
index 8e56682e5..eadb7c0e2 100644
--- a/benchmark/agbenchmark/reports/ReportManager.py
+++ b/benchmark/agbenchmark/reports/ReportManager.py
@@ -4,11 +4,12 @@ import os
import sys
import time
from datetime import datetime, timezone
+from pathlib import Path
+from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.reports.processing.graphs import save_single_radar_chart
from agbenchmark.reports.processing.process_report import get_agent_category
from agbenchmark.reports.processing.report_types import Report
-from agbenchmark.utils.data_types import AgentBenchmarkConfig
from agbenchmark.utils.utils import get_highest_success_difficulty
@@ -16,32 +17,26 @@ class SingletonReportManager:
instance = None
def __new__(cls):
- from agbenchmark.reports.agent_benchmark_config import (
- get_agent_benchmark_config,
- )
-
if not cls.instance:
cls.instance = super(SingletonReportManager, cls).__new__(cls)
- agent_benchmark_config = get_agent_benchmark_config()
+ agent_benchmark_config = AgentBenchmarkConfig.load()
benchmark_start_time_dt = datetime.now(
timezone.utc
) # or any logic to fetch the datetime
# Make the Managers class attributes
cls.REGRESSION_MANAGER = ReportManager(
- agent_benchmark_config.get_regression_reports_path(),
+ agent_benchmark_config.regression_tests_file,
benchmark_start_time_dt,
)
cls.INFO_MANAGER = ReportManager(
- str(
- agent_benchmark_config.get_reports_path(benchmark_start_time_dt)
- / "report.json"
- ),
+ agent_benchmark_config.get_report_dir(benchmark_start_time_dt)
+ / "report.json",
benchmark_start_time_dt,
)
cls.INTERNAL_INFO_MANAGER = ReportManager(
- agent_benchmark_config.get_success_rate_path(), benchmark_start_time_dt
+ agent_benchmark_config.success_rate_file, benchmark_start_time_dt
)
return cls.instance
@@ -57,21 +52,20 @@ class SingletonReportManager:
class ReportManager:
"""Abstracts interaction with the regression tests file"""
- def __init__(self, filename: str, benchmark_start_time: str):
- self.filename = filename
+ def __init__(self, report_file: Path, benchmark_start_time: datetime):
+ self.report_file = report_file
self.start_time = time.time()
self.benchmark_start_time = benchmark_start_time
self.load()
def load(self) -> None:
- if not os.path.exists(self.filename):
- os.makedirs(os.path.dirname(self.filename), exist_ok=True)
- with open(self.filename, "w") as f:
- pass
+ if not self.report_file.exists():
+            self.report_file.parent.mkdir(parents=True, exist_ok=True)
+ self.report_file.touch()
try:
- with open(self.filename, "r") as f:
+ with self.report_file.open("r") as f:
file_content = (
f.read().strip()
) # read the content and remove any leading/trailing whitespace
@@ -87,7 +81,7 @@ class ReportManager:
self.save()
def save(self) -> None:
- with open(self.filename, "w") as f:
+ with self.report_file.open("w") as f:
json.dump(self.tests, f, indent=4)
def add_test(self, test_name: str, test_details: dict | list) -> None:
@@ -137,7 +131,7 @@ class ReportManager:
if len(agent_categories) > 1:
save_single_radar_chart(
agent_categories,
- config.get_reports_path(self.benchmark_start_time) / "radar_chart.png",
+ config.get_report_dir(self.benchmark_start_time) / "radar_chart.png",
)
self.save()
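
Review note: the pathlib-based `load()` above replaces an `os.makedirs` +
`open(..., "w")` bootstrap. The equivalence, as a standalone sketch with an
illustrative path (`parents=True` matches the old `os.makedirs` behavior of
creating intermediate directories):

    from pathlib import Path

    report_file = Path("agbenchmark_config/reports/regression_tests.json")
    report_file.parent.mkdir(parents=True, exist_ok=True)  # was: os.makedirs(..., exist_ok=True)
    report_file.touch()                                    # was: open(filename, "w")
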
diff --git a/benchmark/agbenchmark/reports/agent_benchmark_config.py b/benchmark/agbenchmark/reports/agent_benchmark_config.py
deleted file mode 100644
index 3b45ed713..000000000
--- a/benchmark/agbenchmark/reports/agent_benchmark_config.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import json
-from pathlib import Path
-
-from agbenchmark.utils.data_types import AgentBenchmarkConfig
-
-
-def get_agent_benchmark_config() -> AgentBenchmarkConfig:
- agent_benchmark_config_path = str(Path.cwd() / "agbenchmark_config" / "config.json")
- try:
- with open(agent_benchmark_config_path, "r") as f:
- agent_benchmark_config = AgentBenchmarkConfig(**json.load(f))
- agent_benchmark_config.agent_benchmark_config_path = (
- agent_benchmark_config_path
- )
- return agent_benchmark_config
- except json.JSONDecodeError:
- print("Error: benchmark_config.json is not a valid JSON file.")
- raise
diff --git a/benchmark/agbenchmark/reports/processing/process_report.py b/benchmark/agbenchmark/reports/processing/process_report.py
index b390ba2f9..1f73ed3c0 100644
--- a/benchmark/agbenchmark/reports/processing/process_report.py
+++ b/benchmark/agbenchmark/reports/processing/process_report.py
@@ -1,4 +1,5 @@
import json
+import logging
import os
from pathlib import Path
from typing import Any
@@ -9,6 +10,8 @@ from agbenchmark.reports.processing.get_files import (
from agbenchmark.reports.processing.report_types import Report, Test
from agbenchmark.utils.data_types import STRING_DIFFICULTY_MAP
+logger = logging.getLogger(__name__)
+
def get_reports_data(report_path: str) -> dict[str, Any]:
latest_files = get_latest_report_from_agent_directories(report_path)
@@ -60,7 +63,7 @@ def all_agent_categories(reports_data: dict[str, Any]) -> dict[str, Any]:
for name, report in reports_data.items():
categories = get_agent_category(report)
if categories: # only add to all_categories if categories is not empty
- print(f"Adding {name}: {categories}")
+ logger.debug(f"Adding {name}: {categories}")
all_categories[name] = categories
return all_categories
diff --git a/benchmark/agbenchmark/reports/processing/report_types_v2.py b/benchmark/agbenchmark/reports/processing/report_types_v2.py
index 94b53ca97..34a679b6f 100644
--- a/benchmark/agbenchmark/reports/processing/report_types_v2.py
+++ b/benchmark/agbenchmark/reports/processing/report_types_v2.py
@@ -1,7 +1,6 @@
-from typing import Dict, List
+from pydantic import BaseModel, constr
datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"
-from pydantic import BaseModel, constr
class BaseModelBenchmark(BaseModel):
@@ -14,32 +13,32 @@ class TaskInfo(BaseModelBenchmark):
is_regression: bool | None
answer: str
description: str
- category: List[str]
+ category: list[str]
task: str
class RepositoryInfo(BaseModelBenchmark):
- repo_url: str | None
- team_name: str | None
- benchmark_git_commit_sha: str | None
- agent_git_commit_sha: str | None
+ repo_url: str | None = None
+ team_name: str | None = None
+ agent_git_commit_sha: str | None = None
+ benchmark_git_commit_sha: str | None = None
class Metrics(BaseModelBenchmark):
- difficulty: str | None
+ cost: float | None = None
success: bool
- success_percentage: float | None
- run_time: str | None
- fail_reason: str | None
attempted: bool
- cost: float | None
+ difficulty: str | None = None
+ run_time: str | None = None
+ fail_reason: str | None = None
+ success_percentage: float | None = None
class RunDetails(BaseModelBenchmark):
test_name: str
- run_id: str | None
+ run_id: str | None = None
command: str
- completion_time: str | None
+ completion_time: str | None = None
benchmark_start_time: constr(regex=datetime_format)
@@ -48,5 +47,5 @@ class BenchmarkRun(BaseModelBenchmark):
run_details: RunDetails
task_info: TaskInfo
metrics: Metrics
- reached_cutoff: bool | None
- config: Dict[str, str | dict[str, str]]
+ reached_cutoff: bool | None = None
+ config: dict[str, str | dict[str, str]]
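
Review note: with explicit `= None` defaults, the optional attributes no longer
have to be supplied when building a report entry. A quick sketch, assuming only
the two non-optional fields of `Metrics` are passed:

    from agbenchmark.reports.processing.report_types_v2 import Metrics

    metrics = Metrics(success=True, attempted=True)
    print(metrics.json())  # optional fields serialize as null
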
diff --git a/benchmark/agbenchmark/reports/reports.py b/benchmark/agbenchmark/reports/reports.py
index a1164bab7..684b715b7 100644
--- a/benchmark/agbenchmark/reports/reports.py
+++ b/benchmark/agbenchmark/reports/reports.py
@@ -1,20 +1,24 @@
import json
+import logging
import os
import sys
+from pathlib import Path
from typing import Any, Dict
-from agbenchmark.__main__ import CHALLENGES_ALREADY_BEATEN
-from agbenchmark.reports.agent_benchmark_config import get_agent_benchmark_config
+import pytest
+
+from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.reports.ReportManager import SingletonReportManager
-from agbenchmark.utils.data_types import DifficultyLevel
+from agbenchmark.utils.data_types import ChallengeData, DifficultyLevel
from agbenchmark.utils.get_data_from_helicone import get_data_from_helicone
from agbenchmark.utils.utils import calculate_success_percentage
+logger = logging.getLogger(__name__)
+
def get_previous_test_results(
test_name: str, info_details: dict[str, Any]
) -> list[bool]:
- agent_tests: dict[str, list[bool]] = {}
mock = os.getenv("IS_MOCK") # Check if --mock is in sys.argv
prev_test_results = SingletonReportManager().INTERNAL_INFO_MANAGER.tests.get(
@@ -49,17 +53,14 @@ def update_regression_tests(
def generate_single_call_report(
- item: Any,
- call: Any,
- challenge_data: dict[str, Any],
+ item: pytest.Item,
+ call: pytest.CallInfo,
+ challenge_data: ChallengeData,
answers: dict[str, Any],
- challenge_location,
- test_name,
+ challenge_location: str,
+ test_name: str,
) -> None:
- try:
- difficulty = challenge_data["info"]["difficulty"]
- except KeyError:
- return None
+ difficulty = challenge_data.info.difficulty
if isinstance(difficulty, DifficultyLevel):
difficulty = difficulty.value
@@ -77,10 +78,10 @@ def generate_single_call_report(
info_details: Any = {
"data_path": challenge_location,
"is_regression": False,
- "category": challenge_data["category"],
- "task": challenge_data["task"],
- "answer": challenge_data["ground"]["answer"],
- "description": challenge_data["info"]["description"],
+ "category": challenge_data.category,
+ "task": challenge_data.task,
+ "answer": challenge_data.ground.answer,
+ "description": challenge_data.info.description,
"metrics": {
"difficulty": difficulty,
"success": False,
@@ -91,8 +92,8 @@ def generate_single_call_report(
if answers:
info_details["answers"] = answers
- if "metadata" in challenge_data:
- info_details["metadata"] = challenge_data["metadata"]
+ if challenge_data.metadata:
+ info_details["metadata"] = challenge_data.metadata
mock = os.getenv("IS_MOCK") # Check if --mock is in sys.argv
if call:
@@ -116,7 +117,9 @@ def generate_single_call_report(
return info_details
-def finalize_reports(item: Any, challenge_data: dict[str, Any]) -> None:
+def finalize_reports(
+ config: AgentBenchmarkConfig, item: pytest.Item, challenge_data: ChallengeData
+) -> None:
run_time = dict(item.user_properties).get("run_time")
info_details = getattr(item, "info_details", {})
@@ -126,8 +129,9 @@ def finalize_reports(item: Any, challenge_data: dict[str, Any]) -> None:
if run_time is not None:
cost = None
if "--mock" not in sys.argv and os.environ.get("HELICONE_API_KEY"):
- print("Getting cost from Helicone")
+ logger.debug("Getting cost from Helicone")
cost = get_data_from_helicone(test_name)
+ logger.debug(f"Cost: {cost}")
info_details["metrics"]["cost"] = cost
@@ -142,29 +146,33 @@ def finalize_reports(item: Any, challenge_data: dict[str, Any]) -> None:
info_details["metrics"]["run_time"] = f"{str(round(run_time, 3))} seconds"
- info_details["reached_cutoff"] = float(run_time) > challenge_data["cutoff"]
+ info_details["reached_cutoff"] = float(run_time) > challenge_data.cutoff
if "--mock" not in sys.argv:
- update_challenges_already_beaten(info_details, test_name)
+ update_challenges_already_beaten(
+ config.challenges_already_beaten_file, info_details, test_name
+ )
if info_details.get("tests") is not None:
for nested_test_name, nested_test_info in info_details[
"tests"
].items():
update_challenges_already_beaten(
- nested_test_info, nested_test_name
+ config.challenges_already_beaten_file,
+ nested_test_info,
+ nested_test_name,
)
SingletonReportManager().INFO_MANAGER.add_test(test_name, info_details)
def update_challenges_already_beaten(
- info_details: Dict[str, Any], test_name: str
+ challenges_already_beaten_file: Path, info_details: Dict[str, Any], test_name: str
) -> None:
current_run_successful = info_details["metrics"]["success"]
try:
- with open(CHALLENGES_ALREADY_BEATEN, "r") as f:
+ with open(challenges_already_beaten_file, "r") as f:
challenge_data = json.load(f)
- except:
+ except FileNotFoundError:
challenge_data = {}
challenge_beaten_in_the_past = challenge_data.get(test_name)
@@ -172,13 +180,13 @@ def update_challenges_already_beaten(
if challenge_beaten_in_the_past is None and not current_run_successful:
challenge_data[test_name] = False
- with open(CHALLENGES_ALREADY_BEATEN, "w") as f:
+ with open(challenges_already_beaten_file, "w") as f:
json.dump(challenge_data, f, indent=4)
-def session_finish(suite_reports: dict) -> None:
- agent_benchmark_config = get_agent_benchmark_config()
-
+def session_finish(
+ agbenchmark_config: AgentBenchmarkConfig, suite_reports: dict
+) -> None:
SingletonReportManager().INTERNAL_INFO_MANAGER.save()
- SingletonReportManager().INFO_MANAGER.end_info_report(agent_benchmark_config)
+ SingletonReportManager().INFO_MANAGER.end_info_report(agbenchmark_config)
SingletonReportManager().REGRESSION_MANAGER.save()
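
Review note: in `update_challenges_already_beaten`, the branch visible in the
hunk only records `False` for challenges with no prior record; the success
branch lies outside the hunk and is assumed here to write `True`. A toy model
of that bookkeeping:

    record: dict[str, bool] = {}

    def update(name: str, success: bool) -> None:
        if success:               # assumed branch, not shown in the hunk
            record[name] = True
        elif name not in record:  # matches the visible branch
            record[name] = False

    update("WriteFile", False)
    update("WriteFile", True)
    update("WriteFile", False)
    print(record)  # {'WriteFile': True} -- a past success is never erased
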
diff --git a/benchmark/agbenchmark/schema.py b/benchmark/agbenchmark/schema.py
index 4849c2ad6..2aed562da 100644
--- a/benchmark/agbenchmark/schema.py
+++ b/benchmark/agbenchmark/schema.py
@@ -1,79 +1,14 @@
-# generated by fastapi-codegen:
-# filename: ../../postman/schemas/openapi.yaml
-# timestamp: 2023-08-25T10:36:11+00:00
-
from __future__ import annotations
-from datetime import datetime
-from enum import Enum
-from typing import List, Optional
+from typing import Optional
from pydantic import BaseModel, Field
-class ArtifactUpload(BaseModel):
- file: str = Field(..., description="File to upload.", format="binary")
- relative_path: str = Field(
- ...,
- description="Relative path of the artifact in the agent's workspace.",
- example="python/code",
- )
-
-
-class Pagination(BaseModel):
- total_items: int = Field(..., description="Total number of items.", example=42)
- total_pages: int = Field(..., description="Total number of pages.", example=97)
- current_page: int = Field(..., description="Current_page page number.", example=1)
- page_size: int = Field(..., description="Number of items per page.", example=25)
-
-
class TaskInput(BaseModel):
pass
-class Artifact(BaseModel):
- created_at: datetime = Field(
- ...,
- description="The creation datetime of the task.",
- example="2023-01-01T00:00:00Z",
- json_encoders={datetime: lambda v: v.isoformat()},
- )
- modified_at: datetime = Field(
- ...,
- description="The modification datetime of the task.",
- example="2023-01-01T00:00:00Z",
- json_encoders={datetime: lambda v: v.isoformat()},
- )
- artifact_id: str = Field(
- ...,
- description="ID of the artifact.",
- example="b225e278-8b4c-4f99-a696-8facf19f0e56",
- )
- agent_created: bool = Field(
- ...,
- description="Whether the artifact has been created by the agent.",
- example=False,
- )
- relative_path: str = Field(
- ...,
- description="Relative path of the artifact in the agents workspace.",
- example="/my_folder/my_other_folder/",
- )
- file_name: str = Field(
- ...,
- description="Filename of the artifact.",
- example="main.py",
- )
-
-
-class StepInput(BaseModel):
- pass
-
-
-class StepOutput(BaseModel):
- pass
-
-
class TaskRequestBody(BaseModel):
input: str = Field(
...,
@@ -86,108 +21,3 @@ class TaskRequestBody(BaseModel):
class TaskEvalRequestBody(TaskRequestBody):
eval_id: str
-
-
-class Task(TaskRequestBody):
- created_at: datetime = Field(
- ...,
- description="The creation datetime of the task.",
- example="2023-01-01T00:00:00Z",
- json_encoders={datetime: lambda v: v.isoformat()},
- )
- modified_at: datetime = Field(
- ...,
- description="The modification datetime of the task.",
- example="2023-01-01T00:00:00Z",
- json_encoders={datetime: lambda v: v.isoformat()},
- )
- task_id: str = Field(
- ...,
- description="The ID of the task.",
- example="50da533e-3904-4401-8a07-c49adf88b5eb",
- )
- artifacts: Optional[List[Artifact]] = Field(
- [],
- description="A list of artifacts that the task has produced.",
- example=[
- "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
- "ab7b4091-2560-4692-a4fe-d831ea3ca7d6",
- ],
- )
-
-
-class StepRequestBody(BaseModel):
- name: Optional[str] = Field(
- None, description="The name of the task step.", example="Write to file"
- )
- input: Optional[str] = Field(
- None,
- min_length=1,
- description="Input prompt for the step.",
- example="Washington",
- )
- additional_input: Optional[StepInput] = {}
-
-
-class Status(Enum):
- created = "created"
- running = "running"
- completed = "completed"
-
-
-class Step(StepRequestBody):
- created_at: datetime = Field(
- ...,
- description="The creation datetime of the task.",
- example="2023-01-01T00:00:00Z",
- json_encoders={datetime: lambda v: v.isoformat()},
- )
- modified_at: datetime = Field(
- ...,
- description="The modification datetime of the task.",
- example="2023-01-01T00:00:00Z",
- json_encoders={datetime: lambda v: v.isoformat()},
- )
- task_id: str = Field(
- ...,
- description="The ID of the task this step belongs to.",
- example="50da533e-3904-4401-8a07-c49adf88b5eb",
- )
- step_id: str = Field(
- ...,
- description="The ID of the task step.",
- example="6bb1801a-fd80-45e8-899a-4dd723cc602e",
- )
- name: Optional[str] = Field(
- None, description="The name of the task step.", example="Write to file"
- )
- status: Status = Field(
- ..., description="The status of the task step.", example="created"
- )
- output: Optional[str] = Field(
- None,
- description="Output of the task step.",
- example="I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')",
- )
- additional_output: Optional[StepOutput] = {}
- artifacts: Optional[List[Artifact]] = Field(
- [], description="A list of artifacts that the step has produced."
- )
- is_last: bool = Field(
- ..., description="Whether this is the last step in the task.", example=True
- )
-
-
-class TaskListResponse(BaseModel):
- tasks: Optional[List[Task]] = None
- pagination: Optional[Pagination] = None
-
-
-class TaskStepsListResponse(BaseModel):
- steps: Optional[List[Step]] = None
- pagination: Optional[Pagination] = None
-
-
-class TaskArtifactsListResponse(BaseModel):
- artifacts: Optional[List[Artifact]] = None
- pagination: Optional[Pagination] = None
diff --git a/benchmark/agbenchmark/utils/challenge.py b/benchmark/agbenchmark/utils/challenge.py
index a32ab6cf7..c7d1f36f6 100644
--- a/benchmark/agbenchmark/utils/challenge.py
+++ b/benchmark/agbenchmark/utils/challenge.py
@@ -1,17 +1,20 @@
import glob
+import json
+import logging
import math
import os
import subprocess
import sys
from abc import ABC
from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any, ClassVar, List
import openai
import pytest
+from colorama import Fore, Style
-from agbenchmark.__main__ import OPTIONAL_CATEGORIES, TEMP_FOLDER_ABS_PATH
from agbenchmark.agent_api_interface import run_api_agent
+from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.utils.data_types import ChallengeData, Ground
from agbenchmark.utils.prompts import (
END_PROMPT,
@@ -19,43 +22,84 @@ from agbenchmark.utils.prompts import (
PROMPT_MAP,
SCORING_MAP,
)
-from agbenchmark.utils.utils import agent_eligibible_for_optional_categories
+
+logger = logging.getLogger(__name__)
+
+with open(
+ Path(__file__).parent.parent / "challenges" / "optional_categories.json"
+) as f:
+ OPTIONAL_CATEGORIES: list[str] = json.load(f)["optional_categories"]
class Challenge(ABC):
"""The parent class to all specific challenges classes.
Defines helper methods for running a challenge"""
- _data_cache: Dict[str, ChallengeData] = {}
- CHALLENGE_LOCATION: str = ""
- scores: dict[str, Any] = {} # this is for suites
+ data: ChallengeData
+ CHALLENGE_LOCATION: ClassVar[str]
+ ARTIFACTS_LOCATION: ClassVar[str]
+ scores: ClassVar[dict[str, Any]] = {} # this is for suites
+
+ @staticmethod
+ def from_challenge_spec(spec_file: Path) -> type["Challenge"]:
+ challenge_data = ChallengeData.parse_file(spec_file)
+
+ challenge_class_name = f"Test{challenge_data.name}"
+ logger.debug(f"Creating {challenge_class_name} from spec: {spec_file}")
+ return type(
+ challenge_class_name,
+ (Challenge,),
+ {
+ "data": challenge_data,
+ "CHALLENGE_LOCATION": str(spec_file),
+ "ARTIFACTS_LOCATION": str(spec_file.resolve().parent),
+ },
+ )
- @property
- def data(self) -> ChallengeData:
- if self.CHALLENGE_LOCATION not in self._data_cache:
- self._data_cache[self.CHALLENGE_LOCATION] = ChallengeData.deserialize(
- self.CHALLENGE_LOCATION
- )
- return self._data_cache[self.CHALLENGE_LOCATION]
+ # Define test method within the dynamically created class
+ @pytest.mark.asyncio
+ async def test_method(
+ self, config: AgentBenchmarkConfig, request: pytest.FixtureRequest
+ ) -> None:
+ # skip optional categories
+ self.skip_optional_categories(config)
+
+ if os.environ.get("HELICONE_API_KEY"):
+ from helicone.lock import HeliconeLockManager
+
+ HeliconeLockManager.write_custom_property("challenge", self.data.name)
+
+ timeout = self.data.cutoff or 60
+
+ if request.config.getoption("--nc"):
+ timeout = 100000
+ elif cutoff := request.config.getoption("--cutoff"):
+ timeout = int(cutoff)
+
+ await self.run_challenge(config, timeout)
- @property
- def task(self) -> str:
- return self.data.task
+ scores = self.get_scores(config.temp_folder)
+ request.node.answers = (
+ scores["answers"] if request.config.getoption("--keep-answers") else None
+ )
+ del scores["answers"] # remove answers from scores
+ request.node.scores = scores # store scores in request.node
+ is_score_100 = 1 in scores["values"]
- @property
- def dependencies(self) -> list:
- return self.data.dependencies
+ assert is_score_100
- async def setup_challenge(self, config: Dict[str, Any], cutoff: int) -> None:
+ async def run_challenge(self, config: AgentBenchmarkConfig, cutoff: int) -> None:
from agbenchmark.agent_interface import copy_artifacts_into_temp_folder
- if not self.task:
+ if not self.data.task:
return
print(
- f"\033[1;35m============Starting {self.data.name} challenge============\033[0m"
+ f"{Fore.MAGENTA + Style.BRIGHT}{'='*24} "
+ f"Starting {self.data.name} challenge"
+ f" {'='*24}{Style.RESET_ALL}"
)
- print(f"\033[1;30mTask: {self.task}\033[0m")
+ print(f"{Fore.BLACK}Task: {self.data.task}{Fore.RESET}")
await run_api_agent(self.data, config, self.ARTIFACTS_LOCATION, cutoff)
@@ -66,13 +110,11 @@ class Challenge(ABC):
str(Path(self.CHALLENGE_LOCATION).parent),
]
for path in artifact_paths:
- copy_artifacts_into_temp_folder(TEMP_FOLDER_ABS_PATH, "custom_python", path)
-
- def test_method(self, config: Dict[str, Any]) -> None:
- raise NotImplementedError
+ copy_artifacts_into_temp_folder(config.temp_folder, "custom_python", path)
+ @staticmethod
def get_artifacts_out(
- self, workspace: str | dict[str, str], ground: Ground
+ workspace: str | Path | dict[str, str], ground: Ground
) -> List[str]:
if isinstance(workspace, dict):
workspace = workspace["output"]
@@ -108,7 +150,7 @@ class Challenge(ABC):
if ground.eval.type == "pytest":
result = subprocess.run(
[sys.executable, "-m", "pytest"],
- cwd=TEMP_FOLDER_ABS_PATH,
+ cwd=os.path.abspath(workspace),
capture_output=True,
text=True,
)
@@ -119,15 +161,17 @@ class Challenge(ABC):
return files_contents
- def scoring(self, config: Dict[str, Any], content: str, ground: Ground) -> float:
- print("\033[1;34mScoring content:\033[0m", content)
+ @staticmethod
+ def scoring(content: str, ground: Ground) -> float:
+ print(f"{Fore.BLUE}Scoring content:{Style.RESET_ALL}", content)
if ground.should_contain:
for should_contain_word in ground.should_contain:
if not getattr(ground, "case_sensitive", True):
should_contain_word = should_contain_word.lower()
content = content.lower()
print_content = (
- f"\033[1;34mWord that should exist\033[0m - {should_contain_word}:"
+ f"{Fore.BLUE}Word that should exist{Style.RESET_ALL}"
+ f" - {should_contain_word}:"
)
if should_contain_word not in content:
print(print_content, "False")
@@ -140,7 +184,10 @@ class Challenge(ABC):
if not getattr(ground, "case_sensitive", True):
should_not_contain_word = should_not_contain_word.lower()
content = content.lower()
- print_content = f"\033[1;34mWord that should not exist\033[0m - {should_not_contain_word}:"
+ print_content = (
+ f"{Fore.BLUE}Word that should not exist{Style.RESET_ALL}"
+ f" - {should_not_contain_word}:"
+ )
if should_not_contain_word in content:
print(print_content, "False")
return 0.0
@@ -149,14 +196,17 @@ class Challenge(ABC):
return 1.0
- def llm_eval(self, config: Dict[str, Any], content: str, ground: Ground) -> float:
+ @classmethod
+ def llm_eval(cls, content: str, ground: Ground) -> float:
openai.api_key = os.getenv("OPENAI_API_KEY")
if os.getenv("IS_MOCK"):
return 1.0
# the validation for this is done in the Eval BaseModel
scoring = SCORING_MAP[ground.eval.scoring] # type: ignore
- prompt = PROMPT_MAP[ground.eval.template].format(task=self.data.task, scoring=scoring, answer=ground.answer, response=content) # type: ignore
+ prompt = PROMPT_MAP[ground.eval.template].format( # type: ignore
+ task=cls.data.task, scoring=scoring, answer=ground.answer, response=content
+ )
if ground.eval.examples:
prompt += FEW_SHOT_EXAMPLES.format(examples=ground.eval.examples)
@@ -172,34 +222,31 @@ class Challenge(ABC):
return float(answer["choices"][0]["message"]["content"]) # type: ignore
- def get_scores(self, config: Dict[str, Any]) -> dict[str, Any]:
+ @classmethod
+ def get_scores(cls, workspace: Path) -> dict[str, Any]:
scores = []
scores_dict: Any = {}
percentage = None
answers = {}
try:
- if self.data.task == "" and os.getenv("IS_MOCK"):
+ if cls.data.task == "" and os.getenv("IS_MOCK"):
scores = [1.0]
answers = {"mock": "This is a mock answer"}
- elif isinstance(self.data.ground, Ground):
- files_contents = self.get_artifacts_out(
- TEMP_FOLDER_ABS_PATH, self.data.ground
- )
+ elif isinstance(cls.data.ground, Ground):
+ files_contents = cls.get_artifacts_out(workspace, cls.data.ground)
answers = {"answer": files_contents}
for file_content in files_contents:
- score = self.scoring(config, file_content, self.data.ground)
- print("\033[1;32mYour score is:\033[0m", score)
+ score = cls.scoring(file_content, cls.data.ground)
+ print(f"{Fore.GREEN}Your score is:{Style.RESET_ALL}", score)
scores.append(score)
- if self.data.ground.eval.type == "llm":
- llm_eval = self.llm_eval(
- config, "\n".join(files_contents), self.data.ground
- )
- if self.data.ground.eval.scoring == "percentage":
+ if cls.data.ground.eval.type == "llm":
+ llm_eval = cls.llm_eval("\n".join(files_contents), cls.data.ground)
+ if cls.data.ground.eval.scoring == "percentage":
scores.append(math.ceil(llm_eval / 100))
- elif self.data.ground.eval.scoring == "scale":
+ elif cls.data.ground.eval.scoring == "scale":
scores.append(math.ceil(llm_eval / 10))
- print("\033[1;32mYour score is:\033[0m", llm_eval)
+ print(f"{Fore.GREEN}Your score is:{Style.RESET_ALL}", llm_eval)
scores.append(llm_eval)
except Exception as e:
@@ -212,7 +259,7 @@ class Challenge(ABC):
"answers": answers,
}
- self.scores[self.__class__.__name__] = scores_data
+ cls.scores[cls.__name__] = scores_data
return scores_data
@@ -223,14 +270,15 @@ class Challenge(ABC):
return None
- def skip_optional_categories(self, config: Dict[str, Any]) -> None:
- challenge_category = self.data.category
- categories = [
- category
- for category in OPTIONAL_CATEGORIES
- if category in challenge_category
- ]
- if not agent_eligibible_for_optional_categories(
- categories, config.get("category", [])
+ @classmethod
+ def skip_optional_categories(cls, config: AgentBenchmarkConfig) -> None:
+ challenge_categories = set(c.value for c in cls.data.category)
+ challenge_optional_categories = challenge_categories & set(OPTIONAL_CATEGORIES)
+ if challenge_optional_categories and not (
+ config.categories
+ and set(challenge_optional_categories).issubset(set(config.categories))
):
- pytest.skip("Agent is not eligible for this category")
+ pytest.skip(
+ f"Category {', '.join(challenge_optional_categories)} is optional, "
+ "and not explicitly selected in the benchmark config."
+ )
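
Review note: the rewritten `skip_optional_categories` reduces to "skip when the
challenge touches any optional category that was not explicitly selected". A
standalone sketch with illustrative values:

    OPTIONAL_CATEGORIES = ["coding"]  # illustrative; really loaded from optional_categories.json
    challenge_categories = {"coding", "scrape_synthesize"}
    selected_categories = ["scrape_synthesize"]

    optional = challenge_categories & set(OPTIONAL_CATEGORIES)
    skip = bool(optional) and not (
        selected_categories and optional.issubset(set(selected_categories))
    )
    print(skip)  # True: "coding" is optional and was not explicitly selected
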
diff --git a/benchmark/agbenchmark/utils/data_types.py b/benchmark/agbenchmark/utils/data_types.py
index 955b1d6a8..b38e5ef23 100644
--- a/benchmark/agbenchmark/utils/data_types.py
+++ b/benchmark/agbenchmark/utils/data_types.py
@@ -1,12 +1,8 @@
-import datetime
-import json
-import sys
-from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional
-from pydantic import BaseModel, constr, validator
+from pydantic import BaseModel, Field, constr, validator
class DifficultyLevel(Enum):
@@ -33,80 +29,6 @@ DIFFICULTY_MAP = {
STRING_DIFFICULTY_MAP = {e.value: DIFFICULTY_MAP[e] for e in DifficultyLevel}
-def calculate_info_test_path(base_path: Path, benchmark_start_time: datetime) -> Path:
- """
- Calculates the path to the directory where the test report will be saved.
- """
- # Ensure the reports path exists
- base_path.mkdir(parents=True, exist_ok=True)
-
- # Get current UTC date-time stamp
- date_stamp = benchmark_start_time.strftime("%Y%m%dT%H%M%S")
-
- # Default run name
- run_name = "full_run"
-
- # Map command-line arguments to their respective labels
- arg_labels = {
- "--test": None,
- "--category": None,
- "--maintain": "maintain",
- "--improve": "improve",
- "--explore": "explore",
- }
-
- # Identify the relevant command-line argument
- for arg, label in arg_labels.items():
- if arg in sys.argv:
- test_arg = sys.argv[sys.argv.index(arg) + 1] if label is None else None
- run_name = arg.strip("--")
- if test_arg:
- run_name = f"{run_name}_{test_arg}"
- break
-
- # Create the full new directory path with ISO standard UTC date-time stamp
- report_path = base_path / f"{date_stamp}_{run_name}"
-
- # Ensure the new directory is created
- report_path.mkdir(exist_ok=True)
- return report_path
-
-
-class AgentBenchmarkConfig(BaseModel):
- """
- This class represents the configuration for the Agent agbenchmark.
- It includes the following attributes:
- - agent_benchmark_config_path: The path to the agent benchmark config that this object was created from.
- - reports_folder: The path to the folder where the benchmark reports will be stored.
- - host: The host where the benchmark is run.
- """
-
- agent_benchmark_config_path: Path | None = None
- reports_folder: Path | None = None
- host: str | None
-
- def get_reports_location(self) -> Path:
- # if not self.reports_folder:
- # self.reports_folder = (
- # Path(self.agent_benchmark_config_path).parent / "reports"
- # ).resolve()
- return Path.cwd() / "agbenchmark_config" / "reports"
-
- def get_reports_path(self, benchmark_start_time: datetime) -> Path:
- return calculate_info_test_path(
- self.get_reports_location(), benchmark_start_time
- )
-
- def get_regression_reports_path(self) -> Path:
- return self.get_reports_location() / "regression_tests.json"
-
- def get_success_rate_path(self) -> Path:
- return self.get_reports_location() / "success_rate.json"
-
- def get_agent_home_directory(self) -> Path:
- return Path(self.agent_benchmark_config_path).resolve().parent
-
-
class Info(BaseModel):
difficulty: DifficultyLevel
description: constr(regex=r"^Tests if the agent can.*")
@@ -180,6 +102,7 @@ class Category(str, Enum):
class ChallengeData(BaseModel):
+ eval_id: str = ""
name: str
category: List[Category]
task: str
@@ -189,73 +112,4 @@ class ChallengeData(BaseModel):
info: Info | Dict[str, Info]
metadata: Optional[Dict[str, Any]] = None
- def serialize(self, path: str) -> None:
- with open(path, "w") as file:
- file.write(self.json())
-
- def get_data(self) -> dict:
- return self.dict()
-
- @staticmethod
- def get_json_from_path(json_path: Path | str) -> dict:
- path = Path(json_path).resolve()
- with open(path, "r") as file:
- data = json.load(file)
- return data
-
- @staticmethod
- def deserialize(path: str) -> "ChallengeData":
- # this script is in root/agbenchmark/utils/define_task_types.py
- script_dir = Path(__file__).resolve().parent.parent.parent
- json_path = script_dir / Path(path)
-
- with open(json_path, "r") as file:
- data = json.load(file)
- try:
- return ChallengeData(**data)
- except:
- test = "ok"
-
- def challenge_from_datum(self, file_datum: list[dict[str, Any]]) -> "ChallengeData":
- same_task_data = {
- "name": self.prefix,
- "dependencies": self.dependencies,
- "category": self.shared_category,
- "task": self.task,
- "cutoff": self.cutoff,
- }
-
- if not self.info:
- same_task_data["info"] = {
- datum["name"]: datum["info"] for datum in file_datum
- }
- else:
- same_task_data["info"] = self.info
-
- if not self.ground:
- same_task_data["ground"] = {
- datum["name"]: datum["ground"] for datum in file_datum
- }
- else:
- same_task_data["ground"] = self.ground
-
- return ChallengeData(**same_task_data)
-
- def challenge_from_test_data(self, data: dict[str, Any]) -> "ChallengeData":
- same_task_data = {
- "name": data["name"],
- "dependencies": data["dependencies"],
- "category": data["category"],
- "info": data["info"],
- "ground": data["ground"],
- }
-
- if self.same_task:
- same_task_data["category"].extend(self.shared_category)
- same_task_data["task"] = self.task
- same_task_data["cutoff"] = self.cutoff
- else:
- same_task_data["task"] = data["task"]
- same_task_data["cutoff"] = data["cutoff"]
-
- return ChallengeData(**same_task_data)
+ spec_file: Path | None = Field(None, exclude=True)
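
For context on the spec_file field added above: in Pydantic v1 (the major version this project pins), Field(..., exclude=True) keeps an attribute available at runtime while omitting it from .dict() and .json() output. A minimal sketch of that pattern, using illustrative names rather than the actual ChallengeData model:

    from pathlib import Path
    from typing import Optional

    from pydantic import BaseModel, Field  # Pydantic v1 API


    class Spec(BaseModel):
        name: str
        # Useful at runtime (e.g. to locate sibling files), but kept out of exports
        source_file: Optional[Path] = Field(None, exclude=True)


    spec = Spec(name="example", source_file=Path("challenges/example/data.json"))
    assert "source_file" not in spec.dict()  # exclude=True hides it on export
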
diff --git a/benchmark/agbenchmark/utils/dependencies/graphs.py b/benchmark/agbenchmark/utils/dependencies/graphs.py
index 8155e2bfd..47d3d5c09 100644
--- a/benchmark/agbenchmark/utils/dependencies/graphs.py
+++ b/benchmark/agbenchmark/utils/dependencies/graphs.py
@@ -1,3 +1,5 @@
+import json
+import logging
import math
from pathlib import Path
from typing import Any, Dict, List, Tuple
@@ -11,6 +13,8 @@ from pyvis.network import Network
from agbenchmark.generate_test import DATA_CATEGORY
from agbenchmark.utils.utils import write_pretty_json
+logger = logging.getLogger(__name__)
+
def bezier_curve(
src: np.ndarray, ctrl: List[float], dst: np.ndarray
@@ -221,8 +225,8 @@ def graph_interactive_network(
f"{source_id_str}_to_{target_id_str}" # Construct a unique edge id
)
if not (source_id_str in nt.get_nodes() and target_id_str in nt.get_nodes()):
- print(
- f"Skipping edge {source_id_str} -> {target_id_str} due to missing nodes."
+ logger.warning(
+ f"Skipping edge {source_id_str} -> {target_id_str} due to missing nodes"
)
continue
nt.add_edge(source_id_str, target_id_str, id=edge_id_str)
@@ -271,9 +275,12 @@ def graph_interactive_network(
"layout": {"hierarchical": hierarchical_options},
}
- # Serialize the graph to JSON
+ # Serialize the graph to JSON and save in appropriate locations
graph_data = {"nodes": nt.nodes, "edges": nt.edges}
+ logger.debug(f"Generated graph data:\n{json.dumps(graph_data, indent=4)}")
+ # FIXME: use more reliable method to find the right location for these files.
+ # This will fail in all cases except if run from the root of our repo.
home_path = Path.cwd()
write_pretty_json(graph_data, home_path / "frontend" / "public" / "graph.json")
@@ -284,7 +291,6 @@ def graph_interactive_network(
# this literally only works in the AutoGPT repo, but this part of the code is not reached if BUILD_SKILL_TREE is false
write_pretty_json(graph_data, flutter_app_path / "tree_structure.json")
validate_skill_tree(graph_data, "")
- import json
# Extract node IDs with category "coding"
@@ -317,9 +323,6 @@ def graph_interactive_network(
scrape_synthesize_tree,
flutter_app_path / "scrape_synthesize_tree_structure.json",
)
- # If you want to convert back to JSON
- filtered_json = json.dumps(graph_data, indent=4)
- print(filtered_json)
if html_graph_path:
file_path = str(Path(html_graph_path).resolve())
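
The FIXME in graph_interactive_network asks for a more reliable way to locate the output directories than Path.cwd(). One common approach, sketched here under the assumption that the repository root can be identified by a marker such as a .git directory (this is not what the code above does yet):

    from pathlib import Path


    def find_repo_root(start: Path | None = None, marker: str = ".git") -> Path:
        """Walk upwards from `start` until a directory containing `marker` is found."""
        current = (start or Path.cwd()).resolve()
        for candidate in (current, *current.parents):
            if (candidate / marker).exists():
                return candidate
        raise FileNotFoundError(f"no '{marker}' found in or above {current}")
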
diff --git a/benchmark/agbenchmark/utils/get_data_from_helicone.py b/benchmark/agbenchmark/utils/get_data_from_helicone.py
index 1e2f5fcbd..dabb2c8b0 100644
--- a/benchmark/agbenchmark/utils/get_data_from_helicone.py
+++ b/benchmark/agbenchmark/utils/get_data_from_helicone.py
@@ -1,4 +1,5 @@
import json
+import logging
import os
from typing import Optional
@@ -7,6 +8,8 @@ import requests
from agbenchmark.__main__ import BENCHMARK_START_TIME
from agbenchmark.agent_interface import HELICONE_GRAPHQL_LOGS
+logger = logging.getLogger(__name__)
+
def get_data_from_helicone(challenge: str) -> Optional[float]:
# Define the endpoint of your GraphQL server
@@ -38,8 +41,8 @@ query ExampleQuery($properties: [PropertyFilter!]){
]
}
if HELICONE_GRAPHQL_LOGS:
- print(query)
- print(json.dumps(variables, indent=4))
+ logger.debug(f"Executing Helicone query:\n{query.strip()}")
+ logger.debug(f"Query variables:\n{json.dumps(variables, indent=4)}")
operation_name = "ExampleQuery"
@@ -59,24 +62,22 @@ query ExampleQuery($properties: [PropertyFilter!]){
data = response.json()
except requests.HTTPError as http_err:
- print(f"HTTP error occurred: {http_err}")
- return None # Re-raise the exception to stop execution
+ logger.error(f"Helicone returned an HTTP error: {http_err}")
+ return None
except json.JSONDecodeError:
- print(f"Invalid JSON response: {response.text if response else 'No response'}")
+ raw_response = response.text # type: ignore
+ logger.error(
+ f"Helicone returned an invalid JSON response: '''{raw_response}'''"
+ )
return None
except Exception as err:
- print(f"Other error occurred: {err}")
+ logger.error(f"Error while trying to get data from Helicone: {err}")
return None
- try:
- if data is None or data.get("data") is None:
- print("Invalid response received from server: no data")
- return None
- return (
- data.get("data", {})
- .get("aggregatedHeliconeRequest", {})
- .get("costUSD", None)
- )
- except Exception as err:
- print(f"Error occurred while parsing response: {err}")
+ if data is None or data.get("data") is None:
+ logger.error("Invalid response received from Helicone: no data")
+ logger.error(f"Offending response: {response}")
return None
+ return (
+ data.get("data", {}).get("aggregatedHeliconeRequest", {}).get("costUSD", None)
+ )
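
The rewritten tail of get_data_from_helicone replaces a nested try/except with guard clauses: validate the response up front and bail out with None, then extract the nested value via chained .get() calls so missing keys degrade gracefully instead of raising. The same pattern in isolation (key names copied from the query above, payloads illustrative):

    from typing import Any, Optional


    def extract_cost_usd(payload: Optional[dict[str, Any]]) -> Optional[float]:
        # Guard clause: reject absent or malformed payloads up front
        if payload is None or payload.get("data") is None:
            return None
        # Chained .get(): a missing key yields None instead of a KeyError
        return (
            payload.get("data", {}).get("aggregatedHeliconeRequest", {}).get("costUSD")
        )


    assert extract_cost_usd({"data": {"aggregatedHeliconeRequest": {"costUSD": 0.42}}}) == 0.42
    assert extract_cost_usd({"data": None}) is None
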
diff --git a/benchmark/agbenchmark/utils/logging.py b/benchmark/agbenchmark/utils/logging.py
new file mode 100644
index 000000000..9a8f9f41d
--- /dev/null
+++ b/benchmark/agbenchmark/utils/logging.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+import logging
+
+from colorama import Fore, Style
+
+SIMPLE_LOG_FORMAT = "[%(asctime)s] %(levelname)s %(message)s"
+DEBUG_LOG_FORMAT = "[%(asctime)s] %(levelname)s %(filename)s:%(lineno)03d %(message)s"
+
+
+def configure_logging(
+ level: int = logging.INFO,
+) -> None:
+ """Configure the native logging module."""
+
+ # Auto-adjust default log format based on log level
+ log_format = DEBUG_LOG_FORMAT if level == logging.DEBUG else SIMPLE_LOG_FORMAT
+
+ console_handler = logging.StreamHandler()
+ console_handler.setFormatter(FancyConsoleFormatter(log_format))
+
+ # Configure the root logger
+ logging.basicConfig(
+ level=level,
+ format=log_format,
+ handlers=[console_handler],
+ )
+
+
+class FancyConsoleFormatter(logging.Formatter):
+ """
+ A custom logging formatter designed for console output.
+
+ This formatter enhances the standard logging output with color coding. The color
+ coding is based on the level of the log message, making it easier to distinguish
+ between different types of messages in the console output.
+
+ The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
+ """
+
+ # level -> level & message color
+ LEVEL_COLOR_MAP = {
+ logging.DEBUG: Fore.LIGHTBLACK_EX,
+ logging.INFO: Fore.BLUE,
+ logging.WARNING: Fore.YELLOW,
+ logging.ERROR: Fore.RED,
+ logging.CRITICAL: Fore.RED + Style.BRIGHT,
+ }
+
+ def format(self, record: logging.LogRecord) -> str:
+ # Make sure `msg` is a string
+ if not hasattr(record, "msg"):
+ record.msg = ""
+ elif type(record.msg) is not str:
+ record.msg = str(record.msg)
+
+ # Justify the level name to 5 characters minimum
+ record.levelname = record.levelname.ljust(5)
+
+ # Determine default color based on error level
+ level_color = ""
+ if record.levelno in self.LEVEL_COLOR_MAP:
+ level_color = self.LEVEL_COLOR_MAP[record.levelno]
+ record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"
+
+ # Determine color for message
+ color = getattr(record, "color", level_color)
+ color_is_specified = hasattr(record, "color")
+
+ # Don't color INFO messages unless the color is explicitly specified.
+ if color and (record.levelno != logging.INFO or color_is_specified):
+ record.msg = f"{color}{record.msg}{Style.RESET_ALL}"
+
+ return super().format(record)
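
In practice the new module is used by calling configure_logging() once at startup, after which every module-level logger inherits the colored console handler. A short usage sketch; the extra={"color": ...} override corresponds to the record.color lookup in FancyConsoleFormatter above:

    import logging

    from colorama import Fore

    from agbenchmark.utils.logging import configure_logging

    configure_logging(logging.DEBUG)  # DEBUG selects the more verbose format
    logger = logging.getLogger(__name__)

    logger.debug("dim diagnostic output")
    logger.warning("yellow warning")
    logger.info("explicitly colored", extra={"color": Fore.GREEN})
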
diff --git a/benchmark/agbenchmark/utils/utils.py b/benchmark/agbenchmark/utils/utils.py
index 2fc51d212..a7756766a 100644
--- a/benchmark/agbenchmark/utils/utils.py
+++ b/benchmark/agbenchmark/utils/utils.py
@@ -1,18 +1,22 @@
# radio charts, logs, helper functions for tests, anything else relevant.
import json
+import logging
import os
import re
from pathlib import Path
-from typing import Any, List, Optional
+from typing import Any, Optional
from dotenv import load_dotenv
-load_dotenv()
from agbenchmark.utils.data_types import DIFFICULTY_MAP, DifficultyLevel
+load_dotenv()
+
AGENT_NAME = os.getenv("AGENT_NAME")
REPORT_LOCATION = os.getenv("REPORT_LOCATION", None)
+logger = logging.getLogger(__name__)
+
def replace_backslash(value: Any) -> Any:
if isinstance(value, str):
@@ -72,8 +76,9 @@ def get_highest_success_difficulty(
highest_difficulty = DifficultyLevel[highest_difficulty_str]
highest_difficulty_level = DIFFICULTY_MAP[highest_difficulty]
except KeyError:
- print(
- f"Unexpected difficulty level '{highest_difficulty_str}' in test '{test_name}'"
+ logger.warning(
+ f"Unexpected difficulty level '{highest_difficulty_str}' "
+ f"in test '{test_name}'"
)
continue
else:
@@ -88,12 +93,21 @@ def get_highest_success_difficulty(
highest_difficulty = difficulty_enum
highest_difficulty_level = difficulty_level
except KeyError:
- print(
- f"Unexpected difficulty level '{difficulty_str}' in test '{test_name}'"
+ logger.warning(
+ f"Unexpected difficulty level '{difficulty_str}' "
+ f"in test '{test_name}'"
)
continue
- except Exception:
- print(f"Make sure you selected the right test, no reports were generated.")
+ except Exception as e:
+ logger.warning(
+ "An unexpected error [1] occurred while analyzing report [2]."
+ "Please notify a maintainer.\n"
+ f"Report data [1]: {data}\n"
+ f"Error [2]: {e}"
+ )
+ logger.warning(
+ "Make sure you selected the right test, no reports were generated."
+ )
break
if highest_difficulty is not None:
@@ -116,22 +130,13 @@ def get_highest_success_difficulty(
# remote_url = remote_url[:-4]
# git_commit_sha = f"{remote_url}/tree/{repo.head.commit.hexsha}"
-# # print(f"GIT_COMMIT_SHA: {git_commit_sha}")
+# # logger.debug(f"GIT_COMMIT_SHA: {git_commit_sha}")
# return git_commit_sha
# except Exception:
-# # print(f"{directory} is not a git repository!")
+# # logger.error(f"{directory} is not a git repository!")
# return None
-def agent_eligibible_for_optional_categories(
- optional_challenge_categories: List, agent_categories: List
-) -> bool:
- for element in optional_challenge_categories:
- if element not in agent_categories:
- return False
- return True
-
-
def write_pretty_json(data, json_file):
sorted_data = deep_sort(data)
json_graph = json.dumps(sorted_data, indent=4)
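
The deleted agent_eligibible_for_optional_categories helper was a hand-rolled subset test; wherever that check is still needed, the built-in set API expresses it in one line:

    optional_challenge_categories = ["coding", "scrape_synthesize"]
    agent_categories = ["coding", "scrape_synthesize", "data"]

    # True iff every optional challenge category is also supported by the agent
    eligible = set(optional_challenge_categories).issubset(agent_categories)
    assert eligible
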
diff --git a/benchmark/poetry.lock b/benchmark/poetry.lock
index 9a27d456f..fd7f78608 100644
--- a/benchmark/poetry.lock
+++ b/benchmark/poetry.lock
@@ -1,112 +1,117 @@
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
[[package]]
+name = "agent-protocol-client"
+version = "1.1.0"
+description = "Agent Communication Protocol Client"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "agent_protocol_client-1.1.0-py3-none-any.whl", hash = "sha256:0e8c6c97244189666ed18e320410abddce8c9dfb75437da1e590bbef3b6268be"},
+ {file = "agent_protocol_client-1.1.0.tar.gz", hash = "sha256:aa7e1042de1249477fdc29c2df08a44f2233dade9c02c1279e37c98e9d3a0d72"},
+]
+
+[package.dependencies]
+aiohttp = ">=3.8.4,<4.0.0"
+pydantic = ">=1.10.5,<2.0.0"
+python-dateutil = ">=2.8.2,<3.0.0"
+urllib3 = ">=1.25.3,<2.0.0"
+
+[[package]]
name = "aiohttp"
-version = "3.8.5"
+version = "3.9.1"
description = "Async http client/server framework (asyncio)"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
files = [
- {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a94159871304770da4dd371f4291b20cac04e8c94f11bdea1c3478e557fbe0d8"},
- {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:13bf85afc99ce6f9ee3567b04501f18f9f8dbbb2ea11ed1a2e079670403a7c84"},
- {file = "aiohttp-3.8.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ce2ac5708501afc4847221a521f7e4b245abf5178cf5ddae9d5b3856ddb2f3a"},
- {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96943e5dcc37a6529d18766597c491798b7eb7a61d48878611298afc1fca946c"},
- {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ad5c3c4590bb3cc28b4382f031f3783f25ec223557124c68754a2231d989e2b"},
- {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c413c633d0512df4dc7fd2373ec06cc6a815b7b6d6c2f208ada7e9e93a5061d"},
- {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df72ac063b97837a80d80dec8d54c241af059cc9bb42c4de68bd5b61ceb37caa"},
- {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c48c5c0271149cfe467c0ff8eb941279fd6e3f65c9a388c984e0e6cf57538e14"},
- {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:368a42363c4d70ab52c2c6420a57f190ed3dfaca6a1b19afda8165ee16416a82"},
- {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7607ec3ce4993464368505888af5beb446845a014bc676d349efec0e05085905"},
- {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0d21c684808288a98914e5aaf2a7c6a3179d4df11d249799c32d1808e79503b5"},
- {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:312fcfbacc7880a8da0ae8b6abc6cc7d752e9caa0051a53d217a650b25e9a691"},
- {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad093e823df03bb3fd37e7dec9d4670c34f9e24aeace76808fc20a507cace825"},
- {file = "aiohttp-3.8.5-cp310-cp310-win32.whl", hash = "sha256:33279701c04351a2914e1100b62b2a7fdb9a25995c4a104259f9a5ead7ed4802"},
- {file = "aiohttp-3.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:6e4a280e4b975a2e7745573e3fc9c9ba0d1194a3738ce1cbaa80626cc9b4f4df"},
- {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae871a964e1987a943d83d6709d20ec6103ca1eaf52f7e0d36ee1b5bebb8b9b9"},
- {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:461908b2578955045efde733719d62f2b649c404189a09a632d245b445c9c975"},
- {file = "aiohttp-3.8.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:72a860c215e26192379f57cae5ab12b168b75db8271f111019509a1196dfc780"},
- {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc14be025665dba6202b6a71cfcdb53210cc498e50068bc088076624471f8bb9"},
- {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af740fc2711ad85f1a5c034a435782fbd5b5f8314c9a3ef071424a8158d7f6b"},
- {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:841cd8233cbd2111a0ef0a522ce016357c5e3aff8a8ce92bcfa14cef890d698f"},
- {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed1c46fb119f1b59304b5ec89f834f07124cd23ae5b74288e364477641060ff"},
- {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84f8ae3e09a34f35c18fa57f015cc394bd1389bce02503fb30c394d04ee6b938"},
- {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62360cb771707cb70a6fd114b9871d20d7dd2163a0feafe43fd115cfe4fe845e"},
- {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:23fb25a9f0a1ca1f24c0a371523546366bb642397c94ab45ad3aedf2941cec6a"},
- {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0ba0d15164eae3d878260d4c4df859bbdc6466e9e6689c344a13334f988bb53"},
- {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5d20003b635fc6ae3f96d7260281dfaf1894fc3aa24d1888a9b2628e97c241e5"},
- {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0175d745d9e85c40dcc51c8f88c74bfbaef9e7afeeeb9d03c37977270303064c"},
- {file = "aiohttp-3.8.5-cp311-cp311-win32.whl", hash = "sha256:2e1b1e51b0774408f091d268648e3d57f7260c1682e7d3a63cb00d22d71bb945"},
- {file = "aiohttp-3.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:043d2299f6dfdc92f0ac5e995dfc56668e1587cea7f9aa9d8a78a1b6554e5755"},
- {file = "aiohttp-3.8.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cae533195e8122584ec87531d6df000ad07737eaa3c81209e85c928854d2195c"},
- {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f21e83f355643c345177a5d1d8079f9f28b5133bcd154193b799d380331d5d3"},
- {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a75ef35f2df54ad55dbf4b73fe1da96f370e51b10c91f08b19603c64004acc"},
- {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e2e9839e14dd5308ee773c97115f1e0a1cb1d75cbeeee9f33824fa5144c7634"},
- {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44e65da1de4403d0576473e2344828ef9c4c6244d65cf4b75549bb46d40b8dd"},
- {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d847e4cde6ecc19125ccbc9bfac4a7ab37c234dd88fbb3c5c524e8e14da543"},
- {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:c7a815258e5895d8900aec4454f38dca9aed71085f227537208057853f9d13f2"},
- {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:8b929b9bd7cd7c3939f8bcfffa92fae7480bd1aa425279d51a89327d600c704d"},
- {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:5db3a5b833764280ed7618393832e0853e40f3d3e9aa128ac0ba0f8278d08649"},
- {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:a0215ce6041d501f3155dc219712bc41252d0ab76474615b9700d63d4d9292af"},
- {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:fd1ed388ea7fbed22c4968dd64bab0198de60750a25fe8c0c9d4bef5abe13824"},
- {file = "aiohttp-3.8.5-cp36-cp36m-win32.whl", hash = "sha256:6e6783bcc45f397fdebc118d772103d751b54cddf5b60fbcc958382d7dd64f3e"},
- {file = "aiohttp-3.8.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b5411d82cddd212644cf9360879eb5080f0d5f7d809d03262c50dad02f01421a"},
- {file = "aiohttp-3.8.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:01d4c0c874aa4ddfb8098e85d10b5e875a70adc63db91f1ae65a4b04d3344cda"},
- {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5980a746d547a6ba173fd5ee85ce9077e72d118758db05d229044b469d9029a"},
- {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a482e6da906d5e6e653be079b29bc173a48e381600161c9932d89dfae5942ef"},
- {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80bd372b8d0715c66c974cf57fe363621a02f359f1ec81cba97366948c7fc873"},
- {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1161b345c0a444ebcf46bf0a740ba5dcf50612fd3d0528883fdc0eff578006a"},
- {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd56db019015b6acfaaf92e1ac40eb8434847d9bf88b4be4efe5bfd260aee692"},
- {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:153c2549f6c004d2754cc60603d4668899c9895b8a89397444a9c4efa282aaf4"},
- {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4a01951fabc4ce26ab791da5f3f24dca6d9a6f24121746eb19756416ff2d881b"},
- {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bfb9162dcf01f615462b995a516ba03e769de0789de1cadc0f916265c257e5d8"},
- {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7dde0009408969a43b04c16cbbe252c4f5ef4574ac226bc8815cd7342d2028b6"},
- {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4149d34c32f9638f38f544b3977a4c24052042affa895352d3636fa8bffd030a"},
- {file = "aiohttp-3.8.5-cp37-cp37m-win32.whl", hash = "sha256:68c5a82c8779bdfc6367c967a4a1b2aa52cd3595388bf5961a62158ee8a59e22"},
- {file = "aiohttp-3.8.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2cf57fb50be5f52bda004b8893e63b48530ed9f0d6c96c84620dc92fe3cd9b9d"},
- {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:eca4bf3734c541dc4f374ad6010a68ff6c6748f00451707f39857f429ca36ced"},
- {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1274477e4c71ce8cfe6c1ec2f806d57c015ebf84d83373676036e256bc55d690"},
- {file = "aiohttp-3.8.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:28c543e54710d6158fc6f439296c7865b29e0b616629767e685a7185fab4a6b9"},
- {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910bec0c49637d213f5d9877105d26e0c4a4de2f8b1b29405ff37e9fc0ad52b8"},
- {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5443910d662db951b2e58eb70b0fbe6b6e2ae613477129a5805d0b66c54b6cb7"},
- {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e460be6978fc24e3df83193dc0cc4de46c9909ed92dd47d349a452ef49325b7"},
- {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1558def481d84f03b45888473fc5a1f35747b5f334ef4e7a571bc0dfcb11f8"},
- {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34dd0c107799dcbbf7d48b53be761a013c0adf5571bf50c4ecad5643fe9cfcd0"},
- {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aa1990247f02a54185dc0dff92a6904521172a22664c863a03ff64c42f9b5410"},
- {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0e584a10f204a617d71d359fe383406305a4b595b333721fa50b867b4a0a1548"},
- {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a3cf433f127efa43fee6b90ea4c6edf6c4a17109d1d037d1a52abec84d8f2e42"},
- {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c11f5b099adafb18e65c2c997d57108b5bbeaa9eeee64a84302c0978b1ec948b"},
- {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:84de26ddf621d7ac4c975dbea4c945860e08cccde492269db4e1538a6a6f3c35"},
- {file = "aiohttp-3.8.5-cp38-cp38-win32.whl", hash = "sha256:ab88bafedc57dd0aab55fa728ea10c1911f7e4d8b43e1d838a1739f33712921c"},
- {file = "aiohttp-3.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:5798a9aad1879f626589f3df0f8b79b3608a92e9beab10e5fda02c8a2c60db2e"},
- {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a6ce61195c6a19c785df04e71a4537e29eaa2c50fe745b732aa937c0c77169f3"},
- {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:773dd01706d4db536335fcfae6ea2440a70ceb03dd3e7378f3e815b03c97ab51"},
- {file = "aiohttp-3.8.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f83a552443a526ea38d064588613aca983d0ee0038801bc93c0c916428310c28"},
- {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f7372f7341fcc16f57b2caded43e81ddd18df53320b6f9f042acad41f8e049a"},
- {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea353162f249c8097ea63c2169dd1aa55de1e8fecbe63412a9bc50816e87b761"},
- {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d47ae48db0b2dcf70bc8a3bc72b3de86e2a590fc299fdbbb15af320d2659de"},
- {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d827176898a2b0b09694fbd1088c7a31836d1a505c243811c87ae53a3f6273c1"},
- {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3562b06567c06439d8b447037bb655ef69786c590b1de86c7ab81efe1c9c15d8"},
- {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4e874cbf8caf8959d2adf572a78bba17cb0e9d7e51bb83d86a3697b686a0ab4d"},
- {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6809a00deaf3810e38c628e9a33271892f815b853605a936e2e9e5129762356c"},
- {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:33776e945d89b29251b33a7e7d006ce86447b2cfd66db5e5ded4e5cd0340585c"},
- {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eaeed7abfb5d64c539e2db173f63631455f1196c37d9d8d873fc316470dfbacd"},
- {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e91d635961bec2d8f19dfeb41a539eb94bd073f075ca6dae6c8dc0ee89ad6f91"},
- {file = "aiohttp-3.8.5-cp39-cp39-win32.whl", hash = "sha256:00ad4b6f185ec67f3e6562e8a1d2b69660be43070bd0ef6fcec5211154c7df67"},
- {file = "aiohttp-3.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:c0a9034379a37ae42dea7ac1e048352d96286626251862e448933c0f59cbd79c"},
- {file = "aiohttp-3.8.5.tar.gz", hash = "sha256:b9552ec52cc147dbf1944ac7ac98af7602e51ea2dcd076ed194ca3c0d1c7d0bc"},
+ {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1f80197f8b0b846a8d5cf7b7ec6084493950d0882cc5537fb7b96a69e3c8590"},
+ {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72444d17777865734aa1a4d167794c34b63e5883abb90356a0364a28904e6c0"},
+ {file = "aiohttp-3.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b05d5cbe9dafcdc733262c3a99ccf63d2f7ce02543620d2bd8db4d4f7a22f83"},
+ {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c4fa235d534b3547184831c624c0b7c1e262cd1de847d95085ec94c16fddcd5"},
+ {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:289ba9ae8e88d0ba16062ecf02dd730b34186ea3b1e7489046fc338bdc3361c4"},
+ {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bff7e2811814fa2271be95ab6e84c9436d027a0e59665de60edf44e529a42c1f"},
+ {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81b77f868814346662c96ab36b875d7814ebf82340d3284a31681085c051320f"},
+ {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b9c7426923bb7bd66d409da46c41e3fb40f5caf679da624439b9eba92043fa6"},
+ {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8d44e7bf06b0c0a70a20f9100af9fcfd7f6d9d3913e37754c12d424179b4e48f"},
+ {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22698f01ff5653fe66d16ffb7658f582a0ac084d7da1323e39fd9eab326a1f26"},
+ {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ca7ca5abfbfe8d39e653870fbe8d7710be7a857f8a8386fc9de1aae2e02ce7e4"},
+ {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:8d7f98fde213f74561be1d6d3fa353656197f75d4edfbb3d94c9eb9b0fc47f5d"},
+ {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5216b6082c624b55cfe79af5d538e499cd5f5b976820eac31951fb4325974501"},
+ {file = "aiohttp-3.9.1-cp310-cp310-win32.whl", hash = "sha256:0e7ba7ff228c0d9a2cd66194e90f2bca6e0abca810b786901a569c0de082f489"},
+ {file = "aiohttp-3.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:c7e939f1ae428a86e4abbb9a7c4732bf4706048818dfd979e5e2839ce0159f23"},
+ {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:df9cf74b9bc03d586fc53ba470828d7b77ce51b0582d1d0b5b2fb673c0baa32d"},
+ {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecca113f19d5e74048c001934045a2b9368d77b0b17691d905af18bd1c21275e"},
+ {file = "aiohttp-3.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8cef8710fb849d97c533f259103f09bac167a008d7131d7b2b0e3a33269185c0"},
+ {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea94403a21eb94c93386d559bce297381609153e418a3ffc7d6bf772f59cc35"},
+ {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91c742ca59045dce7ba76cab6e223e41d2c70d79e82c284a96411f8645e2afff"},
+ {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c93b7c2e52061f0925c3382d5cb8980e40f91c989563d3d32ca280069fd6a87"},
+ {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee2527134f95e106cc1653e9ac78846f3a2ec1004cf20ef4e02038035a74544d"},
+ {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11ff168d752cb41e8492817e10fb4f85828f6a0142b9726a30c27c35a1835f01"},
+ {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b8c3a67eb87394386847d188996920f33b01b32155f0a94f36ca0e0c635bf3e3"},
+ {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c7b5d5d64e2a14e35a9240b33b89389e0035e6de8dbb7ffa50d10d8b65c57449"},
+ {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:69985d50a2b6f709412d944ffb2e97d0be154ea90600b7a921f95a87d6f108a2"},
+ {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:c9110c06eaaac7e1f5562caf481f18ccf8f6fdf4c3323feab28a93d34cc646bd"},
+ {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737e69d193dac7296365a6dcb73bbbf53bb760ab25a3727716bbd42022e8d7a"},
+ {file = "aiohttp-3.9.1-cp311-cp311-win32.whl", hash = "sha256:4ee8caa925aebc1e64e98432d78ea8de67b2272252b0a931d2ac3bd876ad5544"},
+ {file = "aiohttp-3.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:a34086c5cc285be878622e0a6ab897a986a6e8bf5b67ecb377015f06ed316587"},
+ {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f800164276eec54e0af5c99feb9494c295118fc10a11b997bbb1348ba1a52065"},
+ {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:500f1c59906cd142d452074f3811614be04819a38ae2b3239a48b82649c08821"},
+ {file = "aiohttp-3.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0b0a6a36ed7e164c6df1e18ee47afbd1990ce47cb428739d6c99aaabfaf1b3af"},
+ {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69da0f3ed3496808e8cbc5123a866c41c12c15baaaead96d256477edf168eb57"},
+ {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176df045597e674fa950bf5ae536be85699e04cea68fa3a616cf75e413737eb5"},
+ {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b796b44111f0cab6bbf66214186e44734b5baab949cb5fb56154142a92989aeb"},
+ {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f27fdaadce22f2ef950fc10dcdf8048407c3b42b73779e48a4e76b3c35bca26c"},
+ {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcb6532b9814ea7c5a6a3299747c49de30e84472fa72821b07f5a9818bce0f66"},
+ {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:54631fb69a6e44b2ba522f7c22a6fb2667a02fd97d636048478db2fd8c4e98fe"},
+ {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4b4c452d0190c5a820d3f5c0f3cd8a28ace48c54053e24da9d6041bf81113183"},
+ {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:cae4c0c2ca800c793cae07ef3d40794625471040a87e1ba392039639ad61ab5b"},
+ {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:565760d6812b8d78d416c3c7cfdf5362fbe0d0d25b82fed75d0d29e18d7fc30f"},
+ {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54311eb54f3a0c45efb9ed0d0a8f43d1bc6060d773f6973efd90037a51cd0a3f"},
+ {file = "aiohttp-3.9.1-cp312-cp312-win32.whl", hash = "sha256:85c3e3c9cb1d480e0b9a64c658cd66b3cfb8e721636ab8b0e746e2d79a7a9eed"},
+ {file = "aiohttp-3.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:11cb254e397a82efb1805d12561e80124928e04e9c4483587ce7390b3866d213"},
+ {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8a22a34bc594d9d24621091d1b91511001a7eea91d6652ea495ce06e27381f70"},
+ {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:598db66eaf2e04aa0c8900a63b0101fdc5e6b8a7ddd805c56d86efb54eb66672"},
+ {file = "aiohttp-3.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c9376e2b09895c8ca8b95362283365eb5c03bdc8428ade80a864160605715f1"},
+ {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41473de252e1797c2d2293804e389a6d6986ef37cbb4a25208de537ae32141dd"},
+ {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c5857612c9813796960c00767645cb5da815af16dafb32d70c72a8390bbf690"},
+ {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffcd828e37dc219a72c9012ec44ad2e7e3066bec6ff3aaa19e7d435dbf4032ca"},
+ {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:219a16763dc0294842188ac8a12262b5671817042b35d45e44fd0a697d8c8361"},
+ {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f694dc8a6a3112059258a725a4ebe9acac5fe62f11c77ac4dcf896edfa78ca28"},
+ {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bcc0ea8d5b74a41b621ad4a13d96c36079c81628ccc0b30cfb1603e3dfa3a014"},
+ {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:90ec72d231169b4b8d6085be13023ece8fa9b1bb495e4398d847e25218e0f431"},
+ {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:cf2a0ac0615842b849f40c4d7f304986a242f1e68286dbf3bd7a835e4f83acfd"},
+ {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0e49b08eafa4f5707ecfb321ab9592717a319e37938e301d462f79b4e860c32a"},
+ {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2c59e0076ea31c08553e868cec02d22191c086f00b44610f8ab7363a11a5d9d8"},
+ {file = "aiohttp-3.9.1-cp38-cp38-win32.whl", hash = "sha256:4831df72b053b1eed31eb00a2e1aff6896fb4485301d4ccb208cac264b648db4"},
+ {file = "aiohttp-3.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:3135713c5562731ee18f58d3ad1bf41e1d8883eb68b363f2ffde5b2ea4b84cc7"},
+ {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cfeadf42840c1e870dc2042a232a8748e75a36b52d78968cda6736de55582766"},
+ {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:70907533db712f7aa791effb38efa96f044ce3d4e850e2d7691abd759f4f0ae0"},
+ {file = "aiohttp-3.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cdefe289681507187e375a5064c7599f52c40343a8701761c802c1853a504558"},
+ {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7481f581251bb5558ba9f635db70908819caa221fc79ee52a7f58392778c636"},
+ {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49f0c1b3c2842556e5de35f122fc0f0b721334ceb6e78c3719693364d4af8499"},
+ {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d406b01a9f5a7e232d1b0d161b40c05275ffbcbd772dc18c1d5a570961a1ca4"},
+ {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d8e4450e7fe24d86e86b23cc209e0023177b6d59502e33807b732d2deb6975f"},
+ {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0266cd6f005e99f3f51e583012de2778e65af6b73860038b968a0a8888487a"},
+ {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab221850108a4a063c5b8a70f00dd7a1975e5a1713f87f4ab26a46e5feac5a0e"},
+ {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c88a15f272a0ad3d7773cf3a37cc7b7d077cbfc8e331675cf1346e849d97a4e5"},
+ {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:237533179d9747080bcaad4d02083ce295c0d2eab3e9e8ce103411a4312991a0"},
+ {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:02ab6006ec3c3463b528374c4cdce86434e7b89ad355e7bf29e2f16b46c7dd6f"},
+ {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04fa38875e53eb7e354ece1607b1d2fdee2d175ea4e4d745f6ec9f751fe20c7c"},
+ {file = "aiohttp-3.9.1-cp39-cp39-win32.whl", hash = "sha256:82eefaf1a996060602f3cc1112d93ba8b201dbf5d8fd9611227de2003dddb3b7"},
+ {file = "aiohttp-3.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:9b05d33ff8e6b269e30a7957bd3244ffbce2a7a35a81b81c382629b80af1a8bf"},
+ {file = "aiohttp-3.9.1.tar.gz", hash = "sha256:8fc49a87ac269d4529da45871e2ffb6874e87779c3d0e2ccd813c0899221239d"},
]
[package.dependencies]
aiosignal = ">=1.1.2"
-async-timeout = ">=4.0.0a3,<5.0"
+async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""}
attrs = ">=17.3.0"
-charset-normalizer = ">=2.0,<4.0"
frozenlist = ">=1.1.1"
multidict = ">=4.5,<7.0"
yarl = ">=1.0,<2.0"
[package.extras]
-speedups = ["Brotli", "aiodns", "cchardet"]
+speedups = ["Brotli", "aiodns", "brotlicffi"]
[[package]]
name = "aiosignal"
@@ -124,52 +129,43 @@ frozenlist = ">=1.1.0"
[[package]]
name = "anyio"
-version = "4.0.0"
+version = "4.2.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
- {file = "anyio-4.0.0-py3-none-any.whl", hash = "sha256:cfdb2b588b9fc25ede96d8db56ed50848b0b649dca3dd1df0b11f683bb9e0b5f"},
- {file = "anyio-4.0.0.tar.gz", hash = "sha256:f7ed51751b2c2add651e5747c891b47e26d2a21be5d32d9311dfe9692f3e5d7a"},
+ {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"},
+ {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
+typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
-doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)"]
-test = ["anyio[trio]", "coverage[toml] (>=7)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
-trio = ["trio (>=0.22)"]
-
-[[package]]
-name = "appnope"
-version = "0.1.3"
-description = "Disable App Nap on macOS >= 10.9"
-optional = false
-python-versions = "*"
-files = [
- {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"},
- {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"},
-]
+doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (>=0.23)"]
[[package]]
name = "asttokens"
-version = "2.4.0"
+version = "2.4.1"
description = "Annotate AST trees with source code positions"
optional = false
python-versions = "*"
files = [
- {file = "asttokens-2.4.0-py2.py3-none-any.whl", hash = "sha256:cf8fc9e61a86461aa9fb161a14a0841a03c405fa829ac6b202670b3495d2ce69"},
- {file = "asttokens-2.4.0.tar.gz", hash = "sha256:2e0171b991b2c959acc6c49318049236844a5da1d65ba2672c4880c1c894834e"},
+ {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"},
+ {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"},
]
[package.dependencies]
six = ">=1.12.0"
[package.extras]
-test = ["astroid", "pytest"]
+astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"]
+test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"]
[[package]]
name = "async-timeout"
@@ -184,21 +180,22 @@ files = [
[[package]]
name = "attrs"
-version = "23.1.0"
+version = "23.2.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.7"
files = [
- {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
- {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+ {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"},
+ {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"},
]
[package.extras]
cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
-dev = ["attrs[docs,tests]", "pre-commit"]
+dev = ["attrs[tests]", "pre-commit"]
docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
tests = ["attrs[tests-no-zope]", "zope-interface"]
-tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"]
+tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"]
[[package]]
name = "autoflake"
@@ -216,17 +213,6 @@ pyflakes = ">=1.1.0,<3"
tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""}
[[package]]
-name = "backcall"
-version = "0.2.0"
-description = "Specifications for callback functions passed in to an API"
-optional = false
-python-versions = "*"
-files = [
- {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"},
- {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
-]
-
-[[package]]
name = "black"
version = "22.3.0"
description = "The uncompromising code formatter."
@@ -273,24 +259,24 @@ uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "cachetools"
-version = "5.3.1"
+version = "5.3.2"
description = "Extensible memoizing collections and decorators"
optional = false
python-versions = ">=3.7"
files = [
- {file = "cachetools-5.3.1-py3-none-any.whl", hash = "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590"},
- {file = "cachetools-5.3.1.tar.gz", hash = "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b"},
+ {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"},
+ {file = "cachetools-5.3.2.tar.gz", hash = "sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2"},
]
[[package]]
name = "certifi"
-version = "2023.7.22"
+version = "2023.11.17"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
- {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+ {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"},
+ {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"},
]
[[package]]
@@ -370,86 +356,101 @@ files = [
[[package]]
name = "charset-normalizer"
-version = "3.2.0"
+version = "3.3.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
- {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"},
- {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"},
- {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"},
- {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"},
- {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"},
- {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"},
- {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"},
+ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
+ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
]
[[package]]
@@ -467,6 +468,23 @@ files = [
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
+name = "click-default-group"
+version = "1.2.4"
+description = "click_default_group"
+optional = false
+python-versions = ">=2.7"
+files = [
+ {file = "click_default_group-1.2.4-py2.py3-none-any.whl", hash = "sha256:9b60486923720e7fc61731bdb32b617039aba820e22e1c88766b1125592eaa5f"},
+ {file = "click_default_group-1.2.4.tar.gz", hash = "sha256:eb3f3c99ec0d456ca6cd2a7f08f7d4e91771bef51b01bdd9580cc6450fe1251e"},
+]
+
+[package.dependencies]
+click = "*"
+
+[package.extras]
+test = ["pytest"]
+
+[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
@@ -479,142 +497,76 @@ files = [
[[package]]
name = "contourpy"
-version = "1.1.0"
-description = "Python library for calculating contours of 2D quadrilateral grids"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "contourpy-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc"},
- {file = "contourpy-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dffcc2ddec1782dd2f2ce1ef16f070861af4fb78c69862ce0aab801495dda6a3"},
- {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25ae46595e22f93592d39a7eac3d638cda552c3e1160255258b695f7b58e5655"},
- {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17cfaf5ec9862bc93af1ec1f302457371c34e688fbd381f4035a06cd47324f48"},
- {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37"},
- {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa"},
- {file = "contourpy-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa"},
- {file = "contourpy-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2"},
- {file = "contourpy-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882"},
- {file = "contourpy-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545"},
- {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:052cc634bf903c604ef1a00a5aa093c54f81a2612faedaa43295809ffdde885e"},
- {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9382a1c0bc46230fb881c36229bfa23d8c303b889b788b939365578d762b5c18"},
- {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e"},
- {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a"},
- {file = "contourpy-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e"},
- {file = "contourpy-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256"},
- {file = "contourpy-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed"},
- {file = "contourpy-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae"},
- {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62013a2cf68abc80dadfd2307299bfa8f5aa0dcaec5b2954caeb5fa094171103"},
- {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b6616375d7de55797d7a66ee7d087efe27f03d336c27cf1f32c02b8c1a5ac70"},
- {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94"},
- {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f"},
- {file = "contourpy-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1"},
- {file = "contourpy-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4"},
- {file = "contourpy-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9"},
- {file = "contourpy-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a"},
- {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f2931ed4741f98f74b410b16e5213f71dcccee67518970c42f64153ea9313b9"},
- {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30f511c05fab7f12e0b1b7730ebdc2ec8deedcfb505bc27eb570ff47c51a8f15"},
- {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f"},
- {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a"},
- {file = "contourpy-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002"},
- {file = "contourpy-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d"},
- {file = "contourpy-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493"},
- {file = "contourpy-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439"},
- {file = "contourpy-1.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a67259c2b493b00e5a4d0f7bfae51fb4b3371395e47d079a4446e9b0f4d70e76"},
- {file = "contourpy-1.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2b836d22bd2c7bb2700348e4521b25e077255ebb6ab68e351ab5aa91ca27e027"},
- {file = "contourpy-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084eaa568400cfaf7179b847ac871582199b1b44d5699198e9602ecbbb5f6104"},
- {file = "contourpy-1.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:911ff4fd53e26b019f898f32db0d4956c9d227d51338fb3b03ec72ff0084ee5f"},
- {file = "contourpy-1.1.0.tar.gz", hash = "sha256:e53046c3863828d21d531cc3b53786e6580eb1ba02477e8681009b6aa0870b21"},
-]
-
-[package.dependencies]
-numpy = ">=1.16"
-
-[package.extras]
-bokeh = ["bokeh", "selenium"]
-docs = ["furo", "sphinx-copybutton"]
-mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.2.0)", "types-Pillow"]
-test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
-test-no-images = ["pytest", "pytest-cov", "wurlitzer"]
-
-[[package]]
-name = "contourpy"
-version = "1.1.1"
+version = "1.2.0"
description = "Python library for calculating contours of 2D quadrilateral grids"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"},
- {file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"},
- {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae"},
- {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916"},
- {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0"},
- {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1"},
- {file = "contourpy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d"},
- {file = "contourpy-1.1.1-cp310-cp310-win32.whl", hash = "sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431"},
- {file = "contourpy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb"},
- {file = "contourpy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2"},
- {file = "contourpy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b"},
- {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b"},
- {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532"},
- {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e"},
- {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5"},
- {file = "contourpy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62"},
- {file = "contourpy-1.1.1-cp311-cp311-win32.whl", hash = "sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33"},
- {file = "contourpy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45"},
- {file = "contourpy-1.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a"},
- {file = "contourpy-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e"},
- {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442"},
- {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8"},
- {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7"},
- {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf"},
- {file = "contourpy-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d"},
- {file = "contourpy-1.1.1-cp312-cp312-win32.whl", hash = "sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6"},
- {file = "contourpy-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970"},
- {file = "contourpy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d"},
- {file = "contourpy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9"},
- {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217"},
- {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684"},
- {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce"},
- {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8"},
- {file = "contourpy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251"},
- {file = "contourpy-1.1.1-cp38-cp38-win32.whl", hash = "sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7"},
- {file = "contourpy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9"},
- {file = "contourpy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba"},
- {file = "contourpy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34"},
- {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887"},
- {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718"},
- {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f"},
- {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85"},
- {file = "contourpy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e"},
- {file = "contourpy-1.1.1-cp39-cp39-win32.whl", hash = "sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0"},
- {file = "contourpy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887"},
- {file = "contourpy-1.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e"},
- {file = "contourpy-1.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3"},
- {file = "contourpy-1.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23"},
- {file = "contourpy-1.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb"},
- {file = "contourpy-1.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163"},
- {file = "contourpy-1.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c"},
- {file = "contourpy-1.1.1.tar.gz", hash = "sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab"},
+ {file = "contourpy-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0274c1cb63625972c0c007ab14dd9ba9e199c36ae1a231ce45d725cbcbfd10a8"},
+ {file = "contourpy-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ab459a1cbbf18e8698399c595a01f6dcc5c138220ca3ea9e7e6126232d102bb4"},
+ {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fdd887f17c2f4572ce548461e4f96396681212d858cae7bd52ba3310bc6f00f"},
+ {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d16edfc3fc09968e09ddffada434b3bf989bf4911535e04eada58469873e28e"},
+ {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c203f617abc0dde5792beb586f827021069fb6d403d7f4d5c2b543d87edceb9"},
+ {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b69303ceb2e4d4f146bf82fda78891ef7bcd80c41bf16bfca3d0d7eb545448aa"},
+ {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:884c3f9d42d7218304bc74a8a7693d172685c84bd7ab2bab1ee567b769696df9"},
+ {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4a1b1208102be6e851f20066bf0e7a96b7d48a07c9b0cfe6d0d4545c2f6cadab"},
+ {file = "contourpy-1.2.0-cp310-cp310-win32.whl", hash = "sha256:34b9071c040d6fe45d9826cbbe3727d20d83f1b6110d219b83eb0e2a01d79488"},
+ {file = "contourpy-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:bd2f1ae63998da104f16a8b788f685e55d65760cd1929518fd94cd682bf03e41"},
+ {file = "contourpy-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd10c26b4eadae44783c45ad6655220426f971c61d9b239e6f7b16d5cdaaa727"},
+ {file = "contourpy-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5c6b28956b7b232ae801406e529ad7b350d3f09a4fde958dfdf3c0520cdde0dd"},
+ {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebeac59e9e1eb4b84940d076d9f9a6cec0064e241818bcb6e32124cc5c3e377a"},
+ {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:139d8d2e1c1dd52d78682f505e980f592ba53c9f73bd6be102233e358b401063"},
+ {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e9dc350fb4c58adc64df3e0703ab076f60aac06e67d48b3848c23647ae4310e"},
+ {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18fc2b4ed8e4a8fe849d18dce4bd3c7ea637758c6343a1f2bae1e9bd4c9f4686"},
+ {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:16a7380e943a6d52472096cb7ad5264ecee36ed60888e2a3d3814991a0107286"},
+ {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8d8faf05be5ec8e02a4d86f616fc2a0322ff4a4ce26c0f09d9f7fb5330a35c95"},
+ {file = "contourpy-1.2.0-cp311-cp311-win32.whl", hash = "sha256:67b7f17679fa62ec82b7e3e611c43a016b887bd64fb933b3ae8638583006c6d6"},
+ {file = "contourpy-1.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:99ad97258985328b4f207a5e777c1b44a83bfe7cf1f87b99f9c11d4ee477c4de"},
+ {file = "contourpy-1.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:575bcaf957a25d1194903a10bc9f316c136c19f24e0985a2b9b5608bdf5dbfe0"},
+ {file = "contourpy-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9e6c93b5b2dbcedad20a2f18ec22cae47da0d705d454308063421a3b290d9ea4"},
+ {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:464b423bc2a009088f19bdf1f232299e8b6917963e2b7e1d277da5041f33a779"},
+ {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:68ce4788b7d93e47f84edd3f1f95acdcd142ae60bc0e5493bfd120683d2d4316"},
+ {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7d1f8871998cdff5d2ff6a087e5e1780139abe2838e85b0b46b7ae6cc25399"},
+ {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e739530c662a8d6d42c37c2ed52a6f0932c2d4a3e8c1f90692ad0ce1274abe0"},
+ {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:247b9d16535acaa766d03037d8e8fb20866d054d3c7fbf6fd1f993f11fc60ca0"},
+ {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:461e3ae84cd90b30f8d533f07d87c00379644205b1d33a5ea03381edc4b69431"},
+ {file = "contourpy-1.2.0-cp312-cp312-win32.whl", hash = "sha256:1c2559d6cffc94890b0529ea7eeecc20d6fadc1539273aa27faf503eb4656d8f"},
+ {file = "contourpy-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:491b1917afdd8638a05b611a56d46587d5a632cabead889a5440f7c638bc6ed9"},
+ {file = "contourpy-1.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5fd1810973a375ca0e097dee059c407913ba35723b111df75671a1976efa04bc"},
+ {file = "contourpy-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:999c71939aad2780f003979b25ac5b8f2df651dac7b38fb8ce6c46ba5abe6ae9"},
+ {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7caf9b241464c404613512d5594a6e2ff0cc9cb5615c9475cc1d9b514218ae8"},
+ {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:266270c6f6608340f6c9836a0fb9b367be61dde0c9a9a18d5ece97774105ff3e"},
+ {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbd50d0a0539ae2e96e537553aff6d02c10ed165ef40c65b0e27e744a0f10af8"},
+ {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11f8d2554e52f459918f7b8e6aa20ec2a3bce35ce95c1f0ef4ba36fbda306df5"},
+ {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ce96dd400486e80ac7d195b2d800b03e3e6a787e2a522bfb83755938465a819e"},
+ {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d3364b999c62f539cd403f8123ae426da946e142312a514162adb2addd8d808"},
+ {file = "contourpy-1.2.0-cp39-cp39-win32.whl", hash = "sha256:1c88dfb9e0c77612febebb6ac69d44a8d81e3dc60f993215425b62c1161353f4"},
+ {file = "contourpy-1.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:78e6ad33cf2e2e80c5dfaaa0beec3d61face0fb650557100ee36db808bfa6843"},
+ {file = "contourpy-1.2.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:be16975d94c320432657ad2402f6760990cb640c161ae6da1363051805fa8108"},
+ {file = "contourpy-1.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b95a225d4948b26a28c08307a60ac00fb8671b14f2047fc5476613252a129776"},
+ {file = "contourpy-1.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d7e03c0f9a4f90dc18d4e77e9ef4ec7b7bbb437f7f675be8e530d65ae6ef956"},
+ {file = "contourpy-1.2.0.tar.gz", hash = "sha256:171f311cb758de7da13fc53af221ae47a5877be5a0843a9fe150818c51ed276a"},
]
[package.dependencies]
-numpy = {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""}
+numpy = ">=1.20,<2.0"
[package.extras]
bokeh = ["bokeh", "selenium"]
docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"]
-mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pillow"]
+mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.6.1)", "types-Pillow"]
test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
-test-no-images = ["pytest", "pytest-cov", "wurlitzer"]
+test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"]
[[package]]
name = "cycler"
-version = "0.12.0"
+version = "0.12.1"
description = "Composable style cycles"
optional = false
python-versions = ">=3.8"
files = [
- {file = "cycler-0.12.0-py3-none-any.whl", hash = "sha256:7896994252d006771357777d0251f3e34d266f4fa5f2c572247a80ab01440947"},
- {file = "cycler-0.12.0.tar.gz", hash = "sha256:8cc3a7b4861f91b1095157f9916f748549a617046e67eb7619abed9b34d2c94a"},
+ {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"},
+ {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"},
]
[package.extras]
@@ -634,24 +586,24 @@ files = [
[[package]]
name = "distlib"
-version = "0.3.7"
+version = "0.3.8"
description = "Distribution utilities"
optional = false
python-versions = "*"
files = [
- {file = "distlib-0.3.7-py2.py3-none-any.whl", hash = "sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057"},
- {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"},
+ {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"},
+ {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"},
]
[[package]]
name = "exceptiongroup"
-version = "1.1.3"
+version = "1.2.0"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
- {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"},
- {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"},
+ {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"},
+ {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"},
]
[package.extras]
@@ -659,17 +611,17 @@ test = ["pytest (>=6)"]
[[package]]
name = "executing"
-version = "1.2.0"
+version = "2.0.1"
description = "Get the currently executing AST node of a frame, and other information"
optional = false
-python-versions = "*"
+python-versions = ">=3.5"
files = [
- {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"},
- {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"},
+ {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"},
+ {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"},
]
[package.extras]
-tests = ["asttokens", "littleutils", "pytest", "rich"]
+tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"]
[[package]]
name = "fastapi"
@@ -692,19 +644,19 @@ all = ["email-validator (>=1.1.1)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)"
[[package]]
name = "filelock"
-version = "3.12.4"
+version = "3.13.1"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.8"
files = [
- {file = "filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4"},
- {file = "filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"},
+ {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"},
+ {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"},
]
[package.extras]
-docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"]
-testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"]
-typing = ["typing-extensions (>=4.7.1)"]
+docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"]
+typing = ["typing-extensions (>=4.8)"]
[[package]]
name = "flake8"
@@ -724,51 +676,59 @@ pyflakes = ">=2.3.0,<2.4.0"
[[package]]
name = "fonttools"
-version = "4.42.1"
+version = "4.47.0"
description = "Tools to manipulate font files"
optional = false
python-versions = ">=3.8"
files = [
- {file = "fonttools-4.42.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ed1a13a27f59d1fc1920394a7f596792e9d546c9ca5a044419dca70c37815d7c"},
- {file = "fonttools-4.42.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9b1ce7a45978b821a06d375b83763b27a3a5e8a2e4570b3065abad240a18760"},
- {file = "fonttools-4.42.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f720fa82a11c0f9042376fd509b5ed88dab7e3cd602eee63a1af08883b37342b"},
- {file = "fonttools-4.42.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db55cbaea02a20b49fefbd8e9d62bd481aaabe1f2301dabc575acc6b358874fa"},
- {file = "fonttools-4.42.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a35981d90feebeaef05e46e33e6b9e5b5e618504672ca9cd0ff96b171e4bfff"},
- {file = "fonttools-4.42.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:68a02bbe020dc22ee0540e040117535f06df9358106d3775e8817d826047f3fd"},
- {file = "fonttools-4.42.1-cp310-cp310-win32.whl", hash = "sha256:12a7c247d1b946829bfa2f331107a629ea77dc5391dfd34fdcd78efa61f354ca"},
- {file = "fonttools-4.42.1-cp310-cp310-win_amd64.whl", hash = "sha256:a398bdadb055f8de69f62b0fc70625f7cbdab436bbb31eef5816e28cab083ee8"},
- {file = "fonttools-4.42.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:689508b918332fb40ce117131633647731d098b1b10d092234aa959b4251add5"},
- {file = "fonttools-4.42.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e36344e48af3e3bde867a1ca54f97c308735dd8697005c2d24a86054a114a71"},
- {file = "fonttools-4.42.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19b7db825c8adee96fac0692e6e1ecd858cae9affb3b4812cdb9d934a898b29e"},
- {file = "fonttools-4.42.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:113337c2d29665839b7d90b39f99b3cac731f72a0eda9306165a305c7c31d341"},
- {file = "fonttools-4.42.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:37983b6bdab42c501202500a2be3a572f50d4efe3237e0686ee9d5f794d76b35"},
- {file = "fonttools-4.42.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6ed2662a3d9c832afa36405f8748c250be94ae5dfc5283d668308391f2102861"},
- {file = "fonttools-4.42.1-cp311-cp311-win32.whl", hash = "sha256:179737095eb98332a2744e8f12037b2977f22948cf23ff96656928923ddf560a"},
- {file = "fonttools-4.42.1-cp311-cp311-win_amd64.whl", hash = "sha256:f2b82f46917d8722e6b5eafeefb4fb585d23babd15d8246c664cd88a5bddd19c"},
- {file = "fonttools-4.42.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:62f481ac772fd68901573956231aea3e4b1ad87b9b1089a61613a91e2b50bb9b"},
- {file = "fonttools-4.42.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f2f806990160d1ce42d287aa419df3ffc42dfefe60d473695fb048355fe0c6a0"},
- {file = "fonttools-4.42.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db372213d39fa33af667c2aa586a0c1235e88e9c850f5dd5c8e1f17515861868"},
- {file = "fonttools-4.42.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d18fc642fd0ac29236ff88ecfccff229ec0386090a839dd3f1162e9a7944a40"},
- {file = "fonttools-4.42.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8708b98c278012ad267ee8a7433baeb809948855e81922878118464b274c909d"},
- {file = "fonttools-4.42.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c95b0724a6deea2c8c5d3222191783ced0a2f09bd6d33f93e563f6f1a4b3b3a4"},
- {file = "fonttools-4.42.1-cp38-cp38-win32.whl", hash = "sha256:4aa79366e442dbca6e2c8595645a3a605d9eeabdb7a094d745ed6106816bef5d"},
- {file = "fonttools-4.42.1-cp38-cp38-win_amd64.whl", hash = "sha256:acb47f6f8680de24c1ab65ebde39dd035768e2a9b571a07c7b8da95f6c8815fd"},
- {file = "fonttools-4.42.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb289b7a815638a7613d46bcf324c9106804725b2bb8ad913c12b6958ffc4ec"},
- {file = "fonttools-4.42.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:53eb5091ddc8b1199330bb7b4a8a2e7995ad5d43376cadce84523d8223ef3136"},
- {file = "fonttools-4.42.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46a0ec8adbc6ff13494eb0c9c2e643b6f009ce7320cf640de106fb614e4d4360"},
- {file = "fonttools-4.42.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cc7d685b8eeca7ae69dc6416833fbfea61660684b7089bca666067cb2937dcf"},
- {file = "fonttools-4.42.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:be24fcb80493b2c94eae21df70017351851652a37de514de553435b256b2f249"},
- {file = "fonttools-4.42.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:515607ec756d7865f23070682622c49d922901943697871fc292277cf1e71967"},
- {file = "fonttools-4.42.1-cp39-cp39-win32.whl", hash = "sha256:0eb79a2da5eb6457a6f8ab904838454accc7d4cccdaff1fd2bd3a0679ea33d64"},
- {file = "fonttools-4.42.1-cp39-cp39-win_amd64.whl", hash = "sha256:7286aed4ea271df9eab8d7a9b29e507094b51397812f7ce051ecd77915a6e26b"},
- {file = "fonttools-4.42.1-py3-none-any.whl", hash = "sha256:9398f244e28e0596e2ee6024f808b06060109e33ed38dcc9bded452fd9bbb853"},
- {file = "fonttools-4.42.1.tar.gz", hash = "sha256:c391cd5af88aacaf41dd7cfb96eeedfad297b5899a39e12f4c2c3706d0a3329d"},
+ {file = "fonttools-4.47.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d2404107626f97a221dc1a65b05396d2bb2ce38e435f64f26ed2369f68675d9"},
+ {file = "fonttools-4.47.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c01f409be619a9a0f5590389e37ccb58b47264939f0e8d58bfa1f3ba07d22671"},
+ {file = "fonttools-4.47.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d986b66ff722ef675b7ee22fbe5947a41f60a61a4da15579d5e276d897fbc7fa"},
+ {file = "fonttools-4.47.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8acf6dd0434b211b3bd30d572d9e019831aae17a54016629fa8224783b22df8"},
+ {file = "fonttools-4.47.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:495369c660e0c27233e3c572269cbe520f7f4978be675f990f4005937337d391"},
+ {file = "fonttools-4.47.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c59227d7ba5b232281c26ae04fac2c73a79ad0e236bca5c44aae904a18f14faf"},
+ {file = "fonttools-4.47.0-cp310-cp310-win32.whl", hash = "sha256:59a6c8b71a245800e923cb684a2dc0eac19c56493e2f896218fcf2571ed28984"},
+ {file = "fonttools-4.47.0-cp310-cp310-win_amd64.whl", hash = "sha256:52c82df66201f3a90db438d9d7b337c7c98139de598d0728fb99dab9fd0495ca"},
+ {file = "fonttools-4.47.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:854421e328d47d70aa5abceacbe8eef231961b162c71cbe7ff3f47e235e2e5c5"},
+ {file = "fonttools-4.47.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:511482df31cfea9f697930f61520f6541185fa5eeba2fa760fe72e8eee5af88b"},
+ {file = "fonttools-4.47.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0e2c88c8c985b7b9a7efcd06511fb0a1fe3ddd9a6cd2895ef1dbf9059719d7"},
+ {file = "fonttools-4.47.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7a0a8848726956e9d9fb18c977a279013daadf0cbb6725d2015a6dd57527992"},
+ {file = "fonttools-4.47.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e869da810ae35afb3019baa0d0306cdbab4760a54909c89ad8904fa629991812"},
+ {file = "fonttools-4.47.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dd23848f877c3754f53a4903fb7a593ed100924f9b4bff7d5a4e2e8a7001ae11"},
+ {file = "fonttools-4.47.0-cp311-cp311-win32.whl", hash = "sha256:bf1810635c00f7c45d93085611c995fc130009cec5abdc35b327156aa191f982"},
+ {file = "fonttools-4.47.0-cp311-cp311-win_amd64.whl", hash = "sha256:61df4dee5d38ab65b26da8efd62d859a1eef7a34dcbc331299a28e24d04c59a7"},
+ {file = "fonttools-4.47.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e3f4d61f3a8195eac784f1d0c16c0a3105382c1b9a74d99ac4ba421da39a8826"},
+ {file = "fonttools-4.47.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:174995f7b057e799355b393e97f4f93ef1f2197cbfa945e988d49b2a09ecbce8"},
+ {file = "fonttools-4.47.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea592e6a09b71cb7a7661dd93ac0b877a6228e2d677ebacbad0a4d118494c86d"},
+ {file = "fonttools-4.47.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40bdbe90b33897d9cc4a39f8e415b0fcdeae4c40a99374b8a4982f127ff5c767"},
+ {file = "fonttools-4.47.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:843509ae9b93db5aaf1a6302085e30bddc1111d31e11d724584818f5b698f500"},
+ {file = "fonttools-4.47.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9acfa1cdc479e0dde528b61423855913d949a7f7fe09e276228298fef4589540"},
+ {file = "fonttools-4.47.0-cp312-cp312-win32.whl", hash = "sha256:66c92ec7f95fd9732550ebedefcd190a8d81beaa97e89d523a0d17198a8bda4d"},
+ {file = "fonttools-4.47.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8fa20748de55d0021f83754b371432dca0439e02847962fc4c42a0e444c2d78"},
+ {file = "fonttools-4.47.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c75e19971209fbbce891ebfd1b10c37320a5a28e8d438861c21d35305aedb81c"},
+ {file = "fonttools-4.47.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e79f1a3970d25f692bbb8c8c2637e621a66c0d60c109ab48d4a160f50856deff"},
+ {file = "fonttools-4.47.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:562681188c62c024fe2c611b32e08b8de2afa00c0c4e72bed47c47c318e16d5c"},
+ {file = "fonttools-4.47.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a77a60315c33393b2bd29d538d1ef026060a63d3a49a9233b779261bad9c3f71"},
+ {file = "fonttools-4.47.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4fabb8cc9422efae1a925160083fdcbab8fdc96a8483441eb7457235df625bd"},
+ {file = "fonttools-4.47.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a78dba8c2a1e9d53a0fb5382979f024200dc86adc46a56cbb668a2249862fda"},
+ {file = "fonttools-4.47.0-cp38-cp38-win32.whl", hash = "sha256:e6b968543fde4119231c12c2a953dcf83349590ca631ba8216a8edf9cd4d36a9"},
+ {file = "fonttools-4.47.0-cp38-cp38-win_amd64.whl", hash = "sha256:4a9a51745c0439516d947480d4d884fa18bd1458e05b829e482b9269afa655bc"},
+ {file = "fonttools-4.47.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:62d8ddb058b8e87018e5dc26f3258e2c30daad4c87262dfeb0e2617dd84750e6"},
+ {file = "fonttools-4.47.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5dde0eab40faaa5476133123f6a622a1cc3ac9b7af45d65690870620323308b4"},
+ {file = "fonttools-4.47.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4da089f6dfdb822293bde576916492cd708c37c2501c3651adde39804630538"},
+ {file = "fonttools-4.47.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:253bb46bab970e8aae254cebf2ae3db98a4ef6bd034707aa68a239027d2b198d"},
+ {file = "fonttools-4.47.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1193fb090061efa2f9e2d8d743ae9850c77b66746a3b32792324cdce65784154"},
+ {file = "fonttools-4.47.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:084511482dd265bce6dca24c509894062f0117e4e6869384d853f46c0e6d43be"},
+ {file = "fonttools-4.47.0-cp39-cp39-win32.whl", hash = "sha256:97620c4af36e4c849e52661492e31dc36916df12571cb900d16960ab8e92a980"},
+ {file = "fonttools-4.47.0-cp39-cp39-win_amd64.whl", hash = "sha256:e77bdf52185bdaf63d39f3e1ac3212e6cfa3ab07d509b94557a8902ce9c13c82"},
+ {file = "fonttools-4.47.0-py3-none-any.whl", hash = "sha256:d6477ba902dd2d7adda7f0fd3bfaeb92885d45993c9e1928c9f28fc3961415f7"},
+ {file = "fonttools-4.47.0.tar.gz", hash = "sha256:ec13a10715eef0e031858c1c23bfaee6cba02b97558e4a7bfa089dba4a8c2ebf"},
]
[package.extras]
-all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"]
+all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"]
graphite = ["lz4 (>=1.7.4.2)"]
-interpolatable = ["munkres", "scipy"]
+interpolatable = ["munkres", "pycairo", "scipy"]
lxml = ["lxml (>=4.0,<5)"]
pathops = ["skia-pathops (>=0.5.0)"]
plot = ["matplotlib"]
@@ -776,88 +736,104 @@ repacker = ["uharfbuzz (>=0.23.0)"]
symfont = ["sympy"]
type1 = ["xattr"]
ufo = ["fs (>=2.2.0,<3)"]
-unicode = ["unicodedata2 (>=15.0.0)"]
+unicode = ["unicodedata2 (>=15.1.0)"]
woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
[[package]]
name = "frozenlist"
-version = "1.4.0"
+version = "1.4.1"
description = "A list-like structure which implements collections.abc.MutableSequence"
optional = false
python-versions = ">=3.8"
files = [
- {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"},
- {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"},
- {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"},
- {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"},
- {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"},
- {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"},
- {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"},
- {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"},
- {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"},
- {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"},
- {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"},
- {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"},
- {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"},
- {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"},
- {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"},
- {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"},
- {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"},
- {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"},
- {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"},
- {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"},
- {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"},
- {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"},
- {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"},
- {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"},
- {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"},
- {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"},
- {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"},
- {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"},
- {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"},
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"},
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"},
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"},
+ {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"},
+ {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"},
+ {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"},
+ {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"},
+ {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"},
+ {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"},
+ {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"},
+ {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"},
+ {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"},
+ {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"},
+ {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"},
+ {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"},
]

[[package]]
name = "gitdb"
-version = "4.0.10"
+version = "4.0.11"
description = "Git Object Database"
optional = false
python-versions = ">=3.7"
files = [
- {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"},
- {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"},
+ {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"},
+ {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"},
]

[package.dependencies]
@@ -865,30 +841,30 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.37"
+version = "3.1.40"
description = "GitPython is a Python library used to interact with Git repositories"
optional = false
python-versions = ">=3.7"
files = [
- {file = "GitPython-3.1.37-py3-none-any.whl", hash = "sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33"},
- {file = "GitPython-3.1.37.tar.gz", hash = "sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54"},
+ {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"},
+ {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"},
]

[package.dependencies]
gitdb = ">=4.0.1,<5"

[package.extras]
-test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-sugar"]
+test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"]
[[package]]
name = "google-auth"
-version = "2.23.2"
+version = "2.25.2"
description = "Google Authentication Library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google-auth-2.23.2.tar.gz", hash = "sha256:5a9af4be520ba33651471a0264eead312521566f44631cbb621164bc30c8fd40"},
- {file = "google_auth-2.23.2-py2.py3-none-any.whl", hash = "sha256:c2e253347579d483004f17c3bd0bf92e611ef6c7ba24d41c5c59f2e7aeeaf088"},
+ {file = "google-auth-2.25.2.tar.gz", hash = "sha256:42f707937feb4f5e5a39e6c4f343a17300a459aaf03141457ba505812841cc40"},
+ {file = "google_auth-2.25.2-py2.py3-none-any.whl", hash = "sha256:473a8dfd0135f75bb79d878436e568f2695dce456764bf3a02b6f8c540b1d256"},
]

[package.dependencies]
@@ -905,13 +881,13 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"]
[[package]]
name = "google-auth-oauthlib"
-version = "1.1.0"
+version = "1.2.0"
description = "Google Authentication Library"
optional = false
python-versions = ">=3.6"
files = [
- {file = "google-auth-oauthlib-1.1.0.tar.gz", hash = "sha256:83ea8c3b0881e453790baff4448e8a6112ac8778d1de9da0b68010b843937afb"},
- {file = "google_auth_oauthlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:089c6e587d36f4803ac7e0720c045c6a8b1fd1790088b8424975b90d0ee61c12"},
+ {file = "google-auth-oauthlib-1.2.0.tar.gz", hash = "sha256:292d2d3783349f2b0734a0a0207b1e1e322ac193c2c09d8f7c613fb7cc501ea8"},
+ {file = "google_auth_oauthlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:297c1ce4cb13a99b5834c74a1fe03252e1e499716718b190f56bcb9c4abc4faf"},
]

[package.dependencies]
@@ -923,13 +899,13 @@ tool = ["click (>=6.0.0)"]
[[package]]
name = "gspread"
-version = "5.11.2"
+version = "5.12.4"
description = "Google Spreadsheets Python API"
optional = false
python-versions = ">=3.7"
files = [
- {file = "gspread-5.11.2-py3-none-any.whl", hash = "sha256:525a9d3ef712d5747867d32b61f5d7aa035ead0835b56cd1ae2a6d310eaef077"},
- {file = "gspread-5.11.2.tar.gz", hash = "sha256:fdc477cbda48bc9ea77eb8a4bf737985bfdba44f04677e4d791eb70bcbae2b95"},
+ {file = "gspread-5.12.4-py3-none-any.whl", hash = "sha256:1e453d87e0fde23bc5546e33eb684cf8b8c26540615f2f1ae004a9084a29051d"},
+ {file = "gspread-5.12.4.tar.gz", hash = "sha256:3fcef90183f15d3c9233b4caa021a83682f2b2ee678340c42d7ca7d8be98c6d1"},
]

[package.dependencies]
@@ -949,18 +925,19 @@ files = [
[[package]]
name = "helicone"
-version = "1.0.9"
+version = "1.0.14"
description = "A Python wrapper for the OpenAI API that logs all requests to Helicone."
optional = false
python-versions = ">=3.8.1"
files = [
- {file = "helicone-1.0.9-py3-none-any.whl", hash = "sha256:440b4ff1ba65239f33b70aab3f53b7c69d7c6883a3552a76f8b13818dbef915c"},
- {file = "helicone-1.0.9.tar.gz", hash = "sha256:96122e95d40917a722f79ea71ff5099e248951623b5d0068275b08760e351b0b"},
+ {file = "helicone-1.0.14-py3-none-any.whl", hash = "sha256:1828eb546e2f1aa780391ff640a1c62d55de030d3712c26db8d53f288333349a"},
+ {file = "helicone-1.0.14.tar.gz", hash = "sha256:d8061b1ff7ed45257cd24f42e7f6e24d330130a30fe0444253d3cad4bb9d23d5"},
]

[package.dependencies]
lockfile = ">=0.12.2,<0.13.0"
openai = ">=0.27.0,<0.28.0"
+pyhumps = ">=3.8.0,<4.0.0"

[[package]]
name = "httpcore"
@@ -1022,13 +999,13 @@ socks = ["socksio (==1.*)"]
[[package]]
name = "identify"
-version = "2.5.29"
+version = "2.5.33"
description = "File identification library for Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "identify-2.5.29-py2.py3-none-any.whl", hash = "sha256:24437fbf6f4d3fe6efd0eb9d67e24dd9106db99af5ceb27996a5f7895f24bf1b"},
- {file = "identify-2.5.29.tar.gz", hash = "sha256:d43d52b86b15918c137e3a74fff5224f60385cd0e9c38e99d07c257f02f151a5"},
+ {file = "identify-2.5.33-py2.py3-none-any.whl", hash = "sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34"},
+ {file = "identify-2.5.33.tar.gz", hash = "sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d"},
]

[package.extras]
@@ -1036,13 +1013,13 @@ license = ["ukkonen"]
[[package]]
name = "idna"
-version = "3.4"
+version = "3.6"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
files = [
- {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
- {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+ {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"},
+ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"},
]

[[package]]
@@ -1058,69 +1035,63 @@ files = [
[[package]]
name = "ipython"
-version = "8.15.0"
+version = "8.19.0"
description = "IPython: Productive Interactive Computing"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
files = [
- {file = "ipython-8.15.0-py3-none-any.whl", hash = "sha256:45a2c3a529296870a97b7de34eda4a31bee16bc7bf954e07d39abe49caf8f887"},
- {file = "ipython-8.15.0.tar.gz", hash = "sha256:2baeb5be6949eeebf532150f81746f8333e2ccce02de1c7eedde3f23ed5e9f1e"},
+ {file = "ipython-8.19.0-py3-none-any.whl", hash = "sha256:2f55d59370f59d0d2b2212109fe0e6035cfea436b1c0e6150ad2244746272ec5"},
+ {file = "ipython-8.19.0.tar.gz", hash = "sha256:ac4da4ecf0042fb4e0ce57c60430c2db3c719fa8bdf92f8631d6bd8a5785d1f0"},
]

[package.dependencies]
-appnope = {version = "*", markers = "sys_platform == \"darwin\""}
-backcall = "*"
colorama = {version = "*", markers = "sys_platform == \"win32\""}
decorator = "*"
exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
jedi = ">=0.16"
matplotlib-inline = "*"
pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
-pickleshare = "*"
-prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0"
+prompt-toolkit = ">=3.0.41,<3.1.0"
pygments = ">=2.4.0"
stack-data = "*"
traitlets = ">=5"

[package.extras]
-all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
+all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.23)", "pandas", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
black = ["black"]
-doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
+doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
kernel = ["ipykernel"]
nbconvert = ["nbconvert"]
nbformat = ["nbformat"]
notebook = ["ipywidgets", "notebook"]
parallel = ["ipyparallel"]
qtconsole = ["qtconsole"]
-test = ["pytest (<7.1)", "pytest-asyncio", "testpath"]
-test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"]
+test = ["pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"]
+test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath", "trio"]

[[package]]
name = "isort"
-version = "5.12.0"
+version = "5.13.2"
description = "A Python utility / library to sort Python imports."
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"},
- {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"},
+ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"},
+ {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"},
]

[package.extras]
-colors = ["colorama (>=0.4.3)"]
-pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"]
-plugins = ["setuptools"]
-requirements-deprecated-finder = ["pip-api", "pipreqs"]
+colors = ["colorama (>=0.4.6)"]

[[package]]
name = "jedi"
-version = "0.19.0"
+version = "0.19.1"
description = "An autocompletion tool for Python that can be used for text editors."
optional = false
python-versions = ">=3.6"
files = [
- {file = "jedi-0.19.0-py2.py3-none-any.whl", hash = "sha256:cb8ce23fbccff0025e9386b5cf85e892f94c9b822378f8da49970471335ac64e"},
- {file = "jedi-0.19.0.tar.gz", hash = "sha256:bcf9894f1753969cbac8022a8c2eaee06bfa3724e4192470aaffe7eb6272b0c4"},
+ {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"},
+ {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"},
]

[package.dependencies]
@@ -1129,7 +1100,7 @@ parso = ">=0.8.3,<0.9.0"
[package.extras]
docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
-testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
+testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]

[[package]]
name = "jinja2"
@@ -1359,52 +1330,51 @@ files = [
[[package]]
name = "matplotlib"
-version = "3.8.0"
+version = "3.8.2"
description = "Python plotting package"
optional = false
python-versions = ">=3.9"
files = [
- {file = "matplotlib-3.8.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c4940bad88a932ddc69734274f6fb047207e008389489f2b6f77d9ca485f0e7a"},
- {file = "matplotlib-3.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a33bd3045c7452ca1fa65676d88ba940867880e13e2546abb143035fa9072a9d"},
- {file = "matplotlib-3.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ea6886e93401c22e534bbfd39201ce8931b75502895cfb115cbdbbe2d31f287"},
- {file = "matplotlib-3.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d670b9348e712ec176de225d425f150dc8e37b13010d85233c539b547da0be39"},
- {file = "matplotlib-3.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7b37b74f00c4cb6af908cb9a00779d97d294e89fd2145ad43f0cdc23f635760c"},
- {file = "matplotlib-3.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:0e723f5b96f3cd4aad99103dc93e9e3cdc4f18afdcc76951f4857b46f8e39d2d"},
- {file = "matplotlib-3.8.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5dc945a9cb2deb7d197ba23eb4c210e591d52d77bf0ba27c35fc82dec9fa78d4"},
- {file = "matplotlib-3.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8b5a1bf27d078453aa7b5b27f52580e16360d02df6d3dc9504f3d2ce11f6309"},
- {file = "matplotlib-3.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f25ffb6ad972cdffa7df8e5be4b1e3cadd2f8d43fc72085feb1518006178394"},
- {file = "matplotlib-3.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee482731c8c17d86d9ddb5194d38621f9b0f0d53c99006275a12523ab021732"},
- {file = "matplotlib-3.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:36eafe2128772195b373e1242df28d1b7ec6c04c15b090b8d9e335d55a323900"},
- {file = "matplotlib-3.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:061ee58facb3580cd2d046a6d227fb77e9295599c5ec6ad069f06b5821ad1cfc"},
- {file = "matplotlib-3.8.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3cc3776836d0f4f22654a7f2d2ec2004618d5cf86b7185318381f73b80fd8a2d"},
- {file = "matplotlib-3.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6c49a2bd6981264bddcb8c317b6bd25febcece9e2ebfcbc34e7f4c0c867c09dc"},
- {file = "matplotlib-3.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ed11654fc83cd6cfdf6170b453e437674a050a452133a064d47f2f1371f8d3"},
- {file = "matplotlib-3.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae97fdd6996b3a25da8ee43e3fc734fff502f396801063c6b76c20b56683196"},
- {file = "matplotlib-3.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:87df75f528020a6299f76a1d986c0ed4406e3b2bd44bc5e306e46bca7d45e53e"},
- {file = "matplotlib-3.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:90d74a95fe055f73a6cd737beecc1b81c26f2893b7a3751d52b53ff06ca53f36"},
- {file = "matplotlib-3.8.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c3499c312f5def8f362a2bf761d04fa2d452b333f3a9a3f58805273719bf20d9"},
- {file = "matplotlib-3.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31e793c8bd4ea268cc5d3a695c27b30650ec35238626961d73085d5e94b6ab68"},
- {file = "matplotlib-3.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d5ee602ef517a89d1f2c508ca189cfc395dd0b4a08284fb1b97a78eec354644"},
- {file = "matplotlib-3.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5de39dc61ca35342cf409e031f70f18219f2c48380d3886c1cf5ad9f17898e06"},
- {file = "matplotlib-3.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:dd386c80a98b5f51571b9484bf6c6976de383cd2a8cd972b6a9562d85c6d2087"},
- {file = "matplotlib-3.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:f691b4ef47c7384d0936b2e8ebdeb5d526c81d004ad9403dfb9d4c76b9979a93"},
- {file = "matplotlib-3.8.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0b11f354aae62a2aa53ec5bb09946f5f06fc41793e351a04ff60223ea9162955"},
- {file = "matplotlib-3.8.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f54b9fb87ca5acbcdd0f286021bedc162e1425fa5555ebf3b3dfc167b955ad9"},
- {file = "matplotlib-3.8.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:60a6e04dfd77c0d3bcfee61c3cd335fff1b917c2f303b32524cd1235e194ef99"},
- {file = "matplotlib-3.8.0.tar.gz", hash = "sha256:df8505e1c19d5c2c26aff3497a7cbd3ccfc2e97043d1e4db3e76afa399164b69"},
+ {file = "matplotlib-3.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:09796f89fb71a0c0e1e2f4bdaf63fb2cefc84446bb963ecdeb40dfee7dfa98c7"},
+ {file = "matplotlib-3.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f9c6976748a25e8b9be51ea028df49b8e561eed7809146da7a47dbecebab367"},
+ {file = "matplotlib-3.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b78e4f2cedf303869b782071b55fdde5987fda3038e9d09e58c91cc261b5ad18"},
+ {file = "matplotlib-3.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e208f46cf6576a7624195aa047cb344a7f802e113bb1a06cfd4bee431de5e31"},
+ {file = "matplotlib-3.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:46a569130ff53798ea5f50afce7406e91fdc471ca1e0e26ba976a8c734c9427a"},
+ {file = "matplotlib-3.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:830f00640c965c5b7f6bc32f0d4ce0c36dfe0379f7dd65b07a00c801713ec40a"},
+ {file = "matplotlib-3.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d86593ccf546223eb75a39b44c32788e6f6440d13cfc4750c1c15d0fcb850b63"},
+ {file = "matplotlib-3.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9a5430836811b7652991939012f43d2808a2db9b64ee240387e8c43e2e5578c8"},
+ {file = "matplotlib-3.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9576723858a78751d5aacd2497b8aef29ffea6d1c95981505877f7ac28215c6"},
+ {file = "matplotlib-3.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ba9cbd8ac6cf422f3102622b20f8552d601bf8837e49a3afed188d560152788"},
+ {file = "matplotlib-3.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:03f9d160a29e0b65c0790bb07f4f45d6a181b1ac33eb1bb0dd225986450148f0"},
+ {file = "matplotlib-3.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:3773002da767f0a9323ba1a9b9b5d00d6257dbd2a93107233167cfb581f64717"},
+ {file = "matplotlib-3.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:4c318c1e95e2f5926fba326f68177dee364aa791d6df022ceb91b8221bd0a627"},
+ {file = "matplotlib-3.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:091275d18d942cf1ee9609c830a1bc36610607d8223b1b981c37d5c9fc3e46a4"},
+ {file = "matplotlib-3.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b0f3b8ea0e99e233a4bcc44590f01604840d833c280ebb8fe5554fd3e6cfe8d"},
+ {file = "matplotlib-3.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7b1704a530395aaf73912be741c04d181f82ca78084fbd80bc737be04848331"},
+ {file = "matplotlib-3.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533b0e3b0c6768eef8cbe4b583731ce25a91ab54a22f830db2b031e83cca9213"},
+ {file = "matplotlib-3.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:0f4fc5d72b75e2c18e55eb32292659cf731d9d5b312a6eb036506304f4675630"},
+ {file = "matplotlib-3.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:deaed9ad4da0b1aea77fe0aa0cebb9ef611c70b3177be936a95e5d01fa05094f"},
+ {file = "matplotlib-3.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:172f4d0fbac3383d39164c6caafd3255ce6fa58f08fc392513a0b1d3b89c4f89"},
+ {file = "matplotlib-3.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7d36c2209d9136cd8e02fab1c0ddc185ce79bc914c45054a9f514e44c787917"},
+ {file = "matplotlib-3.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5864bdd7da445e4e5e011b199bb67168cdad10b501750367c496420f2ad00843"},
+ {file = "matplotlib-3.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ef8345b48e95cee45ff25192ed1f4857273117917a4dcd48e3905619bcd9c9b8"},
+ {file = "matplotlib-3.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:7c48d9e221b637c017232e3760ed30b4e8d5dfd081daf327e829bf2a72c731b4"},
+ {file = "matplotlib-3.8.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:aa11b3c6928a1e496c1a79917d51d4cd5d04f8a2e75f21df4949eeefdf697f4b"},
+ {file = "matplotlib-3.8.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1095fecf99eeb7384dabad4bf44b965f929a5f6079654b681193edf7169ec20"},
+ {file = "matplotlib-3.8.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:bddfb1db89bfaa855912261c805bd0e10218923cc262b9159a49c29a7a1c1afa"},
+ {file = "matplotlib-3.8.2.tar.gz", hash = "sha256:01a978b871b881ee76017152f1f1a0cbf6bd5f7b8ff8c96df0df1bd57d8755a1"},
]

[package.dependencies]
contourpy = ">=1.0.1"
cycler = ">=0.10"
fonttools = ">=4.22.0"
-kiwisolver = ">=1.0.1"
+kiwisolver = ">=1.3.1"
numpy = ">=1.21,<2"
packaging = ">=20.0"
-pillow = ">=6.2.0"
+pillow = ">=8"
pyparsing = ">=2.3.1"
python-dateutil = ">=2.7"
-setuptools_scm = ">=7"

[[package]]
name = "matplotlib-inline"
@@ -1527,21 +1497,21 @@ files = [
[[package]]
name = "networkx"
-version = "3.1"
+version = "3.2.1"
description = "Python package for creating and manipulating graphs and networks"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"},
- {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"},
+ {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"},
+ {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"},
]

[package.extras]
-default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"]
-developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"]
-doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"]
-extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"]
-test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"]
+default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"]
+developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"]
+doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"]
+extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"]
+test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"]

[[package]]
name = "nodeenv"
@@ -1559,77 +1529,47 @@ setuptools = "*"
[[package]]
name = "numpy"
-version = "1.25.2"
+version = "1.26.2"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
files = [
- {file = "numpy-1.25.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3"},
- {file = "numpy-1.25.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f"},
- {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187"},
- {file = "numpy-1.25.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357"},
- {file = "numpy-1.25.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9"},
- {file = "numpy-1.25.2-cp310-cp310-win32.whl", hash = "sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044"},
- {file = "numpy-1.25.2-cp310-cp310-win_amd64.whl", hash = "sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545"},
- {file = "numpy-1.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418"},
- {file = "numpy-1.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f"},
- {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2"},
- {file = "numpy-1.25.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf"},
- {file = "numpy-1.25.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364"},
- {file = "numpy-1.25.2-cp311-cp311-win32.whl", hash = "sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d"},
- {file = "numpy-1.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4"},
- {file = "numpy-1.25.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b79e513d7aac42ae918db3ad1341a015488530d0bb2a6abcbdd10a3a829ccfd3"},
- {file = "numpy-1.25.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb942bfb6f84df5ce05dbf4b46673ffed0d3da59f13635ea9b926af3deb76926"},
- {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0746410e73384e70d286f93abf2520035250aad8c5714240b0492a7302fdca"},
- {file = "numpy-1.25.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7806500e4f5bdd04095e849265e55de20d8cc4b661b038957354327f6d9b295"},
- {file = "numpy-1.25.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8b77775f4b7df768967a7c8b3567e309f617dd5e99aeb886fa14dc1a0791141f"},
- {file = "numpy-1.25.2-cp39-cp39-win32.whl", hash = "sha256:2792d23d62ec51e50ce4d4b7d73de8f67a2fd3ea710dcbc8563a51a03fb07b01"},
- {file = "numpy-1.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:76b4115d42a7dfc5d485d358728cdd8719be33cc5ec6ec08632a5d6fca2ed380"},
- {file = "numpy-1.25.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55"},
- {file = "numpy-1.25.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901"},
- {file = "numpy-1.25.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf"},
- {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"},
-]
-
-[[package]]
-name = "numpy"
-version = "1.26.0"
-description = "Fundamental package for array computing in Python"
-optional = false
-python-versions = "<3.13,>=3.9"
-files = [
- {file = "numpy-1.26.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8db2f125746e44dce707dd44d4f4efeea8d7e2b43aace3f8d1f235cfa2733dd"},
- {file = "numpy-1.26.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0621f7daf973d34d18b4e4bafb210bbaf1ef5e0100b5fa750bd9cde84c7ac292"},
- {file = "numpy-1.26.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51be5f8c349fdd1a5568e72713a21f518e7d6707bcf8503b528b88d33b57dc68"},
- {file = "numpy-1.26.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:767254ad364991ccfc4d81b8152912e53e103ec192d1bb4ea6b1f5a7117040be"},
- {file = "numpy-1.26.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:436c8e9a4bdeeee84e3e59614d38c3dbd3235838a877af8c211cfcac8a80b8d3"},
- {file = "numpy-1.26.0-cp310-cp310-win32.whl", hash = "sha256:c2e698cb0c6dda9372ea98a0344245ee65bdc1c9dd939cceed6bb91256837896"},
- {file = "numpy-1.26.0-cp310-cp310-win_amd64.whl", hash = "sha256:09aaee96c2cbdea95de76ecb8a586cb687d281c881f5f17bfc0fb7f5890f6b91"},
- {file = "numpy-1.26.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:637c58b468a69869258b8ae26f4a4c6ff8abffd4a8334c830ffb63e0feefe99a"},
- {file = "numpy-1.26.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:306545e234503a24fe9ae95ebf84d25cba1fdc27db971aa2d9f1ab6bba19a9dd"},
- {file = "numpy-1.26.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c6adc33561bd1d46f81131d5352348350fc23df4d742bb246cdfca606ea1208"},
- {file = "numpy-1.26.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e062aa24638bb5018b7841977c360d2f5917268d125c833a686b7cbabbec496c"},
- {file = "numpy-1.26.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:546b7dd7e22f3c6861463bebb000646fa730e55df5ee4a0224408b5694cc6148"},
- {file = "numpy-1.26.0-cp311-cp311-win32.whl", hash = "sha256:c0b45c8b65b79337dee5134d038346d30e109e9e2e9d43464a2970e5c0e93229"},
- {file = "numpy-1.26.0-cp311-cp311-win_amd64.whl", hash = "sha256:eae430ecf5794cb7ae7fa3808740b015aa80747e5266153128ef055975a72b99"},
- {file = "numpy-1.26.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:166b36197e9debc4e384e9c652ba60c0bacc216d0fc89e78f973a9760b503388"},
- {file = "numpy-1.26.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f042f66d0b4ae6d48e70e28d487376204d3cbf43b84c03bac57e28dac6151581"},
- {file = "numpy-1.26.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5e18e5b14a7560d8acf1c596688f4dfd19b4f2945b245a71e5af4ddb7422feb"},
- {file = "numpy-1.26.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f6bad22a791226d0a5c7c27a80a20e11cfe09ad5ef9084d4d3fc4a299cca505"},
- {file = "numpy-1.26.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4acc65dd65da28060e206c8f27a573455ed724e6179941edb19f97e58161bb69"},
- {file = "numpy-1.26.0-cp312-cp312-win32.whl", hash = "sha256:bb0d9a1aaf5f1cb7967320e80690a1d7ff69f1d47ebc5a9bea013e3a21faec95"},
- {file = "numpy-1.26.0-cp312-cp312-win_amd64.whl", hash = "sha256:ee84ca3c58fe48b8ddafdeb1db87388dce2c3c3f701bf447b05e4cfcc3679112"},
- {file = "numpy-1.26.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a873a8180479bc829313e8d9798d5234dfacfc2e8a7ac188418189bb8eafbd2"},
- {file = "numpy-1.26.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:914b28d3215e0c721dc75db3ad6d62f51f630cb0c277e6b3bcb39519bed10bd8"},
- {file = "numpy-1.26.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c78a22e95182fb2e7874712433eaa610478a3caf86f28c621708d35fa4fd6e7f"},
- {file = "numpy-1.26.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86f737708b366c36b76e953c46ba5827d8c27b7a8c9d0f471810728e5a2fe57c"},
- {file = "numpy-1.26.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b44e6a09afc12952a7d2a58ca0a2429ee0d49a4f89d83a0a11052da696440e49"},
- {file = "numpy-1.26.0-cp39-cp39-win32.whl", hash = "sha256:5671338034b820c8d58c81ad1dafc0ed5a00771a82fccc71d6438df00302094b"},
- {file = "numpy-1.26.0-cp39-cp39-win_amd64.whl", hash = "sha256:020cdbee66ed46b671429c7265cf00d8ac91c046901c55684954c3958525dab2"},
- {file = "numpy-1.26.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0792824ce2f7ea0c82ed2e4fecc29bb86bee0567a080dacaf2e0a01fe7654369"},
- {file = "numpy-1.26.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d484292eaeb3e84a51432a94f53578689ffdea3f90e10c8b203a99be5af57d8"},
- {file = "numpy-1.26.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:186ba67fad3c60dbe8a3abff3b67a91351100f2661c8e2a80364ae6279720299"},
- {file = "numpy-1.26.0.tar.gz", hash = "sha256:f93fc78fe8bf15afe2b8d6b6499f1c73953169fad1e9a8dd086cdff3190e7fdf"},
+ {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"},
+ {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"},
+ {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"},
+ {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"},
+ {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"},
+ {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"},
+ {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"},
+ {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"},
+ {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"},
+ {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"},
+ {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"},
+ {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"},
+ {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"},
+ {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"},
+ {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"},
+ {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"},
+ {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"},
+ {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"},
+ {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"},
+ {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"},
+ {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"},
+ {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"},
+ {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"},
+ {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"},
+ {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"},
+ {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"},
+ {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"},
+ {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"},
+ {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"},
+ {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"},
+ {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"},
+ {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"},
+ {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"},
+ {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"},
+ {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"},
+ {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"},
]

[[package]]
@@ -1690,13 +1630,13 @@ wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1
[[package]]
name = "outcome"
-version = "1.2.0"
+version = "1.3.0.post0"
description = "Capture the outcome of Python function calls."
optional = false
python-versions = ">=3.7"
files = [
- {file = "outcome-1.2.0-py2.py3-none-any.whl", hash = "sha256:c4ab89a56575d6d38a05aa16daeaa333109c1f96167aba8901ab18b6b5e0f7f5"},
- {file = "outcome-1.2.0.tar.gz", hash = "sha256:6f82bd3de45da303cf1f771ecafa1633750a358436a8bb60e06a1ceb745d2672"},
+ {file = "outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b"},
+ {file = "outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8"},
]
[package.dependencies]
@@ -1704,118 +1644,61 @@ attrs = ">=19.2.0"
[[package]]
name = "packaging"
-version = "23.1"
+version = "23.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.7"
files = [
- {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
- {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
-]
-
-[[package]]
-name = "pandas"
-version = "2.1.0"
-description = "Powerful data structures for data analysis, time series, and statistics"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "pandas-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:40dd20439ff94f1b2ed55b393ecee9cb6f3b08104c2c40b0cb7186a2f0046242"},
- {file = "pandas-2.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d4f38e4fedeba580285eaac7ede4f686c6701a9e618d8a857b138a126d067f2f"},
- {file = "pandas-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6a0fe052cf27ceb29be9429428b4918f3740e37ff185658f40d8702f0b3e09"},
- {file = "pandas-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d81e1813191070440d4c7a413cb673052b3b4a984ffd86b8dd468c45742d3cc"},
- {file = "pandas-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eb20252720b1cc1b7d0b2879ffc7e0542dd568f24d7c4b2347cb035206936421"},
- {file = "pandas-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:38f74ef7ebc0ffb43b3d633e23d74882bce7e27bfa09607f3c5d3e03ffd9a4a5"},
- {file = "pandas-2.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cda72cc8c4761c8f1d97b169661f23a86b16fdb240bdc341173aee17e4d6cedd"},
- {file = "pandas-2.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d97daeac0db8c993420b10da4f5f5b39b01fc9ca689a17844e07c0a35ac96b4b"},
- {file = "pandas-2.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8c58b1113892e0c8078f006a167cc210a92bdae23322bb4614f2f0b7a4b510f"},
- {file = "pandas-2.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:629124923bcf798965b054a540f9ccdfd60f71361255c81fa1ecd94a904b9dd3"},
- {file = "pandas-2.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:70cf866af3ab346a10debba8ea78077cf3a8cd14bd5e4bed3d41555a3280041c"},
- {file = "pandas-2.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:d53c8c1001f6a192ff1de1efe03b31a423d0eee2e9e855e69d004308e046e694"},
- {file = "pandas-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:86f100b3876b8c6d1a2c66207288ead435dc71041ee4aea789e55ef0e06408cb"},
- {file = "pandas-2.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28f330845ad21c11db51e02d8d69acc9035edfd1116926ff7245c7215db57957"},
- {file = "pandas-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9a6ccf0963db88f9b12df6720e55f337447aea217f426a22d71f4213a3099a6"},
- {file = "pandas-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d99e678180bc59b0c9443314297bddce4ad35727a1a2656dbe585fd78710b3b9"},
- {file = "pandas-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b31da36d376d50a1a492efb18097b9101bdbd8b3fbb3f49006e02d4495d4c644"},
- {file = "pandas-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0164b85937707ec7f70b34a6c3a578dbf0f50787f910f21ca3b26a7fd3363437"},
- {file = "pandas-2.1.0.tar.gz", hash = "sha256:62c24c7fc59e42b775ce0679cfa7b14a5f9bfb7643cfbe708c960699e05fb918"},
+ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
+ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
]
-[package.dependencies]
-numpy = {version = ">=1.23.2", markers = "python_version >= \"3.11\""}
-python-dateutil = ">=2.8.2"
-pytz = ">=2020.1"
-tzdata = ">=2022.1"
-
-[package.extras]
-all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"]
-aws = ["s3fs (>=2022.05.0)"]
-clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"]
-compression = ["zstandard (>=0.17.0)"]
-computation = ["scipy (>=1.8.1)", "xarray (>=2022.03.0)"]
-consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
-excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"]
-feather = ["pyarrow (>=7.0.0)"]
-fss = ["fsspec (>=2022.05.0)"]
-gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"]
-hdf5 = ["tables (>=3.7.0)"]
-html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"]
-mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"]
-output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"]
-parquet = ["pyarrow (>=7.0.0)"]
-performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"]
-plot = ["matplotlib (>=3.6.1)"]
-postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"]
-spss = ["pyreadstat (>=1.1.5)"]
-sql-other = ["SQLAlchemy (>=1.4.36)"]
-test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
-xml = ["lxml (>=4.8.0)"]
-
[[package]]
name = "pandas"
-version = "2.1.1"
+version = "2.1.4"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.9"
files = [
- {file = "pandas-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58d997dbee0d4b64f3cb881a24f918b5f25dd64ddf31f467bb9b67ae4c63a1e4"},
- {file = "pandas-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02304e11582c5d090e5a52aec726f31fe3f42895d6bfc1f28738f9b64b6f0614"},
- {file = "pandas-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffa8f0966de2c22de408d0e322db2faed6f6e74265aa0856f3824813cf124363"},
- {file = "pandas-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1f84c144dee086fe4f04a472b5cd51e680f061adf75c1ae4fc3a9275560f8f4"},
- {file = "pandas-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75ce97667d06d69396d72be074f0556698c7f662029322027c226fd7a26965cb"},
- {file = "pandas-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:4c3f32fd7c4dccd035f71734df39231ac1a6ff95e8bdab8d891167197b7018d2"},
- {file = "pandas-2.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e2959720b70e106bb1d8b6eadd8ecd7c8e99ccdbe03ee03260877184bb2877d"},
- {file = "pandas-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:25e8474a8eb258e391e30c288eecec565bfed3e026f312b0cbd709a63906b6f8"},
- {file = "pandas-2.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8bd1685556f3374520466998929bade3076aeae77c3e67ada5ed2b90b4de7f0"},
- {file = "pandas-2.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc3657869c7902810f32bd072f0740487f9e030c1a3ab03e0af093db35a9d14e"},
- {file = "pandas-2.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:05674536bd477af36aa2effd4ec8f71b92234ce0cc174de34fd21e2ee99adbc2"},
- {file = "pandas-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:b407381258a667df49d58a1b637be33e514b07f9285feb27769cedb3ab3d0b3a"},
- {file = "pandas-2.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c747793c4e9dcece7bb20156179529898abf505fe32cb40c4052107a3c620b49"},
- {file = "pandas-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3bcad1e6fb34b727b016775bea407311f7721db87e5b409e6542f4546a4951ea"},
- {file = "pandas-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5ec7740f9ccb90aec64edd71434711f58ee0ea7f5ed4ac48be11cfa9abf7317"},
- {file = "pandas-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29deb61de5a8a93bdd033df328441a79fcf8dd3c12d5ed0b41a395eef9cd76f0"},
- {file = "pandas-2.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4f99bebf19b7e03cf80a4e770a3e65eee9dd4e2679039f542d7c1ace7b7b1daa"},
- {file = "pandas-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:84e7e910096416adec68075dc87b986ff202920fb8704e6d9c8c9897fe7332d6"},
- {file = "pandas-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:366da7b0e540d1b908886d4feb3d951f2f1e572e655c1160f5fde28ad4abb750"},
- {file = "pandas-2.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e50e72b667415a816ac27dfcfe686dc5a0b02202e06196b943d54c4f9c7693e"},
- {file = "pandas-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc1ab6a25da197f03ebe6d8fa17273126120874386b4ac11c1d687df288542dd"},
- {file = "pandas-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0dbfea0dd3901ad4ce2306575c54348d98499c95be01b8d885a2737fe4d7a98"},
- {file = "pandas-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0489b0e6aa3d907e909aef92975edae89b1ee1654db5eafb9be633b0124abe97"},
- {file = "pandas-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:4cdb0fab0400c2cb46dafcf1a0fe084c8bb2480a1fa8d81e19d15e12e6d4ded2"},
- {file = "pandas-2.1.1.tar.gz", hash = "sha256:fecb198dc389429be557cde50a2d46da8434a17fe37d7d41ff102e3987fd947b"},
+ {file = "pandas-2.1.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bdec823dc6ec53f7a6339a0e34c68b144a7a1fd28d80c260534c39c62c5bf8c9"},
+ {file = "pandas-2.1.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:294d96cfaf28d688f30c918a765ea2ae2e0e71d3536754f4b6de0ea4a496d034"},
+ {file = "pandas-2.1.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b728fb8deba8905b319f96447a27033969f3ea1fea09d07d296c9030ab2ed1d"},
+ {file = "pandas-2.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00028e6737c594feac3c2df15636d73ace46b8314d236100b57ed7e4b9ebe8d9"},
+ {file = "pandas-2.1.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:426dc0f1b187523c4db06f96fb5c8d1a845e259c99bda74f7de97bd8a3bb3139"},
+ {file = "pandas-2.1.4-cp310-cp310-win_amd64.whl", hash = "sha256:f237e6ca6421265643608813ce9793610ad09b40154a3344a088159590469e46"},
+ {file = "pandas-2.1.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b7d852d16c270e4331f6f59b3e9aa23f935f5c4b0ed2d0bc77637a8890a5d092"},
+ {file = "pandas-2.1.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7d5f2f54f78164b3d7a40f33bf79a74cdee72c31affec86bfcabe7e0789821"},
+ {file = "pandas-2.1.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0aa6e92e639da0d6e2017d9ccff563222f4eb31e4b2c3cf32a2a392fc3103c0d"},
+ {file = "pandas-2.1.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d797591b6846b9db79e65dc2d0d48e61f7db8d10b2a9480b4e3faaddc421a171"},
+ {file = "pandas-2.1.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2d3e7b00f703aea3945995ee63375c61b2e6aa5aa7871c5d622870e5e137623"},
+ {file = "pandas-2.1.4-cp311-cp311-win_amd64.whl", hash = "sha256:dc9bf7ade01143cddc0074aa6995edd05323974e6e40d9dbde081021ded8510e"},
+ {file = "pandas-2.1.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:482d5076e1791777e1571f2e2d789e940dedd927325cc3cb6d0800c6304082f6"},
+ {file = "pandas-2.1.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8a706cfe7955c4ca59af8c7a0517370eafbd98593155b48f10f9811da440248b"},
+ {file = "pandas-2.1.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0513a132a15977b4a5b89aabd304647919bc2169eac4c8536afb29c07c23540"},
+ {file = "pandas-2.1.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9f17f2b6fc076b2a0078862547595d66244db0f41bf79fc5f64a5c4d635bead"},
+ {file = "pandas-2.1.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:45d63d2a9b1b37fa6c84a68ba2422dc9ed018bdaa668c7f47566a01188ceeec1"},
+ {file = "pandas-2.1.4-cp312-cp312-win_amd64.whl", hash = "sha256:f69b0c9bb174a2342818d3e2778584e18c740d56857fc5cdb944ec8bbe4082cf"},
+ {file = "pandas-2.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3f06bda01a143020bad20f7a85dd5f4a1600112145f126bc9e3e42077c24ef34"},
+ {file = "pandas-2.1.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab5796839eb1fd62a39eec2916d3e979ec3130509930fea17fe6f81e18108f6a"},
+ {file = "pandas-2.1.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbaf9e8d3a63a9276d707b4d25930a262341bca9874fcb22eff5e3da5394732"},
+ {file = "pandas-2.1.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ebfd771110b50055712b3b711b51bee5d50135429364d0498e1213a7adc2be8"},
+ {file = "pandas-2.1.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8ea107e0be2aba1da619cc6ba3f999b2bfc9669a83554b1904ce3dd9507f0860"},
+ {file = "pandas-2.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:d65148b14788b3758daf57bf42725caa536575da2b64df9964c563b015230984"},
+ {file = "pandas-2.1.4.tar.gz", hash = "sha256:fcb68203c833cc735321512e13861358079a96c174a61f5116a1de89c58c0ef7"},
]
[package.dependencies]
numpy = [
- {version = ">=1.22.4", markers = "python_version < \"3.11\""},
- {version = ">=1.23.2", markers = "python_version == \"3.11\""},
+ {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""},
+ {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""},
+ {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""},
]
python-dateutil = ">=2.8.2"
pytz = ">=2020.1"
tzdata = ">=2022.1"
[package.extras]
-all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"]
+all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"]
aws = ["s3fs (>=2022.05.0)"]
clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"]
compression = ["zstandard (>=0.17.0)"]
@@ -1835,7 +1718,7 @@ plot = ["matplotlib (>=3.6.1)"]
postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"]
spss = ["pyreadstat (>=1.1.5)"]
sql-other = ["SQLAlchemy (>=1.4.36)"]
-test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
+test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
xml = ["lxml (>=4.8.0)"]
[[package]]
@@ -1855,116 +1738,123 @@ testing = ["docopt", "pytest (<6.0.0)"]
[[package]]
name = "pathspec"
-version = "0.11.2"
+version = "0.12.1"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"},
- {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"},
+ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
+ {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
]
[[package]]
name = "pexpect"
-version = "4.8.0"
+version = "4.9.0"
description = "Pexpect allows easy control of interactive console applications."
optional = false
python-versions = "*"
files = [
- {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"},
- {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"},
+ {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"},
+ {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"},
]
[package.dependencies]
ptyprocess = ">=0.5"
[[package]]
-name = "pickleshare"
-version = "0.7.5"
-description = "Tiny 'shelve'-like database with concurrency support"
-optional = false
-python-versions = "*"
-files = [
- {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"},
- {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"},
-]
-
-[[package]]
name = "pillow"
-version = "10.0.1"
+version = "10.2.0"
description = "Python Imaging Library (Fork)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "Pillow-10.0.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a"},
- {file = "Pillow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d"},
- {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d"},
- {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19"},
- {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f"},
- {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff"},
- {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf"},
- {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd"},
- {file = "Pillow-10.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0"},
- {file = "Pillow-10.0.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1"},
- {file = "Pillow-10.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1"},
- {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21"},
- {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54"},
- {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205"},
- {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2"},
- {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b"},
- {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1"},
- {file = "Pillow-10.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088"},
- {file = "Pillow-10.0.1-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b"},
- {file = "Pillow-10.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed"},
- {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635"},
- {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad"},
- {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a"},
- {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91"},
- {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4"},
- {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08"},
- {file = "Pillow-10.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08"},
- {file = "Pillow-10.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a"},
- {file = "Pillow-10.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68"},
- {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500"},
- {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21"},
- {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d"},
- {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7"},
- {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a"},
- {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7"},
- {file = "Pillow-10.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3"},
- {file = "Pillow-10.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849"},
- {file = "Pillow-10.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1"},
- {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37"},
- {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876"},
- {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f"},
- {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145"},
- {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2"},
- {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf"},
- {file = "Pillow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971"},
- {file = "Pillow-10.0.1-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db"},
- {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e"},
- {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4"},
- {file = "Pillow-10.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f"},
- {file = "Pillow-10.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf"},
- {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317"},
- {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d"},
- {file = "Pillow-10.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d"},
- {file = "Pillow-10.0.1.tar.gz", hash = "sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d"},
+ {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"},
+ {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"},
+ {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"},
+ {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"},
+ {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"},
+ {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"},
+ {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"},
+ {file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"},
+ {file = "pillow-10.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5"},
+ {file = "pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f"},
+ {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311"},
+ {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1"},
+ {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757"},
+ {file = "pillow-10.2.0-cp311-cp311-win32.whl", hash = "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068"},
+ {file = "pillow-10.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56"},
+ {file = "pillow-10.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1"},
+ {file = "pillow-10.2.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef"},
+ {file = "pillow-10.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2"},
+ {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04"},
+ {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f"},
+ {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb"},
+ {file = "pillow-10.2.0-cp312-cp312-win32.whl", hash = "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f"},
+ {file = "pillow-10.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9"},
+ {file = "pillow-10.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48"},
+ {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"},
+ {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"},
+ {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"},
+ {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"},
+ {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"},
+ {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"},
+ {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"},
+ {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"},
+ {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"},
+ {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"},
+ {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"},
+ {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"},
+ {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"},
+ {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"},
+ {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"},
+ {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"},
+ {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"},
+ {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"},
+ {file = "pillow-10.2.0.tar.gz", hash = "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"},
]
[package.extras]
docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
+fpx = ["olefile"]
+mic = ["olefile"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+typing = ["typing-extensions"]
+xmp = ["defusedxml"]
[[package]]
name = "platformdirs"
-version = "3.10.0"
+version = "4.1.0"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "platformdirs-3.10.0-py3-none-any.whl", hash = "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"},
- {file = "platformdirs-3.10.0.tar.gz", hash = "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d"},
+ {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"},
+ {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"},
]
[package.extras]
@@ -1988,13 +1878,13 @@ testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pre-commit"
-version = "3.4.0"
+version = "3.6.0"
description = "A framework for managing and maintaining multi-language pre-commit hooks."
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "pre_commit-3.4.0-py2.py3-none-any.whl", hash = "sha256:96d529a951f8b677f730a7212442027e8ba53f9b04d217c4c67dc56c393ad945"},
- {file = "pre_commit-3.4.0.tar.gz", hash = "sha256:6bbd5129a64cad4c0dfaeeb12cd8f7ea7e15b77028d985341478c8af3c759522"},
+ {file = "pre_commit-3.6.0-py2.py3-none-any.whl", hash = "sha256:c255039ef399049a5544b6ce13d135caba8f2c28c3b4033277a788f434308376"},
+ {file = "pre_commit-3.6.0.tar.gz", hash = "sha256:d30bad9abf165f7785c15a21a1f46da7d0677cb00ee7ff4c579fd38922efe15d"},
]
[package.dependencies]
@@ -2006,13 +1896,13 @@ virtualenv = ">=20.10.0"
[[package]]
name = "prompt-toolkit"
-version = "3.0.39"
+version = "3.0.43"
description = "Library for building powerful interactive command lines in Python"
optional = false
python-versions = ">=3.7.0"
files = [
- {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"},
- {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"},
+ {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"},
+ {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"},
]
[package.dependencies]
@@ -2020,25 +1910,27 @@ wcwidth = "*"
[[package]]
name = "psutil"
-version = "5.9.5"
+version = "5.9.7"
description = "Cross-platform lib for process and system monitoring in Python."
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-files = [
- {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"},
- {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"},
- {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"},
- {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"},
- {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"},
- {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"},
- {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"},
- {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"},
- {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"},
- {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"},
- {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"},
- {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"},
- {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"},
- {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"},
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+files = [
+ {file = "psutil-5.9.7-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:0bd41bf2d1463dfa535942b2a8f0e958acf6607ac0be52265ab31f7923bcd5e6"},
+ {file = "psutil-5.9.7-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:5794944462509e49d4d458f4dbfb92c47539e7d8d15c796f141f474010084056"},
+ {file = "psutil-5.9.7-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:fe361f743cb3389b8efda21980d93eb55c1f1e3898269bc9a2a1d0bb7b1f6508"},
+ {file = "psutil-5.9.7-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:e469990e28f1ad738f65a42dcfc17adaed9d0f325d55047593cb9033a0ab63df"},
+ {file = "psutil-5.9.7-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:3c4747a3e2ead1589e647e64aad601981f01b68f9398ddf94d01e3dc0d1e57c7"},
+ {file = "psutil-5.9.7-cp27-none-win32.whl", hash = "sha256:1d4bc4a0148fdd7fd8f38e0498639ae128e64538faa507df25a20f8f7fb2341c"},
+ {file = "psutil-5.9.7-cp27-none-win_amd64.whl", hash = "sha256:4c03362e280d06bbbfcd52f29acd79c733e0af33d707c54255d21029b8b32ba6"},
+ {file = "psutil-5.9.7-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ea36cc62e69a13ec52b2f625c27527f6e4479bca2b340b7a452af55b34fcbe2e"},
+ {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1132704b876e58d277168cd729d64750633d5ff0183acf5b3c986b8466cd0284"},
+ {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8b7f07948f1304497ce4f4684881250cd859b16d06a1dc4d7941eeb6233bfe"},
+ {file = "psutil-5.9.7-cp36-cp36m-win32.whl", hash = "sha256:b27f8fdb190c8c03914f908a4555159327d7481dac2f01008d483137ef3311a9"},
+ {file = "psutil-5.9.7-cp36-cp36m-win_amd64.whl", hash = "sha256:44969859757f4d8f2a9bd5b76eba8c3099a2c8cf3992ff62144061e39ba8568e"},
+ {file = "psutil-5.9.7-cp37-abi3-win32.whl", hash = "sha256:c727ca5a9b2dd5193b8644b9f0c883d54f1248310023b5ad3e92036c5e2ada68"},
+ {file = "psutil-5.9.7-cp37-abi3-win_amd64.whl", hash = "sha256:f37f87e4d73b79e6c5e749440c3113b81d1ee7d26f21c19c47371ddea834f414"},
+ {file = "psutil-5.9.7-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:032f4f2c909818c86cea4fe2cc407f1c0f0cde8e6c6d702b28b8ce0c0d143340"},
+ {file = "psutil-5.9.7.tar.gz", hash = "sha256:3f02134e82cfb5d089fddf20bb2e03fd5cd52395321d1c8458a9e58500ff417c"},
]
[package.extras]
@@ -2071,13 +1963,13 @@ tests = ["pytest"]
[[package]]
name = "pyasn1"
-version = "0.5.0"
+version = "0.5.1"
description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
- {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"},
- {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"},
+ {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"},
+ {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"},
]
[[package]]
@@ -2181,17 +2073,29 @@ files = [
[[package]]
name = "pygments"
-version = "2.16.1"
+version = "2.17.2"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
python-versions = ">=3.7"
files = [
- {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"},
- {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"},
+ {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"},
+ {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"},
]
[package.extras]
plugins = ["importlib-metadata"]
+windows-terminal = ["colorama (>=0.4.6)"]
+
+[[package]]
+name = "pyhumps"
+version = "3.8.0"
+description = "🐫 Convert strings (and dictionary keys) between snake case, camel case and pascal case in Python. Inspired by Humps for Node"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pyhumps-3.8.0-py3-none-any.whl", hash = "sha256:060e1954d9069f428232a1adda165db0b9d8dfdce1d265d36df7fbff540acfd6"},
+ {file = "pyhumps-3.8.0.tar.gz", hash = "sha256:498026258f7ee1a8e447c2e28526c0bea9407f9a59c03260aee4bd6c04d681a3"},
+]
[[package]]
name = "pyparsing"
@@ -2221,13 +2125,13 @@ files = [
[[package]]
name = "pytest"
-version = "7.4.2"
+version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pytest-7.4.2-py3-none-any.whl", hash = "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002"},
- {file = "pytest-7.4.2.tar.gz", hash = "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"},
+ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
+ {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
]
[package.dependencies]
@@ -2442,13 +2346,13 @@ pyasn1 = ">=0.1.3"
[[package]]
name = "selenium"
-version = "4.13.0"
+version = "4.16.0"
description = ""
optional = false
python-versions = ">=3.8"
files = [
- {file = "selenium-4.13.0-py3-none-any.whl", hash = "sha256:f0f9185c01ae249a321529c4e3aa0edc2a900642e61fdbb76988cd72d2762ece"},
- {file = "selenium-4.13.0.tar.gz", hash = "sha256:3c413a4f1b8af67824703195e3b1c19cfb1c3186c799efa035d55fd59d6dd59f"},
+ {file = "selenium-4.16.0-py3-none-any.whl", hash = "sha256:aec71f4e6ed6cb3ec25c9c1b5ed56ae31b6da0a7f17474c7566d303f84e6219f"},
+ {file = "selenium-4.16.0.tar.gz", hash = "sha256:b2e987a445306151f7be0e6dfe2aa72a479c2ac6a91b9d5ef2d6dd4e49ad0435"},
]
[package.dependencies]
@@ -2459,43 +2363,21 @@ urllib3 = {version = ">=1.26,<3", extras = ["socks"]}
[[package]]
name = "setuptools"
-version = "68.2.2"
+version = "69.0.3"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "setuptools-68.2.2-py3-none-any.whl", hash = "sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a"},
- {file = "setuptools-68.2.2.tar.gz", hash = "sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87"},
+ {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"},
+ {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"},
]
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
[[package]]
-name = "setuptools-scm"
-version = "8.0.3"
-description = "the blessed package to manage your versions by scm tags"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "setuptools-scm-8.0.3.tar.gz", hash = "sha256:0169fd70197efda2f8c4d0b2a7a3d614431b488116f37b79d031e9e7ec884d8c"},
- {file = "setuptools_scm-8.0.3-py3-none-any.whl", hash = "sha256:813822234453438a13c78d05c8af29918fbc06f88efb33d38f065340bbb48c39"},
-]
-
-[package.dependencies]
-packaging = ">=20"
-setuptools = "*"
-tomli = {version = ">=1", markers = "python_version < \"3.11\""}
-typing-extensions = {version = "*", markers = "python_version < \"3.11\""}
-
-[package.extras]
-docs = ["entangled-cli[rich]", "mkdocs", "mkdocs-entangled-plugin", "mkdocs-material", "mkdocstrings[python]", "pygments"]
-rich = ["rich"]
-test = ["pytest", "rich", "virtualenv (>20)"]
-
-[[package]]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
@@ -2541,13 +2423,13 @@ files = [
[[package]]
name = "stack-data"
-version = "0.6.2"
+version = "0.6.3"
description = "Extract data from python stack frames and tracebacks for informative displays"
optional = false
python-versions = "*"
files = [
- {file = "stack_data-0.6.2-py3-none-any.whl", hash = "sha256:cbb2a53eb64e5785878201a97ed7c7b94883f48b87bfb0bbe8b623c74679e4a8"},
- {file = "stack_data-0.6.2.tar.gz", hash = "sha256:32d2dd0376772d01b6cb9fc996f3c8b57a357089dec328ed4b6553d037eaf815"},
+ {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"},
+ {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"},
]
[package.dependencies]
@@ -2619,37 +2501,37 @@ telegram = ["requests"]
[[package]]
name = "traitlets"
-version = "5.10.1"
+version = "5.14.1"
description = "Traitlets Python configuration system"
optional = false
python-versions = ">=3.8"
files = [
- {file = "traitlets-5.10.1-py3-none-any.whl", hash = "sha256:07ab9c5bf8a0499fd7b088ba51be899c90ffc936ffc797d7b6907fc516bcd116"},
- {file = "traitlets-5.10.1.tar.gz", hash = "sha256:db9c4aa58139c3ba850101913915c042bdba86f7c8a0dda1c6f7f92c5da8e542"},
+ {file = "traitlets-5.14.1-py3-none-any.whl", hash = "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74"},
+ {file = "traitlets-5.14.1.tar.gz", hash = "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"},
]
[package.extras]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
-test = ["argcomplete (>=3.0.3)", "mypy (>=1.5.1)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"]
+test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"]
[[package]]
name = "trio"
-version = "0.22.2"
+version = "0.23.2"
description = "A friendly Python library for async concurrency and I/O"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "trio-0.22.2-py3-none-any.whl", hash = "sha256:f43da357620e5872b3d940a2e3589aa251fd3f881b65a608d742e00809b1ec38"},
- {file = "trio-0.22.2.tar.gz", hash = "sha256:3887cf18c8bcc894433420305468388dac76932e9668afa1c49aa3806b6accb3"},
+ {file = "trio-0.23.2-py3-none-any.whl", hash = "sha256:5a0b566fa5d50cf231cfd6b08f3b03aa4179ff004b8f3144059587039e2b26d3"},
+ {file = "trio-0.23.2.tar.gz", hash = "sha256:da1d35b9a2b17eb32cae2e763b16551f9aa6703634735024e32f325c9285069e"},
]
[package.dependencies]
attrs = ">=20.1.0"
cffi = {version = ">=1.14", markers = "os_name == \"nt\" and implementation_name != \"pypy\""}
-exceptiongroup = {version = ">=1.0.0rc9", markers = "python_version < \"3.11\""}
+exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
idna = "*"
outcome = "*"
-sniffio = "*"
+sniffio = ">=1.3.0"
sortedcontainers = "*"
[[package]]
@@ -2695,45 +2577,44 @@ files = [
[[package]]
name = "typing-extensions"
-version = "4.8.0"
+version = "4.9.0"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
- {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"},
- {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
+ {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"},
+ {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"},
]
[[package]]
name = "tzdata"
-version = "2023.3"
+version = "2023.4"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
files = [
- {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
- {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
+ {file = "tzdata-2023.4-py2.py3-none-any.whl", hash = "sha256:aa3ace4329eeacda5b7beb7ea08ece826c28d761cda36e747cfbf97996d39bf3"},
+ {file = "tzdata-2023.4.tar.gz", hash = "sha256:dd54c94f294765522c77399649b4fefd95522479a664a0cec87f41bebc6148c9"},
]
[[package]]
name = "urllib3"
-version = "2.0.5"
+version = "1.26.18"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [
- {file = "urllib3-2.0.5-py3-none-any.whl", hash = "sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e"},
- {file = "urllib3-2.0.5.tar.gz", hash = "sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594"},
+ {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"},
+ {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"},
]
[package.dependencies]
-pysocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""}
+PySocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""}
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
-secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
-socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
-zstd = ["zstandard (>=0.18.0)"]
+brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
name = "uvicorn"
@@ -2756,19 +2637,19 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)",
[[package]]
name = "virtualenv"
-version = "20.24.5"
+version = "20.25.0"
description = "Virtual Python Environment builder"
optional = false
python-versions = ">=3.7"
files = [
- {file = "virtualenv-20.24.5-py3-none-any.whl", hash = "sha256:b80039f280f4919c77b30f1c23294ae357c4c8701042086e3fc005963e4e537b"},
- {file = "virtualenv-20.24.5.tar.gz", hash = "sha256:e8361967f6da6fbdf1426483bfe9fca8287c242ac0bc30429905721cefbff752"},
+ {file = "virtualenv-20.25.0-py3-none-any.whl", hash = "sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3"},
+ {file = "virtualenv-20.25.0.tar.gz", hash = "sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b"},
]
[package.dependencies]
distlib = ">=0.3.7,<1"
filelock = ">=3.12.2,<4"
-platformdirs = ">=3.9.1,<4"
+platformdirs = ">=3.9.1,<5"
[package.extras]
docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
@@ -2776,13 +2657,13 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess
[[package]]
name = "wcwidth"
-version = "0.2.7"
+version = "0.2.12"
description = "Measures the displayed width of unicode strings in a terminal"
optional = false
python-versions = "*"
files = [
- {file = "wcwidth-0.2.7-py2.py3-none-any.whl", hash = "sha256:fabf3e32999d9b0dab7d19d845149f326f04fe29bac67709ee071dbd92640a36"},
- {file = "wcwidth-0.2.7.tar.gz", hash = "sha256:1b6d30a98ddd5ce9bbdb33658191fd2423fc9da203fe3ef1855407dcb7ee4e26"},
+ {file = "wcwidth-0.2.12-py2.py3-none-any.whl", hash = "sha256:f26ec43d96c8cbfed76a5075dac87680124fa84e0855195a6184da9c187f133c"},
+ {file = "wcwidth-0.2.12.tar.gz", hash = "sha256:f01c104efdf57971bcb756f054dd58ddec5204dd15fa31d6503ea57947d97c02"},
]
[[package]]
@@ -2801,85 +2682,101 @@ h11 = ">=0.9.0,<1"
[[package]]
name = "yarl"
-version = "1.9.2"
+version = "1.9.4"
description = "Yet another URL library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"},
- {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"},
- {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"},
- {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"},
- {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"},
- {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"},
- {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"},
- {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"},
- {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"},
- {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"},
- {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"},
- {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"},
- {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"},
- {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"},
- {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"},
- {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"},
- {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"},
- {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"},
- {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"},
- {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"},
- {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"},
- {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"},
- {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"},
- {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"},
- {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"},
- {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"},
- {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"},
- {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"},
- {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"},
- {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"},
- {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"},
- {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"},
- {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"},
- {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"},
- {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"},
- {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"},
- {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"},
- {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"},
- {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"},
- {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"},
- {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"},
- {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"},
- {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"},
- {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"},
- {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"},
- {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"},
- {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"},
- {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"},
- {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"},
- {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"},
- {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"},
- {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"},
- {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"},
- {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"},
- {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"},
- {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"},
- {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"},
- {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"},
- {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"},
- {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"},
- {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"},
- {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"},
- {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"},
- {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"},
- {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"},
- {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"},
- {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"},
- {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"},
- {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"},
- {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"},
- {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"},
- {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"},
- {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"},
- {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"},
+ {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"},
+ {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"},
+ {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"},
+ {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"},
+ {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"},
+ {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"},
+ {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"},
+ {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"},
+ {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"},
+ {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"},
+ {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"},
+ {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"},
+ {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"},
+ {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"},
+ {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"},
+ {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"},
+ {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"},
+ {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"},
+ {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"},
+ {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"},
+ {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"},
+ {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"},
+ {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"},
+ {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"},
+ {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"},
+ {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"},
+ {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"},
+ {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"},
+ {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"},
+ {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"},
]
[package.dependencies]
@@ -2889,4 +2786,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "88fb4ebec8909eb124de6f9c2027aade4a7df333744ad1ea3151d5fbda5308fb"
+content-hash = "024cf99d0b83d13143dd03e3269ee1a6bba74c00ace1fadc5412f791dfa3c7b5"
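Note on the content-hash change just above: this value is Poetry's fingerprint of the dependency-relevant sections of pyproject.toml, so it is regenerated whenever dependencies change — here, the two packages added in the pyproject.toml hunk below. A mismatch between this hash and pyproject.toml is how `poetry install` detects a stale lock file (checkable explicitly with `poetry lock --check` on Poetry versions that support that flag).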
diff --git a/benchmark/pyproject.toml b/benchmark/pyproject.toml
index 61c987b55..a577d7a1e 100644
--- a/benchmark/pyproject.toml
+++ b/benchmark/pyproject.toml
@@ -32,6 +32,8 @@ python-multipart = "^0.0.6"
toml = "^0.10.2"
helicone = "^1.0.9"
httpx = "^0.24.0"
+agent-protocol-client = "^1.1.0"
+click-default-group = "^1.2.4"
[tool.poetry.group.dev.dependencies]
flake8 = "^3.9.2"
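Of the two runtime dependencies added above, click-default-group is the less self-explanatory: it provides a click.Group subclass that routes a bare invocation to a designated default subcommand. A minimal sketch of the pattern follows — the command names are illustrative only, not the benchmark's actual CLI module:

    import click
    from click_default_group import DefaultGroup

    @click.group(cls=DefaultGroup, default="run", default_if_no_args=True)
    def cli():
        """Group whose bare invocation falls through to the `run` subcommand."""

    @cli.command()
    def run():
        click.echo("running benchmark")

    if __name__ == "__main__":
        cli()

With this, invoking the executable with no subcommand behaves identically to invoking `run` — the usual pattern for keeping a bare invocation backwards-compatible when subcommands are introduced.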
diff --git a/benchmark/server.py b/benchmark/server.py
deleted file mode 100644
index e7f639eb8..000000000
--- a/benchmark/server.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import io
-import json
-import logging
-import shutil
-from pathlib import Path
-from random import randint
-from typing import Annotated, Any, Dict, List
-
-from fastapi import FastAPI, File, Form, HTTPException, UploadFile
-from fastapi.responses import StreamingResponse
-from pydantic import BaseModel
-
-# Set up logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-app = FastAPI()
-artifacts: List[Dict[str, Any]] = []
-
-
-class Task(BaseModel):
- input: str
-
-
-@app.post("/agent/tasks/{task_id}/artifacts")
-async def upload_file(
- task_id: str, file: Annotated[UploadFile, File()], relative_path: str = Form("")
-) -> Dict[str, Any]:
- logger.info(
- "Uploading file for task_id: %s with relative path: %s", task_id, relative_path
- )
- absolute_directory_path = Path(__file__).parent.absolute()
- save_path = (
- absolute_directory_path
- / "agent/gpt-engineer"
- / "projects/my-new-project/workspace"
- )
-
- random_string = str(randint(0, 100000))
- while random_string in artifacts:
- random_string = str(randint(0, 100000))
-
- artifact_data = await file.read()
- artifacts.append(
- {
- "binary": artifact_data,
- "relative_path": relative_path,
- "file_name": file.filename,
- "artifact_id": random_string,
- }
- )
-
- print(artifacts)
- return {
- "artifact_id": random_string,
- "file_name": "file_name",
- "relative_path": "relative_path",
- }
-
-
-@app.get("/agent/tasks/{task_id}/artifacts")
-async def get_files() -> List[Dict[str, Any]]:
- logger.info("Fetching list of files for task")
- return artifacts
-
-
-@app.get("/agent/tasks/{task_id}/artifacts/{artifact_id}")
-async def get_file(artifact_id: str):
- for artifact in artifacts:
- if artifact["artifact_id"] == artifact_id:
- break
- else:
- logger.error("Attempt to access nonexistent artifact with ID: %s", artifact_id)
- raise HTTPException(status_code=404, detail="Artifact not found")
-
- logger.info("Fetching artifact with ID: %s", artifact_id)
- # find artifact where artifact_id = artifact_id
-
- for artifact in artifacts:
- if artifact["artifact_id"] == artifact_id:
- return StreamingResponse(
- io.BytesIO(artifact["binary"]),
- media_type="application/octet-stream",
- headers={"Content-Disposition": f"attachment; filename=test.txt"},
- )
- # return 404
- return HTTPException(status_code=404, detail="Artifact not found")
-
-
-@app.post("/agent/tasks/{task_id}/steps")
-async def create_steps(task_id: str):
- logger.info("Creating step for task_id: %s", task_id)
- return {
- "input": "random",
- "additional_input": {},
- "task_id": task_id,
- "step_id": "random_step",
- "name": "random",
- "status": "created",
- "output": "random",
- "additional_output": {},
- "artifacts": [],
- "is_last": True,
- }
-
-
-@app.post("/agent/tasks")
-async def create_tasks(task: Task):
- artifacts.clear()
- return {
- "input": "random",
- "additional_input": {},
- "task_id": "static_task_id",
- "artifacts": [],
- }
-
-
-if __name__ == "__main__":
- import uvicorn
-
- uvicorn.run(app, host="0.0.0.0", port=8000)
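For anyone resurrecting the mock server deleted above, note a few latent bugs in the removed code: the uniqueness check `while random_string in artifacts` compares a string against a list of dicts and so can never detect a collision; the final `return HTTPException(...)` in `get_file` sends the exception as a 200 JSON body rather than an actual 404; and the upload endpoint returns the literal strings "file_name" and "relative_path" instead of the submitted values. A hedged sketch correcting the first two spots, assuming the same in-memory artifact store as the original:

    import io
    from random import randint
    from typing import Any, Dict, List

    from fastapi import FastAPI, HTTPException
    from fastapi.responses import StreamingResponse

    app = FastAPI()
    artifacts: List[Dict[str, Any]] = []


    def new_artifact_id() -> str:
        # Check against the stored IDs; `random_string in artifacts`
        # tested a str against dicts and could never be True.
        existing = {a["artifact_id"] for a in artifacts}
        artifact_id = str(randint(0, 100000))
        while artifact_id in existing:
            artifact_id = str(randint(0, 100000))
        return artifact_id


    @app.get("/agent/tasks/{task_id}/artifacts/{artifact_id}")
    async def get_file(task_id: str, artifact_id: str) -> StreamingResponse:
        for artifact in artifacts:
            if artifact["artifact_id"] == artifact_id:
                return StreamingResponse(
                    io.BytesIO(artifact["binary"]),
                    media_type="application/octet-stream",
                    headers={
                        "Content-Disposition": f'attachment; filename={artifact["file_name"]}'
                    },
                )
        # Raising (rather than returning) the exception is what makes
        # FastAPI emit an actual 404 response.
        raise HTTPException(status_code=404, detail="Artifact not found")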