author    Nicholas Tindle <nicholas.tindle@agpt.co>  2024-04-19 10:50:35 -0500
committer Nicholas Tindle <nicholas.tindle@agpt.co>  2024-04-19 10:50:35 -0500
commit    fb601627e86990272ddf0fed21a73bdea2a64e45 (patch)
tree      9b00729cc1bc9dbb00802f76671e2314cd61f73d
parent    Merge branch 'master' into forge/fixes (diff)
parent    feat(agent/core): Allow zero-argument instantiation of `OpenAIProvider` (diff)
Merge branch 'master' into forge/fixes
 .github/workflows/pr-label.yml                                              |   1
 autogpts/autogpt/autogpt/agent_factory/profile_generator.py                 |   5
 autogpts/autogpt/autogpt/agents/features/agent_file_manager.py              |  24
 autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py               |   9
 autogpts/autogpt/autogpt/app/main.py                                        |  18
 autogpts/autogpt/autogpt/config/config.py                                   |  37
 autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py    |   5
 autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py  |   5
 autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py    |   5
 autogpts/autogpt/autogpt/core/resource/model_providers/openai.py            | 283
 autogpts/autogpt/autogpt/core/resource/model_providers/schema.py            |   5
 autogpts/autogpt/autogpt/core/utils/json_schema.py                          |  20
 autogpts/autogpt/autogpt/core/utils/json_utils.py                           |  19
 autogpts/autogpt/autogpt/file_storage/base.py                               |   4
 autogpts/autogpt/autogpt/file_storage/gcs.py                                |  15
 autogpts/autogpt/autogpt/file_storage/local.py                              |  14
 autogpts/autogpt/autogpt/file_storage/s3.py                                 |  33
 autogpts/autogpt/autogpt/logs/config.py                                     |   7
 autogpts/autogpt/tests/unit/test_gcs_file_storage.py                        |  21
 autogpts/autogpt/tests/unit/test_local_file_storage.py                      |  21
 autogpts/autogpt/tests/unit/test_s3_file_storage.py                         |  21
 21 files changed, 393 insertions(+), 179 deletions(-)
diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml
index 68327d976..415637702 100644
--- a/.github/workflows/pr-label.yml
+++ b/.github/workflows/pr-label.yml
@@ -52,6 +52,7 @@ jobs:
l_label: 'size/l'
l_max_size: 500
xl_label: 'size/xl'
+ message_if_xl:
scope:
if: ${{ github.event_name == 'pull_request_target' }}
diff --git a/autogpts/autogpt/autogpt/agent_factory/profile_generator.py b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py
index 889b7f2d4..78afbe51a 100644
--- a/autogpts/autogpt/autogpt/agent_factory/profile_generator.py
+++ b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py
@@ -15,7 +15,6 @@ from autogpt.core.resource.model_providers.schema import (
CompletionModelFunction,
)
from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import json_loads
logger = logging.getLogger(__name__)
@@ -203,9 +202,7 @@ class AgentProfileGenerator(PromptStrategy):
f"LLM did not call {self._create_agent_function.name} function; "
"agent profile creation failed"
)
- arguments: object = json_loads(
- response_content.tool_calls[0].function.arguments
- )
+ arguments: object = response_content.tool_calls[0].function.arguments
ai_profile = AIProfile(
ai_name=arguments.get("name"),
ai_role=arguments.get("description"),
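Note: with `json_loads` moved into the model provider (see the `openai.py` changes below), `function.arguments` reaches the prompt strategies as an already-parsed dict. A minimal illustration of the new shape (values invented):

```python
# Illustration only: tool-call arguments after this change. Decoding now
# happens once in the provider, so strategies receive a dict, not a JSON str.
arguments: dict = {"name": "ResearchGPT", "description": "an autonomous researcher"}
ai_name = arguments.get("name")        # previously: json_loads(raw_json)["name"]
ai_role = arguments.get("description")
print(ai_name, "-", ai_role)
```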
diff --git a/autogpts/autogpt/autogpt/agents/features/agent_file_manager.py b/autogpts/autogpt/autogpt/agents/features/agent_file_manager.py
index 6ba2bec78..80257fbea 100644
--- a/autogpts/autogpt/autogpt/agents/features/agent_file_manager.py
+++ b/autogpts/autogpt/autogpt/agents/features/agent_file_manager.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import logging
+from typing import Optional
from autogpt.file_storage.base import FileStorage
@@ -13,11 +14,11 @@ class AgentFileManagerMixin:
"""Mixin that adds file manager (e.g. Agent state)
and workspace manager (e.g. Agent output files) support."""
- files: FileStorage = None
+ files: FileStorage
"""Agent-related files, e.g. state, logs.
Use `workspace` to access the agent's workspace files."""
- workspace: FileStorage = None
+ workspace: FileStorage
"""Workspace that the agent has access to, e.g. for reading/writing files.
Use `files` to access agent-related files, e.g. state, logs."""
@@ -68,10 +69,25 @@ class AgentFileManagerMixin:
"""Get the agent's file operation logs as list of strings."""
return self._file_logs_cache
- async def save_state(self) -> None:
+ async def save_state(self, save_as: Optional[str] = None) -> None:
"""Save the agent's state to the state file."""
state: BaseAgentSettings = getattr(self, "state")
- await self.files.write_file(self.files.root / self.STATE_FILE, state.json())
+ if save_as:
+ temp_id = state.agent_id
+ state.agent_id = save_as
+ self._file_storage.make_dir(f"agents/{save_as}")
+ # Save state
+ await self._file_storage.write_file(
+ f"agents/{save_as}/{self.STATE_FILE}", state.json()
+ )
+ # Copy workspace
+ self._file_storage.copy(
+ f"agents/{temp_id}/workspace",
+ f"agents/{save_as}/workspace",
+ )
+ state.agent_id = temp_id
+ else:
+ await self.files.write_file(self.files.root / self.STATE_FILE, state.json())
def change_agent_id(self, new_id: str):
"""Change the agent's ID and update the file storage accordingly."""
diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py
index 72916e1fa..994df6181 100644
--- a/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py
+++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py
@@ -26,7 +26,7 @@ from autogpt.core.resource.model_providers.schema import (
CompletionModelFunction,
)
from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import extract_dict_from_json, json_loads
+from autogpt.core.utils.json_utils import extract_dict_from_json
from autogpt.prompts.utils import format_numbered_list, indent
@@ -392,10 +392,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
f"{json.dumps(assistant_reply_dict, indent=4)}"
)
- _, errors = self.response_schema.validate_object(
- object=assistant_reply_dict,
- logger=self.logger,
- )
+ _, errors = self.response_schema.validate_object(assistant_reply_dict)
if errors:
raise InvalidAgentResponseError(
"Validation of response failed:\n "
@@ -439,7 +436,7 @@ def extract_command(
raise InvalidAgentResponseError("No 'tool_calls' in assistant reply")
assistant_reply_json["command"] = {
"name": assistant_reply.tool_calls[0].function.name,
- "args": json_loads(assistant_reply.tool_calls[0].function.arguments),
+ "args": assistant_reply.tool_calls[0].function.arguments,
}
try:
if not isinstance(assistant_reply_json, dict):
diff --git a/autogpts/autogpt/autogpt/app/main.py b/autogpts/autogpt/autogpt/app/main.py
index 04df5e1a6..6c428ca6e 100644
--- a/autogpts/autogpt/autogpt/app/main.py
+++ b/autogpts/autogpt/autogpt/app/main.py
@@ -345,19 +345,13 @@ async def run_auto_gpt(
logger.info(f"Saving state of {agent_id}...")
# Allow user to Save As other ID
- save_as_id = (
- clean_input(
- config,
- f"Press enter to save as '{agent_id}',"
- " or enter a different ID to save to:",
- )
- or agent_id
+ save_as_id = clean_input(
+ config,
+ f"Press enter to save as '{agent_id}',"
+ " or enter a different ID to save to:",
)
- if save_as_id and save_as_id != agent_id:
- agent.change_agent_id(save_as_id)
- # TODO: allow many-to-one relations of agents and workspaces
-
- await agent.save_state()
+ # TODO: allow many-to-one relations of agents and workspaces
+ await agent.save_state(save_as_id if not save_as_id.isspace() else None)
@coroutine
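How the two sides compose: an empty reply from `clean_input` (plain Enter) is not whitespace, so `""` passes the `isspace()` guard, and `save_state`'s `if save_as:` then treats it as falsy, keeping the current ID. Illustratively:

```python
# Illustration of the save-as guard above: plain Enter ("") and pure
# whitespace both fall back to the current agent ID; anything else is
# used as the new ID.
for reply in ("", "   ", "agent-2"):
    save_as = reply if not reply.isspace() else None
    print(repr(reply), "->", save_as if save_as else "keep current ID")
```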
diff --git a/autogpts/autogpt/autogpt/config/config.py b/autogpts/autogpt/autogpt/config/config.py
index ed1e5f78c..040345f08 100644
--- a/autogpts/autogpt/autogpt/config/config.py
+++ b/autogpts/autogpt/autogpt/config/config.py
@@ -63,10 +63,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
# File storage
file_storage_backend: FileStorageBackendName = UserConfigurable(
- default=FileStorageBackendName.LOCAL,
- from_env=lambda: FileStorageBackendName(v)
- if (v := os.getenv("FILE_STORAGE_BACKEND"))
- else None,
+ default=FileStorageBackendName.LOCAL, from_env="FILE_STORAGE_BACKEND"
)
##########################
@@ -74,27 +71,23 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
##########################
# Paths
ai_settings_file: Path = UserConfigurable(
- default=AI_SETTINGS_FILE,
- from_env=lambda: Path(f) if (f := os.getenv("AI_SETTINGS_FILE")) else None,
+ default=AI_SETTINGS_FILE, from_env="AI_SETTINGS_FILE"
)
prompt_settings_file: Path = UserConfigurable(
default=PROMPT_SETTINGS_FILE,
- from_env=lambda: Path(f) if (f := os.getenv("PROMPT_SETTINGS_FILE")) else None,
+ from_env="PROMPT_SETTINGS_FILE",
)
# Model configuration
fast_llm: str = UserConfigurable(
default="gpt-3.5-turbo-0125",
- from_env=lambda: os.getenv("FAST_LLM"),
+ from_env="FAST_LLM",
)
smart_llm: str = UserConfigurable(
default="gpt-4-turbo-preview",
- from_env=lambda: os.getenv("SMART_LLM"),
- )
- temperature: float = UserConfigurable(
- default=0,
- from_env=lambda: float(v) if (v := os.getenv("TEMPERATURE")) else None,
+ from_env="SMART_LLM",
)
+ temperature: float = UserConfigurable(default=0, from_env="TEMPERATURE")
openai_functions: bool = UserConfigurable(
default=False, from_env=lambda: os.getenv("OPENAI_FUNCTIONS", "False") == "True"
)
@@ -115,10 +108,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
memory_backend: str = UserConfigurable("json_file", from_env="MEMORY_BACKEND")
memory_index: str = UserConfigurable("auto-gpt-memory", from_env="MEMORY_INDEX")
redis_host: str = UserConfigurable("localhost", from_env="REDIS_HOST")
- redis_port: int = UserConfigurable(
- default=6379,
- from_env=lambda: int(v) if (v := os.getenv("REDIS_PORT")) else None,
- )
+ redis_port: int = UserConfigurable(default=6379, from_env="REDIS_PORT")
redis_password: str = UserConfigurable("", from_env="REDIS_PASSWORD")
wipe_redis_on_start: bool = UserConfigurable(
default=True,
@@ -170,10 +160,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
sd_webui_url: Optional[str] = UserConfigurable(
default="http://localhost:7860", from_env="SD_WEBUI_URL"
)
- image_size: int = UserConfigurable(
- default=256,
- from_env=lambda: int(v) if (v := os.getenv("IMAGE_SIZE")) else None,
- )
+ image_size: int = UserConfigurable(default=256, from_env="IMAGE_SIZE")
# Audio to text
audio_to_text_provider: str = UserConfigurable(
@@ -198,8 +185,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
###################
plugins_dir: str = UserConfigurable("plugins", from_env="PLUGINS_DIR")
plugins_config_file: Path = UserConfigurable(
- default=PLUGINS_CONFIG_FILE,
- from_env=lambda: Path(f) if (f := os.getenv("PLUGINS_CONFIG_FILE")) else None,
+ default=PLUGINS_CONFIG_FILE, from_env="PLUGINS_CONFIG_FILE"
)
plugins_config: PluginsConfig = Field(
default_factory=lambda: PluginsConfig(plugins={})
@@ -223,8 +209,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
# OpenAI
openai_credentials: Optional[OpenAICredentials] = None
azure_config_file: Optional[Path] = UserConfigurable(
- default=AZURE_CONFIG_FILE,
- from_env=lambda: Path(f) if (f := os.getenv("AZURE_CONFIG_FILE")) else None,
+ default=AZURE_CONFIG_FILE, from_env="AZURE_CONFIG_FILE"
)
# Github
@@ -234,7 +219,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
# Google
google_api_key: Optional[str] = UserConfigurable(from_env="GOOGLE_API_KEY")
google_custom_search_engine_id: Optional[str] = UserConfigurable(
- from_env=lambda: os.getenv("GOOGLE_CUSTOM_SEARCH_ENGINE_ID"),
+ from_env="GOOGLE_CUSTOM_SEARCH_ENGINE_ID",
)
# Huggingface
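The removed boilerplate all followed one pattern: `from_env=lambda: T(v) if (v := os.getenv("VAR")) else None`. Passing the variable name as a plain string suggests `UserConfigurable` now performs the lookup itself and lets the field's type drive the cast. A sketch of that idea (not the actual implementation):

```python
import os
from pathlib import Path
from typing import Callable, Optional, Union

def resolve_from_env(from_env: Union[str, Callable[[], object]], cast):
    """Sketch only: resolve a from_env spec the way the slimmed-down config
    fields imply. A string is treated as an env-var name and cast with the
    field's type; a callable is invoked as before."""
    raw = os.getenv(from_env) if isinstance(from_env, str) else from_env()
    return cast(raw) if raw is not None else None

os.environ["AI_SETTINGS_FILE"] = "ai_settings.yaml"
print(resolve_from_env("AI_SETTINGS_FILE", Path))  # ai_settings.yaml (as a Path)
print(resolve_from_env("REDIS_PORT", int))         # None while the var is unset
```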
diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
index d26d86fd6..ae137a985 100644
--- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
+++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
@@ -11,7 +11,6 @@ from autogpt.core.resource.model_providers import (
CompletionModelFunction,
)
from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import json_loads
logger = logging.getLogger(__name__)
@@ -195,9 +194,7 @@ class InitialPlan(PromptStrategy):
f"LLM did not call {self._create_plan_function.name} function; "
"plan creation failed"
)
- parsed_response: object = json_loads(
- response_content.tool_calls[0].function.arguments
- )
+ parsed_response: object = response_content.tool_calls[0].function.arguments
parsed_response["task_list"] = [
Task.parse_obj(task) for task in parsed_response["task_list"]
]
diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py
index d030c05e1..133b4590d 100644
--- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py
+++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py
@@ -9,7 +9,6 @@ from autogpt.core.resource.model_providers import (
CompletionModelFunction,
)
from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import json_loads
logger = logging.getLogger(__name__)
@@ -141,9 +140,7 @@ class NameAndGoals(PromptStrategy):
f"LLM did not call {self._create_agent_function} function; "
"agent profile creation failed"
)
- parsed_response = json_loads(
- response_content.tool_calls[0].function.arguments
- )
+ parsed_response = response_content.tool_calls[0].function.arguments
except KeyError:
logger.debug(f"Failed to parse this response content: {response_content}")
raise
diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py
index dec67c295..0d6daad2e 100644
--- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py
+++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py
@@ -11,7 +11,6 @@ from autogpt.core.resource.model_providers import (
CompletionModelFunction,
)
from autogpt.core.utils.json_schema import JSONSchema
-from autogpt.core.utils.json_utils import json_loads
logger = logging.getLogger(__name__)
@@ -188,9 +187,7 @@ class NextAbility(PromptStrategy):
raise ValueError("LLM did not call any function")
function_name = response_content.tool_calls[0].function.name
- function_arguments = json_loads(
- response_content.tool_calls[0].function.arguments
- )
+ function_arguments = response_content.tool_calls[0].function.arguments
parsed_response = {
"motivation": function_arguments.pop("motivation"),
"self_criticism": function_arguments.pop("self_criticism"),
diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py
index 69bfffb30..b974e6e03 100644
--- a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py
+++ b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py
@@ -3,7 +3,7 @@ import logging
import math
import os
from pathlib import Path
-from typing import Callable, Coroutine, Iterator, Optional, ParamSpec, TypeVar
+from typing import Any, Callable, Coroutine, Iterator, Optional, ParamSpec, TypeVar
import sentry_sdk
import tenacity
@@ -11,12 +11,17 @@ import tiktoken
import yaml
from openai._exceptions import APIStatusError, RateLimitError
from openai.types import CreateEmbeddingResponse
-from openai.types.chat import ChatCompletion
+from openai.types.chat import (
+ ChatCompletion,
+ ChatCompletionMessage,
+ ChatCompletionMessageParam,
+)
from pydantic import SecretStr
from autogpt.core.configuration import Configurable, UserConfigurable
from autogpt.core.resource.model_providers.schema import (
AssistantChatMessage,
+ AssistantFunctionCall,
AssistantToolCall,
AssistantToolCallDict,
ChatMessage,
@@ -314,15 +319,20 @@ class OpenAIProvider(
_budget: ModelProviderBudget
_configuration: OpenAIConfiguration
+ _credentials: OpenAICredentials
def __init__(
self,
- settings: OpenAISettings,
- logger: logging.Logger,
+ settings: Optional[OpenAISettings] = None,
+ logger: Optional[logging.Logger] = None,
):
+ if not settings:
+ settings = self.default_settings.copy(deep=True)
+ if not settings.credentials:
+ settings.credentials = OpenAICredentials.from_env()
+
self._settings = settings
- assert settings.credentials, "Cannot create OpenAIProvider without credentials"
self._configuration = settings.configuration
self._credentials = settings.credentials
self._budget = settings.budget
@@ -338,7 +348,7 @@ class OpenAIProvider(
self._client = AsyncOpenAI(**self._credentials.get_api_access_kwargs())
- self._logger = logger
+ self._logger = logger or logging.getLogger(__name__)
def get_token_limit(self, model_name: str) -> int:
"""Get the token limit for a given model."""
@@ -406,80 +416,90 @@ class OpenAIProvider(
) -> ChatModelResponse[_T]:
"""Create a completion using the OpenAI API."""
- completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs)
- tool_calls_compat_mode = functions and "tools" not in completion_kwargs
- if "messages" in completion_kwargs:
- model_prompt += completion_kwargs["messages"]
- del completion_kwargs["messages"]
+ openai_messages, completion_kwargs = self._get_chat_completion_args(
+ model_prompt, model_name, functions, **kwargs
+ )
+ tool_calls_compat_mode = bool(functions and "tools" not in completion_kwargs)
- cost = 0.0
+ total_cost = 0.0
attempts = 0
while True:
- _response = await self._create_chat_completion(
- messages=model_prompt,
+ _response, _cost, t_input, t_output = await self._create_chat_completion(
+ messages=openai_messages,
**completion_kwargs,
)
+ total_cost += _cost
+
+ # If parsing the response fails, append the error to the prompt, and let the
+ # LLM fix its mistake(s).
+ attempts += 1
+ parse_errors: list[Exception] = []
_assistant_msg = _response.choices[0].message
+
+ tool_calls, _errors = self._parse_assistant_tool_calls(
+ _assistant_msg, tool_calls_compat_mode
+ )
+ parse_errors += _errors
+
assistant_msg = AssistantChatMessage(
content=_assistant_msg.content,
- tool_calls=(
- [AssistantToolCall(**tc.dict()) for tc in _assistant_msg.tool_calls]
- if _assistant_msg.tool_calls
- else None
- ),
- )
- response = ChatModelResponse(
- response=assistant_msg,
- model_info=OPEN_AI_CHAT_MODELS[model_name],
- prompt_tokens_used=(
- _response.usage.prompt_tokens if _response.usage else 0
- ),
- completion_tokens_used=(
- _response.usage.completion_tokens if _response.usage else 0
- ),
- )
- cost += self._budget.update_usage_and_cost(response)
- self._logger.debug(
- f"Completion usage: {response.prompt_tokens_used} input, "
- f"{response.completion_tokens_used} output - ${round(cost, 5)}"
+ tool_calls=tool_calls or None,
)
- # If parsing the response fails, append the error to the prompt, and let the
- # LLM fix its mistake(s).
- try:
- attempts += 1
-
- if (
- tool_calls_compat_mode
- and assistant_msg.content
- and not assistant_msg.tool_calls
- ):
- assistant_msg.tool_calls = list(
- _tool_calls_compat_extract_calls(assistant_msg.content)
+ parsed_result: _T = None # type: ignore
+ if not parse_errors:
+ try:
+ parsed_result = completion_parser(assistant_msg)
+ except Exception as e:
+ parse_errors.append(e)
+
+ if not parse_errors:
+ if attempts > 1:
+ self._logger.debug(
+ f"Total cost for {attempts} attempts: ${round(total_cost, 5)}"
)
- response.parsed_result = completion_parser(assistant_msg)
- break
- except Exception as e:
- self._logger.warning(f"Parsing attempt #{attempts} failed: {e}")
- self._logger.debug(f"Parsing failed on response: '''{assistant_msg}'''")
- sentry_sdk.capture_exception(
- error=e,
- extras={"assistant_msg": assistant_msg, "i_attempt": attempts},
+ return ChatModelResponse(
+ response=AssistantChatMessage(
+ content=_assistant_msg.content,
+ tool_calls=tool_calls or None,
+ ),
+ parsed_result=parsed_result,
+ model_info=OPEN_AI_CHAT_MODELS[model_name],
+ prompt_tokens_used=t_input,
+ completion_tokens_used=t_output,
+ )
+
+ else:
+ self._logger.debug(
+ f"Parsing failed on response: '''{_assistant_msg}'''"
+ )
+ self._logger.warning(
+ f"Parsing attempt #{attempts} failed: {parse_errors}"
)
+ for e in parse_errors:
+ sentry_sdk.capture_exception(
+ error=e,
+ extras={"assistant_msg": _assistant_msg, "i_attempt": attempts},
+ )
+
if attempts < self._configuration.fix_failed_parse_tries:
- model_prompt.append(assistant_msg)
- model_prompt.append(
- ChatMessage.system(f"ERROR PARSING YOUR RESPONSE:\n\n{e}")
+ openai_messages.append(_assistant_msg.dict(exclude_none=True))
+ openai_messages.append(
+ {
+ "role": "system",
+ "content": (
+ "ERROR PARSING YOUR RESPONSE:\n\n"
+ + "\n\n".join(
+ f"{e.__class__.__name__}: {e}" for e in parse_errors
+ )
+ ),
+ }
)
+ continue
else:
- raise
-
- if attempts > 1:
- self._logger.debug(f"Total cost for {attempts} attempts: ${round(cost, 5)}")
-
- return response
+ raise parse_errors[0]
async def create_embedding(
self,
@@ -501,21 +521,24 @@ class OpenAIProvider(
self._budget.update_usage_and_cost(response)
return response
- def _get_completion_kwargs(
+ def _get_chat_completion_args(
self,
+ model_prompt: list[ChatMessage],
model_name: OpenAIModelName,
functions: Optional[list[CompletionModelFunction]] = None,
**kwargs,
- ) -> dict:
- """Get kwargs for completion API call.
+ ) -> tuple[list[ChatCompletionMessageParam], dict[str, Any]]:
+ """Prepare chat completion arguments and keyword arguments for API call.
Args:
- model: The model to use.
- kwargs: Keyword arguments to override the default values.
+ model_prompt: List of ChatMessages.
+ model_name: The model to use.
+ functions: Optional list of functions available to the LLM.
+ kwargs: Additional keyword arguments.
Returns:
- The kwargs for the chat API call.
-
+ list[ChatCompletionMessageParam]: Prompt messages for the OpenAI call
+ dict[str, Any]: Any other kwargs for the OpenAI call
"""
kwargs.update(self._credentials.get_model_access_kwargs(model_name))
@@ -538,7 +561,19 @@ class OpenAIProvider(
kwargs["extra_headers"] = kwargs.get("extra_headers", {})
kwargs["extra_headers"].update(extra_headers.copy())
- return kwargs
+ if "messages" in kwargs:
+ model_prompt += kwargs["messages"]
+ del kwargs["messages"]
+
+ openai_messages: list[ChatCompletionMessageParam] = [
+ message.dict(
+ include={"role", "content", "tool_calls", "name"},
+ exclude_none=True,
+ )
+ for message in model_prompt
+ ]
+
+ return openai_messages, kwargs
def _get_embedding_kwargs(
self,
@@ -563,28 +598,106 @@ class OpenAIProvider(
return kwargs
- def _create_chat_completion(
- self, messages: list[ChatMessage], *_, **kwargs
- ) -> Coroutine[None, None, ChatCompletion]:
- """Create a chat completion using the OpenAI API with retry handling."""
+ async def _create_chat_completion(
+ self,
+ messages: list[ChatCompletionMessageParam],
+ model: OpenAIModelName,
+ *_,
+ **kwargs,
+ ) -> tuple[ChatCompletion, float, int, int]:
+ """
+ Create a chat completion using the OpenAI API with retry handling.
+
+ Params:
+ openai_messages: List of OpenAI-consumable message dict objects
+ model: The model to use for the completion
+
+ Returns:
+ ChatCompletion: The chat completion response object
+ float: The cost ($) of this completion
+ int: Number of prompt tokens used
+ int: Number of completion tokens used
+ """
@self._retry_api_request
async def _create_chat_completion_with_retry(
- messages: list[ChatMessage], *_, **kwargs
+ messages: list[ChatCompletionMessageParam], **kwargs
) -> ChatCompletion:
- raw_messages = [
- message.dict(
- include={"role", "content", "tool_calls", "name"},
- exclude_none=True,
- )
- for message in messages
- ]
return await self._client.chat.completions.create(
- messages=raw_messages, # type: ignore
+ messages=messages, # type: ignore
**kwargs,
)
- return _create_chat_completion_with_retry(messages, *_, **kwargs)
+ completion = await _create_chat_completion_with_retry(
+ messages, model=model, **kwargs
+ )
+
+ if completion.usage:
+ prompt_tokens_used = completion.usage.prompt_tokens
+ completion_tokens_used = completion.usage.completion_tokens
+ else:
+ prompt_tokens_used = completion_tokens_used = 0
+
+ cost = self._budget.update_usage_and_cost(
+ ChatModelResponse(
+ response=AssistantChatMessage(content=None),
+ model_info=OPEN_AI_CHAT_MODELS[model],
+ prompt_tokens_used=prompt_tokens_used,
+ completion_tokens_used=completion_tokens_used,
+ )
+ )
+ self._logger.debug(
+ f"Completion usage: {prompt_tokens_used} input, "
+ f"{completion_tokens_used} output - ${round(cost, 5)}"
+ )
+ return completion, cost, prompt_tokens_used, completion_tokens_used
+
+ def _parse_assistant_tool_calls(
+ self, assistant_message: ChatCompletionMessage, compat_mode: bool = False
+ ):
+ tool_calls: list[AssistantToolCall] = []
+ parse_errors: list[Exception] = []
+
+ if assistant_message.tool_calls:
+ for _tc in assistant_message.tool_calls:
+ try:
+ parsed_arguments = json_loads(_tc.function.arguments)
+ except Exception as e:
+ err_message = (
+ f"Decoding arguments for {_tc.function.name} failed: "
+ + str(e.args[0])
+ )
+ parse_errors.append(
+ type(e)(err_message, *e.args[1:]).with_traceback(
+ e.__traceback__
+ )
+ )
+ continue
+
+ tool_calls.append(
+ AssistantToolCall(
+ id=_tc.id,
+ type=_tc.type,
+ function=AssistantFunctionCall(
+ name=_tc.function.name,
+ arguments=parsed_arguments,
+ ),
+ )
+ )
+
+ # If parsing of all tool calls succeeds in the end, we ignore any issues
+ if len(tool_calls) == len(assistant_message.tool_calls):
+ parse_errors = []
+
+ elif compat_mode and assistant_message.content:
+ try:
+ tool_calls = list(
+ _tool_calls_compat_extract_calls(assistant_message.content)
+ )
+ except Exception as e:
+ parse_errors.append(e)
+
+ return tool_calls, parse_errors
def _create_embedding(
self, text: str, *_, **kwargs
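The restructured completion loop no longer raises on the first parse failure: errors from tool-call decoding and from `completion_parser` are pooled, echoed back to the model as a system message, and the request is retried up to `fix_failed_parse_tries` times. A condensed, hypothetical sketch of that control flow (names invented):

```python
import json

def complete_with_fixups(get_completion, parse, max_attempts: int = 3):
    """Condensed sketch (invented names) of the parse-and-retry flow above."""
    messages: list[dict] = []
    for attempt in range(1, max_attempts + 1):
        reply = get_completion(messages)
        try:
            return parse(reply)  # success: hand back the parsed result
        except Exception as e:
            if attempt == max_attempts:
                raise  # out of attempts: surface the parse error
            # Echo the failing reply plus the error so the LLM can fix itself.
            messages.append({"role": "assistant", "content": reply})
            messages.append({
                "role": "system",
                "content": f"ERROR PARSING YOUR RESPONSE:\n\n{type(e).__name__}: {e}",
            })

# Toy run: the "model" answers with broken JSON once, then with valid JSON.
replies = iter(['{"oops', '{"ok": true}'])
print(complete_with_fixups(lambda _messages: next(replies), json.loads))  # {'ok': True}
```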
diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py
index 43d4bd296..cc0030995 100644
--- a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py
+++ b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py
@@ -2,6 +2,7 @@ import abc
import enum
import math
from typing import (
+ Any,
Callable,
ClassVar,
Generic,
@@ -68,12 +69,12 @@ class ChatMessageDict(TypedDict):
class AssistantFunctionCall(BaseModel):
name: str
- arguments: str
+ arguments: dict[str, Any]
class AssistantFunctionCallDict(TypedDict):
name: str
- arguments: str
+ arguments: dict[str, Any]
class AssistantToolCall(BaseModel):
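Changing `arguments` from `str` to `dict[str, Any]` is what lets the `json_loads` call sites above disappear. A minimal demonstration of the new field type (values invented):

```python
from typing import Any
from pydantic import BaseModel

class AssistantFunctionCall(BaseModel):
    name: str
    # Previously the raw JSON string from the API; now the parsed arguments.
    arguments: dict[str, Any]

call = AssistantFunctionCall(name="create_agent", arguments={"name": "ResearchGPT"})
print(call.arguments["name"])  # ResearchGPT
```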
diff --git a/autogpts/autogpt/autogpt/core/utils/json_schema.py b/autogpts/autogpt/autogpt/core/utils/json_schema.py
index c9f9026e0..d72b509dd 100644
--- a/autogpts/autogpt/autogpt/core/utils/json_schema.py
+++ b/autogpts/autogpt/autogpt/core/utils/json_schema.py
@@ -1,9 +1,8 @@
import enum
-from logging import Logger
from textwrap import indent
-from typing import Literal, Optional
+from typing import Optional
-from jsonschema import Draft7Validator
+from jsonschema import Draft7Validator, ValidationError
from pydantic import BaseModel
@@ -84,27 +83,24 @@ class JSONSchema(BaseModel):
v.required = k in schema_node["required"]
return properties
- def validate_object(
- self, object: object, logger: Logger
- ) -> tuple[Literal[True], None] | tuple[Literal[False], list]:
+ def validate_object(self, object: object) -> tuple[bool, list[ValidationError]]:
"""
- Validates a dictionary object against the JSONSchema.
+ Validates an object or a value against the JSONSchema.
Params:
- object: The dictionary object to validate.
+ object: The value/object to validate.
schema (JSONSchema): The JSONSchema to validate against.
Returns:
- tuple: A tuple where the first element is a boolean indicating whether the
- object is valid or not, and the second element is a list of errors found
- in the object, or None if the object is valid.
+ bool: Indicates whether the given value or object is valid for the schema.
+ list[ValidationError]: The issues with the value or object (if any).
"""
validator = Draft7Validator(self.to_dict())
if errors := sorted(validator.iter_errors(object), key=lambda e: e.path):
return False, errors
- return True, None
+ return True, []
def to_typescript_object_interface(self, interface_name: str = "") -> str:
if self.type != JSONSchema.Type.OBJECT:
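With the `(True, None) | (False, list)` union gone, callers can unpack and iterate the error list unconditionally. A self-contained sketch of the new contract using `jsonschema` directly:

```python
from jsonschema import Draft7Validator, ValidationError

def validate_object(schema: dict, value: object) -> tuple[bool, list[ValidationError]]:
    """Standalone sketch of the simplified contract: always (bool, list)."""
    validator = Draft7Validator(schema)
    errors = sorted(validator.iter_errors(value), key=lambda e: list(e.path))
    return (not errors, errors)

ok, errors = validate_object({"type": "object", "required": ["name"]}, {})
print(ok, [e.message for e in errors])  # False ["'name' is a required property"]
```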
diff --git a/autogpts/autogpt/autogpt/core/utils/json_utils.py b/autogpts/autogpt/autogpt/core/utils/json_utils.py
index 664cb87c1..0374a85c1 100644
--- a/autogpts/autogpt/autogpt/core/utils/json_utils.py
+++ b/autogpts/autogpt/autogpt/core/utils/json_utils.py
@@ -1,4 +1,3 @@
-import io
import logging
import re
from typing import Any
@@ -32,16 +31,18 @@ def json_loads(json_str: str) -> Any:
if match:
json_str = match.group(1).strip()
- error_buffer = io.StringIO()
- json_result = demjson3.decode(
- json_str, return_errors=True, write_errors=error_buffer
- )
+ json_result = demjson3.decode(json_str, return_errors=True)
+ assert json_result is not None # by virtue of return_errors=True
- if error_buffer.getvalue():
- logger.debug(f"JSON parse errors:\n{error_buffer.getvalue()}")
+ if json_result.errors:
+ logger.debug(
+ "JSON parse errors:\n" + "\n".join(str(e) for e in json_result.errors)
+ )
- if json_result is None:
- raise ValueError(f"Failed to parse JSON string: {json_str}")
+ if json_result.object is demjson3.undefined:
+ raise ValueError(
+ f"Failed to parse JSON string: {json_str}", *json_result.errors
+ )
return json_result.object
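For reference, the decode contract this relies on: with `return_errors=True`, `demjson3.decode` always returns a result object whose `.object` is `demjson3.undefined` when nothing could be parsed and whose `.errors` lists any (possibly recovered-from) syntax problems. A quick demonstration, assuming `demjson3` is installed:

```python
import demjson3

good = demjson3.decode('{"a": 1}', return_errors=True)
print(good.object, good.errors)          # {'a': 1} []

bad = demjson3.decode("not json at all", return_errors=True)
# .object is the undefined sentinel here, which is what makes json_loads
# raise ValueError instead of returning garbage.
print(bad.object is demjson3.undefined)  # expected: True
```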
diff --git a/autogpts/autogpt/autogpt/file_storage/base.py b/autogpts/autogpt/autogpt/file_storage/base.py
index 7db9569dd..62521bb4a 100644
--- a/autogpts/autogpt/autogpt/file_storage/base.py
+++ b/autogpts/autogpt/autogpt/file_storage/base.py
@@ -128,6 +128,10 @@ class FileStorage(ABC):
"""Rename a file or folder in the storage."""
@abstractmethod
+ def copy(self, source: str | Path, destination: str | Path) -> None:
+ """Copy a file or folder with all contents in the storage."""
+
+ @abstractmethod
def make_dir(self, path: str | Path) -> None:
"""Create a directory in the storage if doesn't exist."""
diff --git a/autogpts/autogpt/autogpt/file_storage/gcs.py b/autogpts/autogpt/autogpt/file_storage/gcs.py
index 0d6cbc4f2..45545d449 100644
--- a/autogpts/autogpt/autogpt/file_storage/gcs.py
+++ b/autogpts/autogpt/autogpt/file_storage/gcs.py
@@ -182,6 +182,21 @@ class GCSFileStorage(FileStorage):
new_name = str(blob.name).replace(str(old_path), str(new_path), 1)
self._bucket.rename_blob(blob, new_name=new_name)
+ def copy(self, source: str | Path, destination: str | Path) -> None:
+ """Copy a file or folder with all contents in the storage."""
+ source = self.get_path(source)
+ destination = self.get_path(destination)
+ # If the source is a file, copy it
+ if self._bucket.blob(str(source)).exists():
+ self._bucket.copy_blob(
+ self._bucket.blob(str(source)), self._bucket, str(destination)
+ )
+ return
+ # Otherwise, copy all blobs with the prefix (folder)
+ for blob in self._bucket.list_blobs(prefix=f"{source}/"):
+ new_name = str(blob.name).replace(str(source), str(destination), 1)
+ self._bucket.copy_blob(blob, self._bucket, new_name)
+
def clone_with_subroot(self, subroot: str | Path) -> GCSFileStorage:
"""Create a new GCSFileStorage with a subroot of the current storage."""
file_storage = GCSFileStorage(
diff --git a/autogpts/autogpt/autogpt/file_storage/local.py b/autogpts/autogpt/autogpt/file_storage/local.py
index fa6df2619..3a52bd572 100644
--- a/autogpts/autogpt/autogpt/file_storage/local.py
+++ b/autogpts/autogpt/autogpt/file_storage/local.py
@@ -115,6 +115,20 @@ class LocalFileStorage(FileStorage):
new_path = self.get_path(new_path)
old_path.rename(new_path)
+ def copy(self, source: str | Path, destination: str | Path) -> None:
+ """Copy a file or folder with all contents in the storage."""
+ source = self.get_path(source)
+ destination = self.get_path(destination)
+ if source.is_file():
+ destination.write_bytes(source.read_bytes())
+ else:
+ destination.mkdir(exist_ok=True, parents=True)
+ for file in source.rglob("*"):
+ if file.is_file():
+ target = destination / file.relative_to(source)
+ target.parent.mkdir(exist_ok=True, parents=True)
+ target.write_bytes(file.read_bytes())
+
def clone_with_subroot(self, subroot: str | Path) -> FileStorage:
"""Create a new LocalFileStorage with a subroot of the current storage."""
return LocalFileStorage(
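The same `copy` contract is implemented three times (local, GCS, S3); the local version is plain `pathlib`. Extracted here as a standalone function for readability:

```python
from pathlib import Path

def copy_tree(source: Path, destination: Path) -> None:
    """Standalone rendering of LocalFileStorage.copy above: files are copied
    byte-for-byte; directories are walked with rglob("*") and their files
    recreated under the destination, creating parent dirs as needed."""
    if source.is_file():
        destination.write_bytes(source.read_bytes())
        return
    destination.mkdir(exist_ok=True, parents=True)
    for file in source.rglob("*"):
        if file.is_file():
            target = destination / file.relative_to(source)
            target.parent.mkdir(exist_ok=True, parents=True)
            target.write_bytes(file.read_bytes())
```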
diff --git a/autogpts/autogpt/autogpt/file_storage/s3.py b/autogpts/autogpt/autogpt/file_storage/s3.py
index 5d4bf8dc5..7c69d9d1e 100644
--- a/autogpts/autogpt/autogpt/file_storage/s3.py
+++ b/autogpts/autogpt/autogpt/file_storage/s3.py
@@ -29,9 +29,7 @@ logger = logging.getLogger(__name__)
class S3FileStorageConfiguration(FileStorageConfiguration):
bucket: str = UserConfigurable("autogpt", from_env="STORAGE_BUCKET")
- s3_endpoint_url: Optional[SecretStr] = UserConfigurable(
- from_env=lambda: SecretStr(v) if (v := os.getenv("S3_ENDPOINT_URL")) else None
- )
+ s3_endpoint_url: Optional[SecretStr] = UserConfigurable(from_env="S3_ENDPOINT_URL")
class S3FileStorage(FileStorage):
@@ -222,6 +220,35 @@ class S3FileStorage(FileStorage):
else:
raise # Re-raise for any other client errors
+ def copy(self, source: str | Path, destination: str | Path) -> None:
+ """Copy a file or folder with all contents in the storage."""
+ source = str(self.get_path(source))
+ destination = str(self.get_path(destination))
+
+ try:
+ # If source is a file, copy it
+ self._s3.meta.client.head_object(Bucket=self._bucket_name, Key=source)
+ self._s3.meta.client.copy_object(
+ CopySource={"Bucket": self._bucket_name, "Key": source},
+ Bucket=self._bucket_name,
+ Key=destination,
+ )
+ except botocore.exceptions.ClientError as e:
+ if int(e.response["ResponseMetadata"]["HTTPStatusCode"]) == 404:
+ # If the object does not exist,
+ # it may be a folder
+ prefix = f"{source.rstrip('/')}/"
+ objs = list(self._bucket.objects.filter(Prefix=prefix))
+ for obj in objs:
+ new_key = destination + obj.key[len(source) :]
+ self._s3.meta.client.copy_object(
+ CopySource={"Bucket": self._bucket_name, "Key": obj.key},
+ Bucket=self._bucket_name,
+ Key=new_key,
+ )
+ else:
+ raise
+
def clone_with_subroot(self, subroot: str | Path) -> S3FileStorage:
"""Create a new S3FileStorage with a subroot of the current storage."""
file_storage = S3FileStorage(
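S3 has no real directories, so the implementation probes with `head_object` and treats a 404 as "source is a prefix". Extracted as a standalone sketch (same boto3 calls as the diff):

```python
import boto3
import botocore.exceptions

def s3_copy(bucket_name: str, source: str, destination: str) -> None:
    """Standalone sketch of S3FileStorage.copy above: try a single-object copy
    first; on 404, treat source as a folder prefix and copy every object under
    it with its key rewritten onto the destination."""
    s3 = boto3.resource("s3")
    client = s3.meta.client
    try:
        client.head_object(Bucket=bucket_name, Key=source)
        client.copy_object(
            CopySource={"Bucket": bucket_name, "Key": source},
            Bucket=bucket_name,
            Key=destination,
        )
    except botocore.exceptions.ClientError as e:
        if int(e.response["ResponseMetadata"]["HTTPStatusCode"]) != 404:
            raise
        prefix = f"{source.rstrip('/')}/"
        for obj in s3.Bucket(bucket_name).objects.filter(Prefix=prefix):
            client.copy_object(
                CopySource={"Bucket": bucket_name, "Key": obj.key},
                Bucket=bucket_name,
                Key=destination + obj.key[len(source):],
            )
```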
diff --git a/autogpts/autogpt/autogpt/logs/config.py b/autogpts/autogpt/autogpt/logs/config.py
index ae483d0f8..437f68a8a 100644
--- a/autogpts/autogpt/autogpt/logs/config.py
+++ b/autogpts/autogpt/autogpt/logs/config.py
@@ -57,8 +57,7 @@ class LoggingConfig(SystemConfiguration):
# Console output
log_format: LogFormatName = UserConfigurable(
- default=LogFormatName.SIMPLE,
- from_env=lambda: LogFormatName(os.getenv("LOG_FORMAT", "simple")),
+ default=LogFormatName.SIMPLE, from_env="LOG_FORMAT"
)
plain_console_output: bool = UserConfigurable(
default=False,
@@ -69,8 +68,8 @@ class LoggingConfig(SystemConfiguration):
log_dir: Path = LOG_DIR
log_file_format: Optional[LogFormatName] = UserConfigurable(
default=LogFormatName.SIMPLE,
- from_env=lambda: LogFormatName(
- os.getenv("LOG_FILE_FORMAT", os.getenv("LOG_FORMAT", "simple"))
+ from_env=lambda: os.getenv(
+ "LOG_FILE_FORMAT", os.getenv("LOG_FORMAT", "simple")
),
)
diff --git a/autogpts/autogpt/tests/unit/test_gcs_file_storage.py b/autogpts/autogpt/tests/unit/test_gcs_file_storage.py
index f1348b62d..a9dcd0103 100644
--- a/autogpts/autogpt/tests/unit/test_gcs_file_storage.py
+++ b/autogpts/autogpt/tests/unit/test_gcs_file_storage.py
@@ -177,3 +177,24 @@ def test_clone(gcs_storage_with_files: GCSFileStorage, gcs_root: Path):
assert cloned._bucket.name == gcs_storage_with_files._bucket.name
assert cloned.exists("dir")
assert cloned.exists("dir/test_file_4")
+
+
+@pytest.mark.asyncio
+async def test_copy_file(storage: GCSFileStorage):
+ await storage.write_file("test_file.txt", "test content")
+ storage.copy("test_file.txt", "test_file_copy.txt")
+ storage.make_dir("dir")
+ storage.copy("test_file.txt", "dir/test_file_copy.txt")
+ assert storage.read_file("test_file_copy.txt") == "test content"
+ assert storage.read_file("dir/test_file_copy.txt") == "test content"
+
+
+@pytest.mark.asyncio
+async def test_copy_dir(storage: GCSFileStorage):
+ storage.make_dir("dir")
+ storage.make_dir("dir/sub_dir")
+ await storage.write_file("dir/test_file.txt", "test content")
+ await storage.write_file("dir/sub_dir/test_file.txt", "test content")
+ storage.copy("dir", "dir_copy")
+ assert storage.read_file("dir_copy/test_file.txt") == "test content"
+ assert storage.read_file("dir_copy/sub_dir/test_file.txt") == "test content"
diff --git a/autogpts/autogpt/tests/unit/test_local_file_storage.py b/autogpts/autogpt/tests/unit/test_local_file_storage.py
index 5afcff01f..971a2e421 100644
--- a/autogpts/autogpt/tests/unit/test_local_file_storage.py
+++ b/autogpts/autogpt/tests/unit/test_local_file_storage.py
@@ -188,3 +188,24 @@ def test_get_path_accessible(accessible_path: Path, storage: LocalFileStorage):
def test_get_path_inaccessible(inaccessible_path: Path, storage: LocalFileStorage):
with pytest.raises(ValueError):
storage.get_path(inaccessible_path)
+
+
+@pytest.mark.asyncio
+async def test_copy_file(storage: LocalFileStorage):
+ await storage.write_file("test_file.txt", "test content")
+ storage.copy("test_file.txt", "test_file_copy.txt")
+ storage.make_dir("dir")
+ storage.copy("test_file.txt", "dir/test_file_copy.txt")
+ assert storage.read_file("test_file_copy.txt") == "test content"
+ assert storage.read_file("dir/test_file_copy.txt") == "test content"
+
+
+@pytest.mark.asyncio
+async def test_copy_dir(storage: LocalFileStorage):
+ storage.make_dir("dir")
+ storage.make_dir("dir/sub_dir")
+ await storage.write_file("dir/test_file.txt", "test content")
+ await storage.write_file("dir/sub_dir/test_file.txt", "test content")
+ storage.copy("dir", "dir_copy")
+ assert storage.read_file("dir_copy/test_file.txt") == "test content"
+ assert storage.read_file("dir_copy/sub_dir/test_file.txt") == "test content"
diff --git a/autogpts/autogpt/tests/unit/test_s3_file_storage.py b/autogpts/autogpt/tests/unit/test_s3_file_storage.py
index 7002cd972..82bd5428c 100644
--- a/autogpts/autogpt/tests/unit/test_s3_file_storage.py
+++ b/autogpts/autogpt/tests/unit/test_s3_file_storage.py
@@ -172,3 +172,24 @@ def test_clone(s3_storage_with_files: S3FileStorage, s3_root: Path):
assert cloned._bucket.name == s3_storage_with_files._bucket.name
assert cloned.exists("dir")
assert cloned.exists("dir/test_file_4")
+
+
+@pytest.mark.asyncio
+async def test_copy_file(storage: S3FileStorage):
+ await storage.write_file("test_file.txt", "test content")
+ storage.copy("test_file.txt", "test_file_copy.txt")
+ storage.make_dir("dir")
+ storage.copy("test_file.txt", "dir/test_file_copy.txt")
+ assert storage.read_file("test_file_copy.txt") == "test content"
+ assert storage.read_file("dir/test_file_copy.txt") == "test content"
+
+
+@pytest.mark.asyncio
+async def test_copy_dir(storage: S3FileStorage):
+ storage.make_dir("dir")
+ storage.make_dir("dir/sub_dir")
+ await storage.write_file("dir/test_file.txt", "test content")
+ await storage.write_file("dir/sub_dir/test_file.txt", "test content")
+ storage.copy("dir", "dir_copy")
+ assert storage.read_file("dir_copy/test_file.txt") == "test content"
+ assert storage.read_file("dir_copy/sub_dir/test_file.txt") == "test content"