about summary refs log tree commit diff
path: root/autogpts/autogpt/autogpt/agents/agent.py
diff options
context:
space:
mode:
Diffstat (limited to 'autogpts/autogpt/autogpt/agents/agent.py')
-rw-r--r-- autogpts/autogpt/autogpt/agents/agent.py | 27
1 file changed, 19 insertions, 8 deletions
diff --git a/autogpts/autogpt/autogpt/agents/agent.py b/autogpts/autogpt/autogpt/agents/agent.py
index 13275a4e8..8847ae7f2 100644
--- a/autogpts/autogpt/autogpt/agents/agent.py
+++ b/autogpts/autogpt/autogpt/agents/agent.py
@@ -15,9 +15,9 @@ from pydantic import Field
from autogpt.core.configuration import Configurable
from autogpt.core.prompting import ChatPrompt
from autogpt.core.resource.model_providers import (
+ AssistantChatMessage,
ChatMessage,
ChatModelProvider,
- ChatModelResponse,
)
from autogpt.llm.api_manager import ApiManager
from autogpt.logs.log_cycle import (
@@ -26,6 +26,7 @@ from autogpt.logs.log_cycle import (
USER_INPUT_FILE_NAME,
LogCycleHandler,
)
+from autogpt.logs.utils import fmt_kwargs
from autogpt.models.action_history import (
Action,
ActionErrorResult,
@@ -44,7 +45,12 @@ from .prompt_strategies.one_shot import (
OneShotAgentPromptConfiguration,
OneShotAgentPromptStrategy,
)
-from .utils.exceptions import AgentException, CommandExecutionError, UnknownCommandError
+from .utils.exceptions import (
+ AgentException,
+ AgentTerminated,
+ CommandExecutionError,
+ UnknownCommandError,
+)
logger = logging.getLogger(__name__)
@@ -76,6 +82,8 @@ class Agent(
description=__doc__,
)
+ prompt_strategy: OneShotAgentPromptStrategy
+
def __init__(
self,
settings: AgentSettings,
@@ -164,20 +172,18 @@ class Agent(
return prompt
def parse_and_process_response(
- self, llm_response: ChatModelResponse, *args, **kwargs
+ self, llm_response: AssistantChatMessage, *args, **kwargs
) -> Agent.ThoughtProcessOutput:
for plugin in self.config.plugins:
if not plugin.can_handle_post_planning():
continue
- llm_response.response["content"] = plugin.post_planning(
- llm_response.response.get("content", "")
- )
+ llm_response.content = plugin.post_planning(llm_response.content or "")
(
command_name,
arguments,
assistant_reply_dict,
- ) = self.prompt_strategy.parse_response_content(llm_response.response)
+ ) = self.prompt_strategy.parse_response_content(llm_response)
self.log_cycle_handler.log_cycle(
self.ai_profile.ai_name,
@@ -232,7 +238,7 @@ class Agent(
)
# Intercept ContextItem if one is returned by the command
- if type(return_value) == tuple and isinstance(
+ if type(return_value) is tuple and isinstance(
return_value[1], ContextItem
):
context_item = return_value[1]
@@ -243,8 +249,13 @@ class Agent(
self.context.add(context_item)
result = ActionSuccessResult(outputs=return_value)
+ except AgentTerminated:
+ raise
except AgentException as e:
result = ActionErrorResult.from_exception(e)
+ logger.warning(
+ f"{command_name}({fmt_kwargs(command_args)}) raised an error: {e}"
+ )
result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name)
if result_tlength > self.send_token_limit // 3: