diff options
Diffstat (limited to 'autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py')
-rw-r--r-- | autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py | 11 |
1 file changed, 7 insertions, 4 deletions
diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py index f4bade1fa..9672f8e5b 100644 --- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py +++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py @@ -6,7 +6,7 @@ from autogpt.core.prompting import PromptStrategy from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification from autogpt.core.prompting.utils import json_loads, to_numbered_list from autogpt.core.resource.model_providers import ( - AssistantChatMessageDict, + AssistantChatMessage, ChatMessage, CompletionModelFunction, ) @@ -171,7 +171,7 @@ class NextAbility(PromptStrategy): def parse_response_content( self, - response_content: AssistantChatMessageDict, + response_content: AssistantChatMessage, ) -> dict: """Parse the actual text response from the objective model. @@ -183,9 +183,12 @@ class NextAbility(PromptStrategy): """ try: - function_name = response_content["tool_calls"][0]["function"]["name"] + if not response_content.tool_calls: + raise ValueError("LLM did not call any function") + + function_name = response_content.tool_calls[0].function.name function_arguments = json_loads( - response_content["tool_calls"][0]["function"]["arguments"] + response_content.tool_calls[0].function.arguments ) parsed_response = { "motivation": function_arguments.pop("motivation"), |