diff options
Diffstat (limited to 'autogpt/prompts')
-rw-r--r-- | autogpt/prompts/__init__.py | 0 | ||||
-rw-r--r-- | autogpt/prompts/default_prompts.py | 29 | ||||
-rw-r--r-- | autogpt/prompts/generator.py | 180 | ||||
-rw-r--r-- | autogpt/prompts/prompt.py | 132 |
4 files changed, 0 insertions, 341 deletions
#########################Setup.py#################################

# System prompt used during setup to auto-generate an AI configuration
# (role-based name, description, up to 5 goals) from a one-line task the
# user types in. The model is instructed to reply in the exact
# Name/Description/Goals format shown in the embedded example so the
# output can be parsed mechanically.
DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC = """
Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.

The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.

Example input:
Help me with marketing my business

Example output:
Name: CMOGPT
Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
Goals:
- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.

- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.

- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.

- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
"""

# User-role message template paired with the system prompt above;
# `{{user_prompt}}` is substituted with the raw task text before sending.
DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC = (
    "Task: '{{user_prompt}}'\n"
    "Respond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n"
)

# Fallback task used when the user provides no task of their own.
DEFAULT_USER_DESIRE_PROMPT = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT"  # Default prompt
class PromptGenerator:
    """
    A class for generating custom prompt strings based on constraints, commands,
    resources, and performance evaluations.
    """

    class Command(TypedDict):
        """Shape of a plugin-registered command entry."""

        label: str
        name: str
        params: dict[str, str]
        function: Optional[Callable]

    constraints: list[str]
    commands: list[Command]
    resources: list[str]
    performance_evaluation: list[str]
    command_registry: CommandRegistry | None

    # TODO: replace with AIConfig
    name: str
    role: str
    goals: list[str]

    def __init__(self):
        self.constraints = []
        self.commands = []
        self.resources = []
        self.performance_evaluation = []
        self.command_registry = None

        self.name = "Bob"
        self.role = "AI"
        self.goals = []

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_command(
        self,
        command_label: str,
        command_name: str,
        params: Optional[dict[str, str]] = None,
        function: Optional[Callable] = None,
    ) -> None:
        """
        Add a command to the commands list with a label, name, and optional arguments.

        *Should only be used by plugins.* Native commands should be added
        directly to the CommandRegistry.

        Args:
            command_label (str): The label of the command.
            command_name (str): The name of the command.
            params (dict, optional): A dictionary containing argument names and their
                values. Defaults to None (no parameters).
            function (callable, optional): A callable function to be called when
                the command is executed. Defaults to None.
        """
        # FIX: the previous signature used a mutable default (`params={}`),
        # which is shared across all calls (flake8-bugbear B006). A None
        # sentinel with an explicit copy is the safe, backward-compatible form.
        command_params = dict(params) if params else {}

        command: PromptGenerator.Command = {
            "label": command_label,
            "name": command_name,
            "params": command_params,
            "function": function,
        }

        self.commands.append(command)

    def _generate_command_string(self, command: Dict[str, Any]) -> str:
        """
        Generate a formatted string representation of a command.

        Args:
            command (dict): A dictionary containing command information.

        Returns:
            str: The formatted command string, e.g.
                'Label: "name", params: "arg": "value"'.
        """
        params_string = ", ".join(
            f'"{key}": "{value}"' for key, value in command["params"].items()
        )
        return f'{command["label"]}: "{command["name"]}", params: {params_string}'

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items: List[Any], item_type="list") -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list.
                "command" formats each item via _generate_command_string and
                prepends any enabled commands from the registry.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == "command":
            command_strings = []
            if self.command_registry:
                command_strings += [
                    str(item)
                    for item in self.command_registry.commands.values()
                    if item.enabled
                ]
            # terminate command is added manually
            command_strings += [self._generate_command_string(item) for item in items]
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self, config: Config) -> str:
        """
        Generate a prompt string based on the constraints, commands, resources,
        and performance evaluations.

        Args:
            config (Config): The global configuration.

        Returns:
            str: The generated prompt string, ending with the JSON response
                schema the model must conform to.
        """
        return (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            f"{generate_commands(self, config)}"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            "Performance Evaluation:\n"
            f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            "Respond with only valid JSON conforming to the following schema: \n"
            f"{json.dumps(llm_response_schema(config))}\n"
        )
def generate_commands(self, config: Config) -> str:
    """
    Return the numbered "Commands:" section for the prompt, or an empty
    string when OpenAI function calling is enabled (the commands are then
    supplied to the API separately rather than as prompt text).

    Args:
        self (PromptGenerator): The generator holding the command list.
        config (Config): The global configuration.

    Returns:
        str: The formatted commands section, possibly empty.
    """
    if config.openai_functions:
        return ""
    return (
        "Commands:\n"
        f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
    )
from typing import Optional

from colorama import Fore

from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.config.prompt_config import PromptConfig
from autogpt.llm.api_manager import ApiManager
from autogpt.logs import logger
from autogpt.prompts.generator import PromptGenerator
from autogpt.setup import prompt_user
from autogpt.utils import clean_input

# Appended to every agent cycle to force the model to pick exactly one
# command and answer in the previously-specified JSON schema.
DEFAULT_TRIGGERING_PROMPT = "Determine exactly one command to use, and respond using the JSON schema specified previously:"


def build_default_prompt_generator(config: Config) -> PromptGenerator:
    """
    Build a PromptGenerator pre-populated with the constraints, resources,
    and performance evaluations read from the prompt settings file.

    Args:
        config (Config): Global configuration; supplies
            `prompt_settings_file` (default: prompts_settings.yaml).

    Returns:
        PromptGenerator: The populated generator.
    """

    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Initialize the PromptConfig object and load the file set in the main config (default: prompts_settings.yaml)
    prompt_config = PromptConfig(config.prompt_settings_file)

    # Add constraints to the PromptGenerator object
    for constraint in prompt_config.constraints:
        prompt_generator.add_constraint(constraint)

    # Add resources to the PromptGenerator object
    for resource in prompt_config.resources:
        prompt_generator.add_resource(resource)

    # Add performance evaluations to the PromptGenerator object
    for performance_evaluation in prompt_config.performance_evaluations:
        prompt_generator.add_performance_evaluation(performance_evaluation)

    return prompt_generator


def construct_main_ai_config(
    config: Config,
    name: Optional[str] = None,
    role: Optional[str] = None,
    goals: tuple[str, ...] = tuple(),
) -> AIConfig:
    """
    Load (or interactively create) the AIConfig used for this run.

    Loads the saved settings file, applies any CLI overrides (name, role,
    goals), optionally re-prompts the user, persists the result, and sets
    the total API budget.

    Args:
        config (Config): Global configuration (settings file paths,
            skip_reprompt, keys for the interactive prompt, ...).
        name (str, optional): Override for the agent's name.
        role (str, optional): Override for the agent's role.
        goals (tuple[str, ...], optional): Override for the agent's goals.

    Returns:
        AIConfig: The fully constructed agent configuration.
    """
    ai_config = AIConfig.load(config.ai_settings_file)

    # Apply overrides
    if name:
        ai_config.ai_name = name
    if role:
        ai_config.ai_role = role
    if goals:
        ai_config.ai_goals = list(goals)

    # NOTE(review): `and` binds tighter than `or`, so this evaluates as
    # all([...]) or (skip_reprompt and all([...])) — i.e. a fully-specified
    # CLI override OR skip_reprompt with a complete saved config. Confirm
    # this precedence is intended.
    if (
        all([name, role, goals])
        or config.skip_reprompt
        and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals])
    ):
        logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
        logger.typewriter_log(
            "API Budget:",
            Fore.GREEN,
            "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
        )
    elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
        # Complete saved config but no override: ask whether to reuse it.
        logger.typewriter_log(
            "Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {ai_config.ai_name}?",
            speak_text=True,
        )
        should_continue = clean_input(
            config,
            f"""Continue with the last settings?
Name: {ai_config.ai_name}
Role: {ai_config.ai_role}
Goals: {ai_config.ai_goals}
API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
Continue ({config.authorise_key}/{config.exit_key}): """,
        )
        if should_continue.lower() == config.exit_key:
            # Discard the saved config; the prompt_user branch below runs.
            ai_config = AIConfig()

    if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
        # Incomplete config: walk the user through interactive setup, then persist.
        ai_config = prompt_user(config)
        ai_config.save(config.ai_settings_file)

    if config.restrict_to_workspace:
        logger.typewriter_log(
            "NOTE:All files/directories created by this agent can be found inside its workspace at:",
            Fore.YELLOW,
            f"{config.workspace_path}",
        )
    # set the total api budget
    api_manager = ApiManager()
    api_manager.set_total_budget(ai_config.api_budget)

    # Agent Created, print message
    logger.typewriter_log(
        ai_config.ai_name,
        Fore.LIGHTBLUE_EX,
        "has been created with the following details:",
        speak_text=True,
    )

    # Print the ai_config details
    # Name
    logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
    # Role
    logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
    # Goals
    logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
    for goal in ai_config.ai_goals:
        logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)

    return ai_config