author     Luke <2609441+lc0rp@users.noreply.github.com>  2023-08-11 13:49:39 -0400
committer  GitHub <noreply@github.com>  2023-08-11 13:49:39 -0400
commit     bb3a06d54b368b52994362adca3691fb2b98c8d5 (patch)
tree       bb9568395132e6448f3ffa50f0652f49feca532a
parent     Release v0.4.6 (#5039) (diff)
parent     Bulleting update & version bump (#5112) (diff)
Release v0.4.7 (#5094) (tag: v0.4.7)
-rw-r--r-- | .gitignore | 7
-rw-r--r-- | BULLETIN.md | 24
-rw-r--r-- | agbenchmark/__init__.py (renamed from autogpt/core/runner/cli_web_app/client/__init__.py) | 0
-rw-r--r-- | agbenchmark/benchmarks.py | 54
-rw-r--r-- | agbenchmark/config.json | 4
-rw-r--r-- | agbenchmark/regression_tests.json | 24
-rw-r--r-- | autogpt/agents/agent.py | 14
-rw-r--r-- | autogpt/agents/base.py | 112
-rw-r--r-- | autogpt/app/main.py | 87
-rw-r--r-- | autogpt/app/setup.py | 3
-rw-r--r-- | autogpt/app/spinner.py (renamed from autogpt/spinner.py) | 0
-rw-r--r-- | autogpt/app/utils.py | 147
-rw-r--r-- | autogpt/command_decorator.py | 8
-rw-r--r-- | autogpt/commands/__init__.py | 2
-rw-r--r-- | autogpt/commands/execute_code.py | 10
-rw-r--r-- | autogpt/commands/file_operations.py | 57
-rw-r--r-- | autogpt/commands/git_operations.py | 5
-rw-r--r-- | autogpt/commands/image_gen.py | 6
-rw-r--r-- | autogpt/commands/system.py (renamed from autogpt/commands/task_statuses.py) | 6
-rw-r--r-- | autogpt/commands/web_search.py | 6
-rw-r--r-- | autogpt/commands/web_selenium.py | 44
-rw-r--r-- | autogpt/config/ai_config.py | 66
-rw-r--r-- | autogpt/config/config.py | 14
-rw-r--r-- | autogpt/config/prompt_config.py | 2
-rw-r--r-- | autogpt/core/ability/base.py | 8
-rw-r--r-- | autogpt/core/planning/templates.py | 1
-rw-r--r-- | autogpt/core/resource/model_providers/openai.py | 2
-rw-r--r-- | autogpt/core/runner/cli_web_app/cli.py | 55
-rw-r--r-- | autogpt/core/runner/cli_web_app/client/client.py | 16
-rw-r--r-- | autogpt/core/runner/cli_web_app/server/api.py | 120
-rw-r--r-- | autogpt/core/runner/cli_web_app/server/schema.py | 36
-rw-r--r-- | autogpt/core/runner/cli_web_app/server/services/__init__.py | 0
-rw-r--r-- | autogpt/core/runner/cli_web_app/server/services/users.py | 20
-rw-r--r-- | autogpt/llm/base.py | 2
-rw-r--r-- | autogpt/llm/providers/openai.py | 4
-rw-r--r-- | autogpt/llm/utils/__init__.py | 7
-rw-r--r-- | autogpt/models/command.py | 7
-rw-r--r-- | autogpt/models/command_registry.py | 95
-rw-r--r-- | autogpt/processing/text.py | 10
-rw-r--r-- | autogpt/prompts/generator.py | 144
-rw-r--r-- | autogpt/prompts/prompt.py | 14
-rw-r--r-- | autogpt/utils.py | 170
-rw-r--r-- | autogpt/workspace/workspace.py | 23
-rw-r--r-- | benchmarks.py | 16
-rw-r--r-- | docs/_javascript/mathjax.js | 16
-rw-r--r-- | docs/_javascript/tablesort.js | 6
-rw-r--r-- | docs/challenges/building_challenges.md | 1
-rw-r--r-- | docs/challenges/introduction.md | 12
-rw-r--r-- | docs/challenges/memory/challenge_b.md | 5
-rw-r--r-- | docs/challenges/memory/challenge_c.md | 28
-rw-r--r-- | docs/challenges/memory/challenge_d.md | 19
-rw-r--r-- | docs/configuration/imagegen.md | 12
-rw-r--r-- | docs/configuration/memory.md | 26
-rw-r--r-- | docs/configuration/voice.md | 2
-rw-r--r-- | docs/imgs/Auto_GPT_Logo.png | bin 0 -> 26841 bytes
-rw-r--r-- | docs/setup.md | 113
-rw-r--r-- | docs/share-your-logs.md | 2
-rw-r--r-- | docs/testing.md | 25
-rw-r--r-- | docs/usage.md | 42
-rw-r--r-- | mkdocs.yml | 121
-rw-r--r-- | prompt_settings.yaml | 5
-rw-r--r-- | pyproject.toml | 2
-rw-r--r-- | requirements.txt | 8
m--------- | tests/Auto-GPT-test-cassettes | 0
-rw-r--r-- | tests/challenges/utils.py | 8
-rw-r--r-- | tests/conftest.py | 2
-rw-r--r-- | tests/integration/test_setup.py | 6
-rw-r--r-- | tests/integration/test_update_user.py | 33
-rw-r--r-- | tests/mocks/mock_commands.py | 2
-rw-r--r-- | tests/unit/test_commands.py | 4
-rw-r--r-- | tests/unit/test_config.py | 19
-rw-r--r-- | tests/unit/test_file_operations.py | 18
-rw-r--r-- | tests/unit/test_prompt_config.py | 22
-rw-r--r-- | tests/unit/test_prompt_generator.py | 21
-rw-r--r-- | tests/unit/test_spinner.py | 2
-rw-r--r-- | tests/unit/test_utils.py | 30
76 files changed, 1201 insertions, 863 deletions
diff --git a/.gitignore b/.gitignore
index 195ecb717..3b4363131 100644
--- a/.gitignore
+++ b/.gitignore
@@ -160,3 +160,10 @@ openai/
# news
CURRENT_BULLETIN.md
+
+# AgBenchmark
+agbenchmark/reports/
+
+# Nodejs
+package-lock.json
+package.json \ No newline at end of file
diff --git a/BULLETIN.md b/BULLETIN.md
index 11cc62777..9a24b4986 100644
--- a/BULLETIN.md
+++ b/BULLETIN.md
@@ -4,24 +4,18 @@
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.
-# v0.4.6 RELEASE HIGHLIGHTS! 🚀
+# v0.4.7 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
-This release includes under-the-hood improvements and bug fixes, including better UTF-8
-special character support, workspace write access for sandboxed Python execution,
-more robust path resolution for config files and the workspace, and a full restructure
-of the Agent class, the "brain" of Auto-GPT, to make it more extensible.
+This release introduces initial REST API support, powered by e2b's agent
+protocol SDK (https://github.com/e2b-dev/agent-protocol#sdk).
-We have also released some documentation updates, including:
+It also includes improvements to prompt generation and support
+for our new benchmarking tool, Auto-GPT-Benchmarks
+(https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks).
-- *How to share your system logs*
- Visit [docs/share-your-logs.md] to learn how to share logs with us
- via a log analyzer graciously contributed by https://www.e2b.dev/
+We've also moved our documentation to Material Theme, at https://docs.agpt.co.
-- *Auto-GPT re-architecture documentation*
- You can learn more about the inner-workings of the Auto-GPT re-architecture
- released last cycle, via these links:
- * [autogpt/core/README.md]
- * [autogpt/core/ARCHITECTURE_NOTES.md]
+As usual, we've squashed a few bugs and made some under-the-hood improvements.
-Take a look at the Release Notes on Github for the full changelog!
+Take a look at the Release Notes on Github for the full changelog:
https://github.com/Significant-Gravitas/Auto-GPT/releases.
diff --git a/autogpt/core/runner/cli_web_app/client/__init__.py b/agbenchmark/__init__.py
index e69de29bb..e69de29bb 100644
--- a/autogpt/core/runner/cli_web_app/client/__init__.py
+++ b/agbenchmark/__init__.py
diff --git a/agbenchmark/benchmarks.py b/agbenchmark/benchmarks.py
new file mode 100644
index 000000000..6a646f370
--- /dev/null
+++ b/agbenchmark/benchmarks.py
@@ -0,0 +1,54 @@
+import os
+import sys
+from pathlib import Path
+from typing import Tuple
+
+from autogpt.agents import Agent
+from autogpt.app.main import run_interaction_loop
+from autogpt.commands import COMMAND_CATEGORIES
+from autogpt.config import AIConfig, Config, ConfigBuilder
+from autogpt.memory.vector import get_memory
+from autogpt.models.command_registry import CommandRegistry
+from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
+from autogpt.workspace import Workspace
+
+PROJECT_DIR = Path().resolve()
+
+
+def run_specific_agent(task, continuous_mode=False) -> Tuple[str, int]:
+ agent = bootstrap_agent(task, continuous_mode)
+ run_interaction_loop(agent)
+
+
+def bootstrap_agent(task, continuous_mode) -> Agent:
+ config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
+ config.debug_mode = True
+ config.continuous_mode = continuous_mode
+ config.temperature = 0
+ config.plain_output = True
+ command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
+ config.memory_backend = "no_memory"
+ config.workspace_path = Workspace.init_workspace_directory(config)
+ config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
+ ai_config = AIConfig(
+ ai_name="Auto-GPT",
+ ai_role="a multi-purpose AI assistant.",
+ ai_goals=[task],
+ )
+ ai_config.command_registry = command_registry
+ return Agent(
+ memory=get_memory(config),
+ command_registry=command_registry,
+ ai_config=ai_config,
+ config=config,
+ triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
+ )
+
+
+if __name__ == "__main__":
+ # The first argument is the script name itself, second is the task
+ if len(sys.argv) != 2:
+ print("Usage: python script.py <task>")
+ sys.exit(1)
+ task = sys.argv[1]
+ run_specific_agent(task, continuous_mode=True)
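For context, the new entry point is a plain script: agbenchmark resolves it via the "entry_path" in agbenchmark/config.json and passes the task as the sole argument. A minimal invocation sketch, assuming the repository root as the working directory (the task string is illustrative):

```python
# Sketch: invoking the new benchmark entry point the way agbenchmark would.
import subprocess
import sys

task = "Write the word 'Washington' to a .txt file"  # illustrative task
subprocess.run(
    [sys.executable, "-m", "agbenchmark.benchmarks", task],
    check=True,
)
```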
diff --git a/agbenchmark/config.json b/agbenchmark/config.json
new file mode 100644
index 000000000..47785864c
--- /dev/null
+++ b/agbenchmark/config.json
@@ -0,0 +1,4 @@
+{
+ "workspace": "auto_gpt_workspace",
+ "entry_path": "agbenchmark.benchmarks"
+}
diff --git a/agbenchmark/regression_tests.json b/agbenchmark/regression_tests.json
new file mode 100644
index 000000000..8d59b1a4f
--- /dev/null
+++ b/agbenchmark/regression_tests.json
@@ -0,0 +1,24 @@
+{
+ "TestBasicCodeGeneration": {
+ "difficulty": "basic",
+ "dependencies": [
+ "TestWriteFile"
+ ],
+ "data_path": "agbenchmark/challenges/code/d3"
+ },
+ "TestBasicMemory": {
+ "difficulty": "basic",
+ "data_path": "agbenchmark/challenges/memory/m1"
+ },
+ "TestReadFile": {
+ "difficulty": "basic",
+ "dependencies": [
+ "TestWriteFile"
+ ],
+ "data_path": "agbenchmark/challenges/interface/read_file"
+ },
+ "TestWriteFile": {
+ "dependencies": [],
+ "data_path": "agbenchmark/challenges/interface/write_file"
+ }
+}
diff --git a/autogpt/agents/agent.py b/autogpt/agents/agent.py
index 93d3de865..fa20ea587 100644
--- a/autogpt/agents/agent.py
+++ b/autogpt/agents/agent.py
@@ -17,6 +17,7 @@ from autogpt.llm.base import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.logs.log_cycle import (
+ CURRENT_CONTEXT_FILE_NAME,
FULL_MESSAGE_HISTORY_FILE_NAME,
NEXT_ACTION_FILE_NAME,
USER_INPUT_FILE_NAME,
@@ -109,6 +110,13 @@ class Agent(BaseAgent):
self.history.raw(),
FULL_MESSAGE_HISTORY_FILE_NAME,
)
+ self.log_cycle_handler.log_cycle(
+ self.ai_config.ai_name,
+ self.created_at,
+ self.cycle_count,
+ prompt.raw(),
+ CURRENT_CONTEXT_FILE_NAME,
+ )
return prompt
def execute(
@@ -285,10 +293,10 @@ def execute_command(
# Handle non-native commands (e.g. from plugins)
for command in agent.ai_config.prompt_generator.commands:
if (
- command_name == command["label"].lower()
- or command_name == command["name"].lower()
+ command_name == command.label.lower()
+ or command_name == command.name.lower()
):
- return command["function"](**arguments)
+ return command.function(**arguments)
raise RuntimeError(
f"Cannot execute '{command_name}': unknown command."
diff --git a/autogpt/agents/base.py b/autogpt/agents/base.py
index c0133ea7c..bf43b3769 100644
--- a/autogpt/agents/base.py
+++ b/autogpt/agents/base.py
@@ -1,7 +1,8 @@
from __future__ import annotations
+import re
from abc import ABCMeta, abstractmethod
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any, Literal, Optional
if TYPE_CHECKING:
from autogpt.config import AIConfig, Config
@@ -23,6 +24,8 @@ AgentThoughts = dict[str, Any]
class BaseAgent(metaclass=ABCMeta):
"""Base class for all Auto-GPT agents."""
+ ThoughtProcessID = Literal["one-shot"]
+
def __init__(
self,
ai_config: AIConfig,
@@ -91,6 +94,7 @@ class BaseAgent(metaclass=ABCMeta):
def think(
self,
instruction: Optional[str] = None,
+ thought_process_id: ThoughtProcessID = "one-shot",
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Runs the agent for one cycle.
@@ -103,9 +107,8 @@ class BaseAgent(metaclass=ABCMeta):
instruction = instruction or self.default_cycle_instruction
- prompt: ChatSequence = self.construct_prompt(instruction)
- prompt = self.on_before_think(prompt, instruction)
-
+ prompt: ChatSequence = self.construct_prompt(instruction, thought_process_id)
+ prompt = self.on_before_think(prompt, thought_process_id, instruction)
raw_response = create_chat_completion(
prompt,
self.config,
@@ -115,7 +118,7 @@ class BaseAgent(metaclass=ABCMeta):
)
self.cycle_count += 1
- return self.on_response(raw_response, prompt, instruction)
+ return self.on_response(raw_response, thought_process_id, prompt, instruction)
@abstractmethod
def execute(
@@ -138,6 +141,7 @@ class BaseAgent(metaclass=ABCMeta):
def construct_base_prompt(
self,
+ thought_process_id: ThoughtProcessID,
prepend_messages: list[Message] = [],
append_messages: list[Message] = [],
reserve_tokens: int = 0,
@@ -179,7 +183,11 @@ class BaseAgent(metaclass=ABCMeta):
return prompt
- def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
+ def construct_prompt(
+ self,
+ cycle_instruction: str,
+ thought_process_id: ThoughtProcessID,
+ ) -> ChatSequence:
"""Constructs and returns a prompt with the following structure:
1. System prompt
2. Message history of the agent, truncated & prepended with running summary as needed
@@ -196,14 +204,86 @@ class BaseAgent(metaclass=ABCMeta):
cycle_instruction_tlength = count_message_tokens(
cycle_instruction_msg, self.llm.name
)
- prompt = self.construct_base_prompt(reserve_tokens=cycle_instruction_tlength)
+
+ append_messages: list[Message] = []
+
+ response_format_instr = self.response_format_instruction(thought_process_id)
+ if response_format_instr:
+ append_messages.append(Message("system", response_format_instr))
+
+ prompt = self.construct_base_prompt(
+ thought_process_id,
+ append_messages=append_messages,
+ reserve_tokens=cycle_instruction_tlength,
+ )
# ADD user input message ("triggering prompt")
prompt.append(cycle_instruction_msg)
return prompt
- def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence:
+ # This can be expanded to support multiple types of (inter)actions within an agent
+ def response_format_instruction(self, thought_process_id: ThoughtProcessID) -> str:
+ if thought_process_id != "one-shot":
+ raise NotImplementedError(f"Unknown thought process '{thought_process_id}'")
+
+ RESPONSE_FORMAT_WITH_COMMAND = """```ts
+ interface Response {
+ thoughts: {
+ // Thoughts
+ text: string;
+ reasoning: string;
+ // Short markdown-style bullet list that conveys the long-term plan
+ plan: string;
+ // Constructive self-criticism
+ criticism: string;
+ // Summary of thoughts to say to the user
+ speak: string;
+ };
+ command: {
+ name: string;
+ args: Record<string, any>;
+ };
+ }
+ ```"""
+
+ RESPONSE_FORMAT_WITHOUT_COMMAND = """```ts
+ interface Response {
+ thoughts: {
+ // Thoughts
+ text: string;
+ reasoning: string;
+ // Short markdown-style bullet list that conveys the long-term plan
+ plan: string;
+ // Constructive self-criticism
+ criticism: string;
+ // Summary of thoughts to say to the user
+ speak: string;
+ };
+ }
+ ```"""
+
+ response_format = re.sub(
+ r"\n\s+",
+ "\n",
+ RESPONSE_FORMAT_WITHOUT_COMMAND
+ if self.config.openai_functions
+ else RESPONSE_FORMAT_WITH_COMMAND,
+ )
+
+ use_functions = self.config.openai_functions and self.command_registry.commands
+ return (
+ f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. "
+ "The JSON should be compatible with the TypeScript type `Response` from the following:\n"
+ f"{response_format}\n"
+ )
+
+ def on_before_think(
+ self,
+ prompt: ChatSequence,
+ thought_process_id: ThoughtProcessID,
+ instruction: str,
+ ) -> ChatSequence:
"""Called after constructing the prompt but before executing it.
Calls the `on_planning` hook of any enabled and capable plugins, adding their
@@ -238,7 +318,11 @@ class BaseAgent(metaclass=ABCMeta):
return prompt
def on_response(
- self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
+ self,
+ llm_response: ChatModelResponse,
+ thought_process_id: ThoughtProcessID,
+ prompt: ChatSequence,
+ instruction: str,
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Called upon receiving a response from the chat model.
@@ -261,7 +345,9 @@ class BaseAgent(metaclass=ABCMeta):
) # FIXME: support function calls
try:
- return self.parse_and_process_response(llm_response, prompt, instruction)
+ return self.parse_and_process_response(
+ llm_response, thought_process_id, prompt, instruction
+ )
except SyntaxError as e:
logger.error(f"Response could not be parsed: {e}")
# TODO: tune this message
@@ -276,7 +362,11 @@ class BaseAgent(metaclass=ABCMeta):
@abstractmethod
def parse_and_process_response(
- self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
+ self,
+ llm_response: ChatModelResponse,
+ thought_process_id: ThoughtProcessID,
+ prompt: ChatSequence,
+ instruction: str,
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Validate, parse & process the LLM's response.
diff --git a/autogpt/app/main.py b/autogpt/app/main.py
index 5abaaac8a..ed77cd438 100644
--- a/autogpt/app/main.py
+++ b/autogpt/app/main.py
@@ -13,6 +13,14 @@ from colorama import Fore, Style
from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
from autogpt.app.configurator import create_config
from autogpt.app.setup import prompt_user
+from autogpt.app.spinner import Spinner
+from autogpt.app.utils import (
+ clean_input,
+ get_current_git_branch,
+ get_latest_bulletin,
+ get_legal_warning,
+ markdown_to_ansi_style,
+)
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
from autogpt.llm.api_manager import ApiManager
@@ -22,14 +30,6 @@ from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.speech import say_text
-from autogpt.spinner import Spinner
-from autogpt.utils import (
- clean_input,
- get_current_git_branch,
- get_latest_bulletin,
- get_legal_warning,
- markdown_to_ansi_style,
-)
from autogpt.workspace import Workspace
from scripts.install_plugin_deps import install_plugin_dependencies
@@ -126,42 +126,17 @@ def run_auto_gpt(
# TODO: have this directory live outside the repository (e.g. in a user's
# home directory) and have it come in as a command line argument or part of
# the env file.
- Workspace.set_workspace_directory(config, workspace_directory)
+ config.workspace_path = Workspace.init_workspace_directory(
+ config, workspace_directory
+ )
# HACK: doing this here to collect some globals that depend on the workspace.
- Workspace.set_file_logger_path(config, config.workspace_path)
+ config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
config.plugins = scan_plugins(config, config.debug_mode)
- # Create a CommandRegistry instance and scan default folder
- command_registry = CommandRegistry()
-
- logger.debug(
- f"The following command categories are disabled: {config.disabled_command_categories}"
- )
- enabled_command_categories = [
- x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
- ]
-
- logger.debug(
- f"The following command categories are enabled: {enabled_command_categories}"
- )
- for command_category in enabled_command_categories:
- command_registry.import_commands(command_category)
-
- # Unregister commands that are incompatible with the current config
- incompatible_commands = []
- for command in command_registry.commands.values():
- if callable(command.enabled) and not command.enabled(config):
- command.enabled = False
- incompatible_commands.append(command)
-
- for command in incompatible_commands:
- command_registry.unregister(command)
- logger.debug(
- f"Unregistering incompatible command: {command.name}, "
- f"reason - {command.disabled_reason or 'Disabled by current config.'}"
- )
+ # Create a CommandRegistry instance and scan default folder
+ command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
ai_config = construct_main_ai_config(
config,
@@ -368,23 +343,25 @@ def update_user(
print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config)
if command_name is not None:
- if config.speak_mode:
- say_text(f"I want to execute {command_name}", config)
+ if command_name.lower().startswith("error"):
+ logger.typewriter_log(
+ "ERROR: ",
+ Fore.RED,
+ f"The Agent failed to select an action. "
+ f"Error message: {command_name}",
+ )
+ else:
+ if config.speak_mode:
+ say_text(f"I want to execute {command_name}", config)
- # First log new-line so user can differentiate sections better in console
- logger.typewriter_log("\n")
- logger.typewriter_log(
- "NEXT ACTION: ",
- Fore.CYAN,
- f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
- f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}",
- )
- elif command_name.lower().startswith("error"):
- logger.typewriter_log(
- "ERROR: ",
- Fore.RED,
- f"The Agent failed to select an action. " f"Error message: {command_name}",
- )
+ # First log new-line so user can differentiate sections better in console
+ logger.typewriter_log("\n")
+ logger.typewriter_log(
+ "NEXT ACTION: ",
+ Fore.CYAN,
+ f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
+ f"ARGUMENTS = {Fore.CYAN}{command_args}{Style.RESET_ALL}",
+ )
else:
logger.typewriter_log(
"NO ACTION SELECTED: ",
diff --git a/autogpt/app/setup.py b/autogpt/app/setup.py
index f2b52916c..f2879af6b 100644
--- a/autogpt/app/setup.py
+++ b/autogpt/app/setup.py
@@ -5,7 +5,7 @@ from typing import Optional
from colorama import Fore, Style
from jinja2 import Template
-from autogpt import utils
+from autogpt.app import utils
from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence, Message
@@ -83,6 +83,7 @@ def prompt_user(
"Falling back to manual mode.",
speak_text=True,
)
+ logger.debug(f"Error during AIConfig generation: {e}")
return generate_aiconfig_manual(config)
diff --git a/autogpt/spinner.py b/autogpt/app/spinner.py
index 8b2aa6c3c..8b2aa6c3c 100644
--- a/autogpt/spinner.py
+++ b/autogpt/app/spinner.py
diff --git a/autogpt/app/utils.py b/autogpt/app/utils.py
new file mode 100644
index 000000000..5bf0d6c7c
--- /dev/null
+++ b/autogpt/app/utils.py
@@ -0,0 +1,147 @@
+import os
+import re
+
+import requests
+from colorama import Fore, Style
+from git.repo import Repo
+from prompt_toolkit import ANSI, PromptSession
+from prompt_toolkit.history import InMemoryHistory
+
+from autogpt.config import Config
+from autogpt.logs import logger
+
+session = PromptSession(history=InMemoryHistory())
+
+
+def clean_input(config: Config, prompt: str = "", talk=False):
+ try:
+ if config.chat_messages_enabled:
+ for plugin in config.plugins:
+ if not hasattr(plugin, "can_handle_user_input"):
+ continue
+ if not plugin.can_handle_user_input(user_input=prompt):
+ continue
+ plugin_response = plugin.user_input(user_input=prompt)
+ if not plugin_response:
+ continue
+ if plugin_response.lower() in [
+ "yes",
+ "yeah",
+ "y",
+ "ok",
+ "okay",
+ "sure",
+ "alright",
+ ]:
+ return config.authorise_key
+ elif plugin_response.lower() in [
+ "no",
+ "nope",
+ "n",
+ "negative",
+ ]:
+ return config.exit_key
+ return plugin_response
+
+ # ask for input, default when just pressing Enter is y
+ logger.info("Asking user via keyboard...")
+
+ # handle_sigint must be set to False, so the signal handler in the
+ # autogpt/main.py could be employed properly. This referes to
+ # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
+ answer = session.prompt(ANSI(prompt), handle_sigint=False)
+ return answer
+ except KeyboardInterrupt:
+ logger.info("You interrupted Auto-GPT")
+ logger.info("Quitting...")
+ exit(0)
+
+
+def get_bulletin_from_web():
+ try:
+ response = requests.get(
+ "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
+ )
+ if response.status_code == 200:
+ return response.text
+ except requests.exceptions.RequestException:
+ pass
+
+ return ""
+
+
+def get_current_git_branch() -> str:
+ try:
+ repo = Repo(search_parent_directories=True)
+ branch = repo.active_branch
+ return branch.name
+ except:
+ return ""
+
+
+def get_latest_bulletin() -> tuple[str, bool]:
+ exists = os.path.exists("data/CURRENT_BULLETIN.md")
+ current_bulletin = ""
+ if exists:
+ current_bulletin = open(
+ "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
+ ).read()
+ new_bulletin = get_bulletin_from_web()
+ is_new_news = new_bulletin != "" and new_bulletin != current_bulletin
+
+ news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
+ if new_bulletin or current_bulletin:
+ news_header += (
+ "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
+ "If you don't wish to see this message, you "
+ "can run Auto-GPT with the *--skip-news* flag.\n"
+ )
+
+ if new_bulletin and is_new_news:
+ open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
+ current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"
+
+ return f"{news_header}\n{current_bulletin}", is_new_news
+
+
+def markdown_to_ansi_style(markdown: str):
+ ansi_lines: list[str] = []
+ for line in markdown.split("\n"):
+ line_style = ""
+
+ if line.startswith("# "):
+ line_style += Style.BRIGHT
+ else:
+ line = re.sub(
+ r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
+ rf"{Style.BRIGHT}\1{Style.NORMAL}",
+ line,
+ )
+
+ if re.match(r"^#+ ", line) is not None:
+ line_style += Fore.CYAN
+ line = re.sub(r"^#+ ", "", line)
+
+ ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
+ return "\n".join(ansi_lines)
+
+
+def get_legal_warning() -> str:
+ legal_text = """
+## DISCLAIMER AND INDEMNIFICATION AGREEMENT
+### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.
+
+## Introduction
+AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.
+
+## No Liability for Actions of the System
+The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.
+
+## User Responsibility and Respondeat Superior Liability
+As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
+behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.
+
+## Indemnification
+By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
+ """
+ return legal_text
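A quick usage sketch for the relocated helpers; the bulletin text is illustrative and the output is terminal ANSI:

```python
# Illustrative use of the helpers now living in autogpt.app.utils:
from autogpt.app.utils import get_latest_bulletin, markdown_to_ansi_style

bulletin, is_new = get_latest_bulletin()  # fetches and caches data/CURRENT_BULLETIN.md
print(markdown_to_ansi_style("# v0.4.7 RELEASE HIGHLIGHTS!\nInitial *REST API* support."))
```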
diff --git a/autogpt/command_decorator.py b/autogpt/command_decorator.py
index d082d9bf5..9a6f58ae1 100644
--- a/autogpt/command_decorator.py
+++ b/autogpt/command_decorator.py
@@ -1,7 +1,11 @@
+from __future__ import annotations
+
import functools
-from typing import Any, Callable, Optional, TypedDict
+from typing import TYPE_CHECKING, Any, Callable, Optional, TypedDict
+
+if TYPE_CHECKING:
+ from autogpt.config import Config
-from autogpt.config import Config
from autogpt.models.command import Command, CommandParameter
# Unique identifier for auto-gpt commands
diff --git a/autogpt/commands/__init__.py b/autogpt/commands/__init__.py
index 9a932b175..018f5b8fc 100644
--- a/autogpt/commands/__init__.py
+++ b/autogpt/commands/__init__.py
@@ -3,5 +3,5 @@ COMMAND_CATEGORIES = [
"autogpt.commands.file_operations",
"autogpt.commands.web_search",
"autogpt.commands.web_selenium",
- "autogpt.commands.task_statuses",
+ "autogpt.commands.system",
]
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index dd35f8593..3d52eb0a5 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -1,4 +1,8 @@
-"""Execute code in a Docker container"""
+"""Commands to execute code"""
+
+COMMAND_CATEGORY = "execute_code"
+COMMAND_CATEGORY_TITLE = "Execute Code"
+
import os
import subprocess
from pathlib import Path
@@ -251,9 +255,9 @@ def execute_shell(command_line: str, agent: Agent) -> str:
"execute_shell_popen",
"Executes a Shell Command, non-interactive commands only",
{
- "query": {
+ "command_line": {
"type": "string",
- "description": "The search query",
+ "description": "The command line to execute",
"required": True,
}
},
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
index 939b7dc18..41da057e3 100644
--- a/autogpt/commands/file_operations.py
+++ b/autogpt/commands/file_operations.py
@@ -1,6 +1,10 @@
-"""File operations for AutoGPT"""
+"""Commands to perform operations on files"""
+
from __future__ import annotations
+COMMAND_CATEGORY = "file_operations"
+COMMAND_CATEGORY_TITLE = "File Operations"
+
import contextlib
import hashlib
import os
@@ -25,7 +29,7 @@ def text_checksum(text: str) -> str:
def operations_from_log(
- log_path: str,
+ log_path: str | Path,
) -> Generator[tuple[Operation, str, str | None], None, None]:
"""Parse the file operations log and return a tuple containing the log entries"""
try:
@@ -52,7 +56,7 @@ def operations_from_log(
log.close()
-def file_operations_state(log_path: str) -> dict[str, str]:
+def file_operations_state(log_path: str | Path) -> dict[str, str]:
"""Iterates over the operations log and returns the expected state.
Parses a log file at config.file_logger_path to construct a dictionary that maps
@@ -228,22 +232,6 @@ def write_to_file(filename: str, text: str, agent: Agent) -> str:
return f"Error: {err}"
-@command(
- "append_to_file",
- "Appends to a file",
- {
- "filename": {
- "type": "string",
- "description": "The name of the file to write to",
- "required": True,
- },
- "text": {
- "type": "string",
- "description": "The text to write to the file",
- "required": True,
- },
- },
-)
@sanitize_path_arg("filename")
def append_to_file(
filename: str, text: str, agent: Agent, should_log: bool = True
@@ -275,37 +263,6 @@ def append_to_file(
@command(
- "delete_file",
- "Deletes a file",
- {
- "filename": {
- "type": "string",
- "description": "The name of the file to delete",
- "required": True,
- }
- },
-)
-@sanitize_path_arg("filename")
-def delete_file(filename: str, agent: Agent) -> str:
- """Delete a file
-
- Args:
- filename (str): The name of the file to delete
-
- Returns:
- str: A message indicating success or failure
- """
- if is_duplicate_operation("delete", filename, agent):
- return "Error: File has already been deleted."
- try:
- os.remove(filename)
- log_operation("delete", filename, agent)
- return "File deleted successfully."
- except Exception as err:
- return f"Error: {err}"
-
-
-@command(
"list_files",
"Lists Files in a Directory",
{
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
index 021157fbb..f7f8186be 100644
--- a/autogpt/commands/git_operations.py
+++ b/autogpt/commands/git_operations.py
@@ -1,4 +1,7 @@
-"""Git operations for autogpt"""
+"""Commands to perform Git operations"""
+
+COMMAND_CATEGORY = "git_operations"
+COMMAND_CATEGORY_TITLE = "Git Operations"
from git.repo import Repo
diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py
index e02400a81..3f6c1d98d 100644
--- a/autogpt/commands/image_gen.py
+++ b/autogpt/commands/image_gen.py
@@ -1,4 +1,8 @@
-""" Image Generation Module for AutoGPT."""
+"""Commands to generate images based on text input"""
+
+COMMAND_CATEGORY = "text_to_image"
+COMMAND_CATEGORY_TITLE = "Text to Image"
+
import io
import json
import time
diff --git a/autogpt/commands/task_statuses.py b/autogpt/commands/system.py
index 34908928f..08bfd5e57 100644
--- a/autogpt/commands/task_statuses.py
+++ b/autogpt/commands/system.py
@@ -1,6 +1,10 @@
-"""Task Statuses module."""
+"""Commands to control the internal state of the program"""
+
from __future__ import annotations
+COMMAND_CATEGORY = "system"
+COMMAND_CATEGORY_TITLE = "System"
+
from typing import NoReturn
from autogpt.agents.agent import Agent
diff --git a/autogpt/commands/web_search.py b/autogpt/commands/web_search.py
index 9ea0d2061..49712049d 100644
--- a/autogpt/commands/web_search.py
+++ b/autogpt/commands/web_search.py
@@ -1,6 +1,10 @@
-"""Google search command for Autogpt."""
+"""Commands to search the web with"""
+
from __future__ import annotations
+COMMAND_CATEGORY = "web_search"
+COMMAND_CATEGORY_TITLE = "Web Search"
+
import json
import time
from itertools import islice
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
index 948d799e9..92aa5bece 100644
--- a/autogpt/commands/web_selenium.py
+++ b/autogpt/commands/web_selenium.py
@@ -1,10 +1,16 @@
-"""Selenium web scraping module."""
+"""Commands for browsing a website"""
+
from __future__ import annotations
+from autogpt.llm.utils.token_counter import count_string_tokens
+
+COMMAND_CATEGORY = "web_browse"
+COMMAND_CATEGORY_TITLE = "Web Browsing"
+
import logging
from pathlib import Path
from sys import platform
-from typing import Optional, Type
+from typing import Optional
from bs4 import BeautifulSoup
from selenium.common.exceptions import WebDriverException
@@ -12,6 +18,7 @@ from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service as ChromeDriverService
from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
from selenium.webdriver.common.by import By
+from selenium.webdriver.common.options import ArgOptions as BrowserOptions
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.edge.service import Service as EdgeDriverService
from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver
@@ -34,9 +41,9 @@ from autogpt.memory.vector import MemoryItem, get_memory
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url
-BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions
-
FILE_DIR = Path(__file__).parent.parent
+TOKENS_TO_TRIGGER_SUMMARY = 50
+LINKS_TO_RETURN = 20
@command(
@@ -60,25 +67,30 @@ def browse_website(url: str, question: str, agent: Agent) -> str:
question (str): The question asked by the user
Returns:
- Tuple[str, WebDriver]: The answer and links to the user and the webdriver
+ str: The answer and links to the user and the webdriver
"""
+ driver = None
try:
driver, text = scrape_text_with_selenium(url, agent)
+ add_header(driver)
+ if TOKENS_TO_TRIGGER_SUMMARY < count_string_tokens(text, agent.llm.name):
+ text = summarize_memorize_webpage(url, text, question, agent, driver)
+
+ links = scrape_links_with_selenium(driver, url)
+
+ # Limit links to LINKS_TO_RETURN
+ if len(links) > LINKS_TO_RETURN:
+ links = links[:LINKS_TO_RETURN]
+
+ return f"Answer gathered from website: {text}\n\nLinks: {links}"
except WebDriverException as e:
# These errors are often quite long and include lots of context.
# Just grab the first line.
msg = e.msg.split("\n")[0]
return f"Error: {msg}"
-
- add_header(driver)
- summary = summarize_memorize_webpage(url, text, question, agent, driver)
- links = scrape_links_with_selenium(driver, url)
-
- # Limit links to 5
- if len(links) > 5:
- links = links[:5]
- close_browser(driver)
- return f"Answer gathered from website: {summary}\n\nLinks: {links}"
+ finally:
+ if driver:
+ close_browser(driver)
def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
@@ -92,7 +104,7 @@ def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
"""
logging.getLogger("selenium").setLevel(logging.CRITICAL)
- options_available: dict[str, Type[BrowserOptions]] = {
+ options_available: dict[str, BrowserOptions] = {
"chrome": ChromeOptions,
"edge": EdgeOptions,
"firefox": FirefoxOptions,
diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py
index b47740f6a..ce26e23dd 100644
--- a/autogpt/config/ai_config.py
+++ b/autogpt/config/ai_config.py
@@ -1,7 +1,4 @@
-# sourcery skip: do-not-use-staticmethod
-"""
-A module that contains the AIConfig class object that contains the configuration
-"""
+"""A module that contains the AIConfig class object that contains the configuration"""
from __future__ import annotations
import platform
@@ -15,6 +12,8 @@ if TYPE_CHECKING:
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.generator import PromptGenerator
+ from .config import Config
+
class AIConfig:
"""
@@ -104,7 +103,7 @@ class AIConfig:
yaml.dump(config, file, allow_unicode=True)
def construct_full_prompt(
- self, config, prompt_generator: Optional[PromptGenerator] = None
+ self, config: Config, prompt_generator: Optional[PromptGenerator] = None
) -> str:
"""
Returns a prompt to the user with the class information in an organized fashion.
@@ -117,26 +116,27 @@ class AIConfig:
including the ai_name, ai_role, ai_goals, and api_budget.
"""
- prompt_start = (
- "Your decisions must always be made independently without"
- " seeking user assistance. Play to your strengths as an LLM and pursue"
- " simple strategies with no legal complications."
- ""
- )
-
from autogpt.prompts.prompt import build_default_prompt_generator
+ prompt_generator = prompt_generator or self.prompt_generator
if prompt_generator is None:
prompt_generator = build_default_prompt_generator(config)
- prompt_generator.goals = self.ai_goals
- prompt_generator.name = self.ai_name
- prompt_generator.role = self.ai_role
- prompt_generator.command_registry = self.command_registry
+ prompt_generator.command_registry = self.command_registry
+ self.prompt_generator = prompt_generator
+
for plugin in config.plugins:
if not plugin.can_handle_post_prompt():
continue
prompt_generator = plugin.post_prompt(prompt_generator)
+ # Construct full prompt
+ full_prompt_parts = [
+ f"You are {self.ai_name}, {self.ai_role.rstrip('.')}.",
+ "Your decisions must always be made independently without seeking "
+ "user assistance. Play to your strengths as an LLM and pursue "
+ "simple strategies with no legal complications.",
+ ]
+
if config.execute_local_commands:
# add OS info to prompt
os_name = platform.system()
@@ -146,14 +146,30 @@ class AIConfig:
else distro.name(pretty=True)
)
- prompt_start += f"\nThe OS you are running on is: {os_info}"
+ full_prompt_parts.append(f"The OS you are running on is: {os_info}")
- # Construct full prompt
- full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n"
- for i, goal in enumerate(self.ai_goals):
- full_prompt += f"{i+1}. {goal}\n"
+ additional_constraints: list[str] = []
if self.api_budget > 0.0:
- full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}"
- self.prompt_generator = prompt_generator
- full_prompt += f"\n\n{prompt_generator.generate_prompt_string(config)}"
- return full_prompt
+ additional_constraints.append(
+ f"It takes money to let you run. "
+ f"Your API budget is ${self.api_budget:.3f}"
+ )
+
+ full_prompt_parts.append(
+ prompt_generator.generate_prompt_string(
+ additional_constraints=additional_constraints
+ )
+ )
+
+ if self.ai_goals:
+ full_prompt_parts.append(
+ "\n".join(
+ [
+ "## Goals",
+ "For your task, you must fulfill the following goals:",
+ *[f"{i+1}. {goal}" for i, goal in enumerate(self.ai_goals)],
+ ]
+ )
+ )
+
+ return "\n\n".join(full_prompt_parts).strip("\n")
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index 8fba182c5..93fc42e91 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -13,6 +13,7 @@ from colorama import Fore
from pydantic import Field, validator
from autogpt.core.configuration.schema import Configurable, SystemSettings
+from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.plugins.plugins_config import PluginsConfig
AI_SETTINGS_FILE = "ai_settings.yaml"
@@ -51,10 +52,10 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
prompt_settings_file: str = PROMPT_SETTINGS_FILE
workdir: Path = None
workspace_path: Optional[Path] = None
- file_logger_path: Optional[str] = None
+ file_logger_path: Optional[Path] = None
# Model configuration
fast_llm: str = "gpt-3.5-turbo"
- smart_llm: str = "gpt-4"
+ smart_llm: str = "gpt-4-0314"
temperature: float = 0
openai_functions: bool = False
embedding_model: str = "text-embedding-ada-002"
@@ -147,6 +148,15 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
return p
+ @validator("openai_functions")
+ def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
+ if v:
+ smart_llm = values["smart_llm"]
+ assert OPEN_AI_CHAT_MODELS[smart_llm].supports_functions, (
+ f"Model {smart_llm} does not support OpenAI Functions. "
+ "Please disable OPENAI_FUNCTIONS or choose a suitable model."
+ )
+
def get_openai_credentials(self, model: str) -> dict[str, str]:
credentials = {
"api_key": self.openai_api_key,
diff --git a/autogpt/config/prompt_config.py b/autogpt/config/prompt_config.py
index 793bb4440..055e7897b 100644
--- a/autogpt/config/prompt_config.py
+++ b/autogpt/config/prompt_config.py
@@ -44,4 +44,4 @@ class PromptConfig:
self.constraints = config_params.get("constraints", [])
self.resources = config_params.get("resources", [])
- self.performance_evaluations = config_params.get("performance_evaluations", [])
+ self.best_practices = config_params.get("best_practices", [])
diff --git a/autogpt/core/ability/base.py b/autogpt/core/ability/base.py
index ac26f0267..1faaaf332 100644
--- a/autogpt/core/ability/base.py
+++ b/autogpt/core/ability/base.py
@@ -1,6 +1,6 @@
import abc
from pprint import pformat
-from typing import ClassVar
+from typing import Any, ClassVar
import inflection
from pydantic import Field
@@ -50,11 +50,11 @@ class Ability(abc.ABC):
return []
@abc.abstractmethod
- async def __call__(self, *args, **kwargs) -> AbilityResult:
+ async def __call__(self, *args: Any, **kwargs: Any) -> AbilityResult:
...
def __str__(self) -> str:
- return pformat(self.dump)
+ return pformat(self.dump())
def dump(self) -> dict:
return {
@@ -88,5 +88,5 @@ class AbilityRegistry(abc.ABC):
...
@abc.abstractmethod
- def perform(self, ability_name: str, **kwargs) -> AbilityResult:
+ async def perform(self, ability_name: str, **kwargs: Any) -> AbilityResult:
...
diff --git a/autogpt/core/planning/templates.py b/autogpt/core/planning/templates.py
index e28f2ed75..59792f656 100644
--- a/autogpt/core/planning/templates.py
+++ b/autogpt/core/planning/templates.py
@@ -17,7 +17,6 @@ ABILITIES = (
'analyze_code: Analyze Code, args: "code": "<full_code_string>"',
'execute_python_file: Execute Python File, args: "filename": "<filename>"',
'append_to_file: Append to file, args: "filename": "<filename>", "text": "<text>"',
- 'delete_file: Delete file, args: "filename": "<filename>"',
'list_files: List Files in Directory, args: "directory": "<directory>"',
'read_file: Read a file, args: "filename": "<filename>"',
'write_to_file: Write to file, args: "filename": "<filename>", "text": "<text>"',
diff --git a/autogpt/core/resource/model_providers/openai.py b/autogpt/core/resource/model_providers/openai.py
index 3707796a1..df7bdb83b 100644
--- a/autogpt/core/resource/model_providers/openai.py
+++ b/autogpt/core/resource/model_providers/openai.py
@@ -109,7 +109,7 @@ class OpenAIModelProviderBudget(ModelProviderBudget):
class OpenAISettings(ModelProviderSettings):
configuration: OpenAIConfiguration
- credentials: ModelProviderCredentials()
+ credentials: ModelProviderCredentials
budget: OpenAIModelProviderBudget
diff --git a/autogpt/core/runner/cli_web_app/cli.py b/autogpt/core/runner/cli_web_app/cli.py
index 6600b8e1d..e933739b2 100644
--- a/autogpt/core/runner/cli_web_app/cli.py
+++ b/autogpt/core/runner/cli_web_app/cli.py
@@ -1,19 +1,13 @@
-import contextlib
import pathlib
-import shlex
-import subprocess
-import sys
-import time
import click
-import requests
-import uvicorn
import yaml
+from agent_protocol import Agent as AgentProtocol
+from autogpt.core.runner.cli_web_app.server.api import task_handler
from autogpt.core.runner.client_lib.shared_click_commands import (
DEFAULT_SETTINGS_FILE,
make_settings,
- status,
)
from autogpt.core.runner.client_lib.utils import coroutine
@@ -25,34 +19,20 @@ def autogpt():
autogpt.add_command(make_settings)
-autogpt.add_command(status)
@autogpt.command()
@click.option(
- "host",
- "--host",
- default="localhost",
- help="The host for the webserver.",
- type=click.STRING,
-)
-@click.option(
"port",
"--port",
default=8080,
help="The port of the webserver.",
type=click.INT,
)
-def server(host: str, port: int) -> None:
+def server(port: int) -> None:
"""Run the Auto-GPT runner httpserver."""
click.echo("Running Auto-GPT runner httpserver...")
- uvicorn.run(
- "autogpt.core.runner.cli_web_app.server.api:app",
- workers=1,
- host=host,
- port=port,
- reload=True,
- )
+ AgentProtocol.handle_task(task_handler).start(port)
@autogpt.command()
@@ -69,32 +49,7 @@ async def client(settings_file) -> None:
if settings_file.exists():
settings = yaml.safe_load(settings_file.read_text())
- from autogpt.core.runner.cli_web_app.client.client import run
-
- with autogpt_server():
- run()
-
-
-@contextlib.contextmanager
-def autogpt_server():
- host = "localhost"
- port = 8080
- cmd = shlex.split(
- f"{sys.executable} autogpt/core/runner/cli_web_app/cli.py server --host {host} --port {port}"
- )
- server_process = subprocess.Popen(
- args=cmd,
- )
- started = False
-
- while not started:
- try:
- requests.get(f"http://{host}:{port}")
- started = True
- except requests.exceptions.ConnectionError:
- time.sleep(0.2)
- yield server_process
- server_process.terminate()
+ # TODO: Call the API server with the settings and task, using the Python API client for agent protocol.
if __name__ == "__main__":
diff --git a/autogpt/core/runner/cli_web_app/client/client.py b/autogpt/core/runner/cli_web_app/client/client.py
deleted file mode 100644
index 346203f7c..000000000
--- a/autogpt/core/runner/cli_web_app/client/client.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import json
-
-import requests
-
-
-def run():
- body = json.dumps(
- {"ai_name": "HelloBot", "ai_role": "test", "ai_goals": ["goal1", "goal2"]}
- )
-
- header = {"Content-Type": "application/json", "openai_api_key": "asdf"}
- print("Sending: ", header, body)
- response = requests.post(
- "http://localhost:8080/api/v1/agents", data=body, headers=header
- )
- print(response.content.decode("utf-8"))
diff --git a/autogpt/core/runner/cli_web_app/server/api.py b/autogpt/core/runner/cli_web_app/server/api.py
index 01c50b06d..7a5ae9a74 100644
--- a/autogpt/core/runner/cli_web_app/server/api.py
+++ b/autogpt/core/runner/cli_web_app/server/api.py
@@ -1,48 +1,104 @@
-import uuid
+from pathlib import Path
-from fastapi import APIRouter, FastAPI, Request
+from agent_protocol import StepHandler, StepResult
+from colorama import Fore
-from autogpt.core.runner.cli_web_app.server.schema import InteractRequestBody
+from autogpt.agents import Agent
+from autogpt.app.main import UserFeedback
+from autogpt.commands import COMMAND_CATEGORIES
+from autogpt.config import AIConfig, ConfigBuilder
+from autogpt.logs import logger
+from autogpt.memory.vector import get_memory
+from autogpt.models.command_registry import CommandRegistry
+from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
+from autogpt.workspace import Workspace
-router = APIRouter()
+PROJECT_DIR = Path().resolve()
-@router.post("/agents")
-async def create_agent(request: Request):
- """Create a new agent."""
- agent_id = uuid.uuid4().hex
- return {"agent_id": agent_id}
+async def task_handler(task_input) -> StepHandler:
+ task = task_input.__root__ if task_input else {}
+ agent = bootstrap_agent(task.get("user_input"), False)
+ next_command_name: str | None = None
+ next_command_args: dict[str, str] | None = None
-@router.post("/agents/{agent_id}")
-async def interact(request: Request, agent_id: str, body: InteractRequestBody):
- """Interact with an agent."""
+ async def step_handler(step_input) -> StepResult:
+ step = step_input.__root__ if step_input else {}
- # check headers
+ nonlocal next_command_name, next_command_args
- # check if agent_id exists
+ result = await interaction_step(
+ agent,
+ step.get("user_input"),
+ step.get("user_feedback"),
+ next_command_name,
+ next_command_args,
+ )
- # get agent object from somewhere, e.g. a database/disk/global dict
+ next_command_name = result["next_step_command_name"] if result else None
+ next_command_args = result["next_step_command_args"] if result else None
- # continue agent interaction with user input
+ if not result:
+ return StepResult(output=None, is_last=True)
+ return StepResult(output=result)
+
+ return step_handler
+
+
+async def interaction_step(
+ agent: Agent,
+ user_input,
+ user_feedback: UserFeedback | None,
+ command_name: str | None,
+ command_args: dict[str, str] | None,
+):
+ """Run one step of the interaction loop."""
+ if user_feedback == UserFeedback.EXIT:
+ return
+ if user_feedback == UserFeedback.TEXT:
+ command_name = "human_feedback"
+
+ result: str | None = None
+
+ if command_name is not None:
+ result = agent.execute(command_name, command_args, user_input)
+ if result is None:
+ logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
+ return
+
+ next_command_name, next_command_args, assistant_reply_dict = agent.think()
return {
- "thoughts": {
- "thoughts": {
- "text": "text",
- "reasoning": "reasoning",
- "plan": "plan",
- "criticism": "criticism",
- "speak": "speak",
- },
- "commands": {
- "name": "name",
- "args": {"arg_1": "value_1", "arg_2": "value_2"},
- },
- },
- "messages": ["message1", agent_id],
+ "config": agent.config,
+ "ai_config": agent.ai_config,
+ "result": result,
+ "assistant_reply_dict": assistant_reply_dict,
+ "next_step_command_name": next_command_name,
+ "next_step_command_args": next_command_args,
}
-app = FastAPI()
-app.include_router(router, prefix="/api/v1")
+def bootstrap_agent(task, continuous_mode) -> Agent:
+ config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
+ config.debug_mode = True
+ config.continuous_mode = continuous_mode
+ config.temperature = 0
+ config.plain_output = True
+ command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
+ config.memory_backend = "no_memory"
+ config.workspace_path = Workspace.init_workspace_directory(config)
+ config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
+ ai_config = AIConfig(
+ ai_name="Auto-GPT",
+ ai_role="a multi-purpose AI assistant.",
+ ai_goals=[task],
+ )
+ ai_config.command_registry = command_registry
+ return Agent(
+ memory=get_memory(config),
+ command_registry=command_registry,
+ ai_config=ai_config,
+ config=config,
+ triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
+ )
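Conceptually, each agent-protocol step executes the command chosen during the previous step, then thinks to pick the next one. A condensed sketch of the state threading the step handler performs; `get_step_input` is a hypothetical stand-in for the agent-protocol transport:

```python
async def run_steps(agent, get_step_input):
    """Condensed sketch of the step handler's state threading (transport elided)."""
    next_name, next_args = None, None
    while True:
        step = await get_step_input()  # hypothetical stand-in for protocol plumbing
        result = await interaction_step(
            agent,
            step.get("user_input"),
            step.get("user_feedback"),
            next_name,
            next_args,
        )
        if not result:
            return  # surfaced to the client as StepResult(is_last=True)
        next_name = result["next_step_command_name"]
        next_args = result["next_step_command_args"]
```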
diff --git a/autogpt/core/runner/cli_web_app/server/schema.py b/autogpt/core/runner/cli_web_app/server/schema.py
deleted file mode 100644
index 272fbc78e..000000000
--- a/autogpt/core/runner/cli_web_app/server/schema.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from uuid import UUID
-
-from pydantic import BaseModel, validator
-
-
-class AgentInfo(BaseModel):
- id: UUID = None
- objective: str = ""
- name: str = ""
- role: str = ""
- goals: list[str] = []
-
-
-class AgentConfiguration(BaseModel):
- """Configuration for creation of a new agent."""
-
- # We'll want to get this schema from the configuration, so it needs to be dynamic.
- user_configuration: dict
- agent_goals: AgentInfo
-
- @validator("agent_goals")
- def only_objective_or_name_role_goals(cls, agent_goals):
- goals_specification = [agent_goals.name, agent_goals.role, agent_goals.goals]
- if agent_goals.objective and any(goals_specification):
- raise ValueError("Cannot specify both objective and name, role, or goals")
- if not agent_goals.objective and not all(goals_specification):
- raise ValueError("Must specify either objective or name, role, and goals")
-
-
-class InteractRequestBody(BaseModel):
- user_input: str = ""
-
-
-class InteractResponseBody(BaseModel):
- thoughts: dict[str, str] # TBD
- messages: list[str] # for example
diff --git a/autogpt/core/runner/cli_web_app/server/services/__init__.py b/autogpt/core/runner/cli_web_app/server/services/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/autogpt/core/runner/cli_web_app/server/services/__init__.py
+++ /dev/null
diff --git a/autogpt/core/runner/cli_web_app/server/services/users.py b/autogpt/core/runner/cli_web_app/server/services/users.py
deleted file mode 100644
index 5192dcdb2..000000000
--- a/autogpt/core/runner/cli_web_app/server/services/users.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import uuid
-
-from fastapi import Request
-
-
-class UserService:
- def __init__(self):
- self.users = {}
-
- def get_user_id(self, request: Request) -> uuid.UUID:
- # TODO: something real. I don't know how this works.
- hostname = request.client.host
- port = request.client.port
- user = f"{hostname}:{port}"
- if user not in self.users:
- self.users[user] = uuid.uuid4()
- return self.users[user]
-
-
-USER_SERVICE = UserService()
diff --git a/autogpt/llm/base.py b/autogpt/llm/base.py
index 14a146b3c..1ac00112d 100644
--- a/autogpt/llm/base.py
+++ b/autogpt/llm/base.py
@@ -67,6 +67,8 @@ class CompletionModelInfo(ModelInfo):
class ChatModelInfo(CompletionModelInfo):
"""Struct for chat model information."""
+ supports_functions: bool = False
+
@dataclass
class TextModelInfo(CompletionModelInfo):
diff --git a/autogpt/llm/providers/openai.py b/autogpt/llm/providers/openai.py
index 6e7461428..35c652f05 100644
--- a/autogpt/llm/providers/openai.py
+++ b/autogpt/llm/providers/openai.py
@@ -36,12 +36,14 @@ OPEN_AI_CHAT_MODELS = {
prompt_token_cost=0.0015,
completion_token_cost=0.002,
max_tokens=4096,
+ supports_functions=True,
),
ChatModelInfo(
name="gpt-3.5-turbo-16k-0613",
prompt_token_cost=0.003,
completion_token_cost=0.004,
max_tokens=16384,
+ supports_functions=True,
),
ChatModelInfo(
name="gpt-4-0314",
@@ -54,6 +56,7 @@ OPEN_AI_CHAT_MODELS = {
prompt_token_cost=0.03,
completion_token_cost=0.06,
max_tokens=8191,
+ supports_functions=True,
),
ChatModelInfo(
name="gpt-4-32k-0314",
@@ -66,6 +69,7 @@ OPEN_AI_CHAT_MODELS = {
prompt_token_cost=0.06,
completion_token_cost=0.12,
max_tokens=32768,
+ supports_functions=True,
),
]
}
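The new capability flag can be queried directly. A sketch; note that the model names receiving `supports_functions=True` in the gpt-4 hunks are partly inferred, since some `name=` lines fall outside the visible context:

```python
# Checking the new capability flag (entries per the hunks above):
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS

OPEN_AI_CHAT_MODELS["gpt-3.5-turbo-16k-0613"].supports_functions  # True
OPEN_AI_CHAT_MODELS["gpt-4-0314"].supports_functions              # False (default)
```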
diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py
index e433476ec..5438bdd85 100644
--- a/autogpt/llm/utils/__init__.py
+++ b/autogpt/llm/utils/__init__.py
@@ -119,7 +119,9 @@ def create_chat_completion(
temperature = config.temperature
if max_tokens is None:
prompt_tlength = prompt.token_length
- max_tokens = OPEN_AI_CHAT_MODELS[model].max_tokens - prompt_tlength
+ max_tokens = (
+ OPEN_AI_CHAT_MODELS[model].max_tokens - prompt_tlength - 1
+ ) # the -1 is just here because we have a bug and we don't know how to fix it. When using gpt-4-0314 we get a token error.
logger.debug(f"Prompt length: {prompt_tlength} tokens")
if functions:
functions_tlength = count_openai_functions_tokens(functions, model)
@@ -154,6 +156,9 @@ def create_chat_completion(
function.schema for function in functions
]
+ # Print full prompt to debug log
+ logger.debug(prompt.dump())
+
response = iopenai.create_chat_completion(
messages=prompt.raw(),
**chat_completion_kwargs,
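Worked numbers for the adjusted completion budget, using illustrative values:

```python
# Worked example of the adjusted budget (gpt-3.5-turbo, 4096-token window):
model_max_tokens = 4096
prompt_tlength = 1500
# The extra -1 sidesteps the off-by-one token error observed with gpt-4-0314,
# per the comment in the hunk above.
max_tokens = model_max_tokens - prompt_tlength - 1  # 2595
```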
diff --git a/autogpt/models/command.py b/autogpt/models/command.py
index 614697861..a7cec509f 100644
--- a/autogpt/models/command.py
+++ b/autogpt/models/command.py
@@ -1,6 +1,9 @@
-from typing import Any, Callable, Optional
+from __future__ import annotations
-from autogpt.config import Config
+from typing import TYPE_CHECKING, Any, Callable, Optional
+
+if TYPE_CHECKING:
+ from autogpt.config import Config
from .command_parameter import CommandParameter
diff --git a/autogpt/models/command_registry.py b/autogpt/models/command_registry.py
index f54f4adb5..9dfb35bd3 100644
--- a/autogpt/models/command_registry.py
+++ b/autogpt/models/command_registry.py
@@ -1,6 +1,13 @@
+from __future__ import annotations
+
import importlib
import inspect
-from typing import Any
+from dataclasses import dataclass, field
+from types import ModuleType
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+ from autogpt.config import Config
from autogpt.command_decorator import AUTO_GPT_COMMAND_IDENTIFIER
from autogpt.logs import logger
@@ -18,9 +25,21 @@ class CommandRegistry:
commands: dict[str, Command]
commands_aliases: dict[str, Command]
+ # Alternative way to structure the registry; currently redundant with self.commands
+ categories: dict[str, CommandCategory]
+
+ @dataclass
+ class CommandCategory:
+ name: str
+ title: str
+ description: str
+ commands: list[Command] = field(default_factory=list[Command])
+ modules: list[ModuleType] = field(default_factory=list[ModuleType])
+
def __init__(self):
self.commands = {}
self.commands_aliases = {}
+ self.categories = {}
def __contains__(self, command_name: str):
return command_name in self.commands or command_name in self.commands_aliases
@@ -84,7 +103,41 @@ class CommandRegistry:
]
return "\n".join(commands_list)
- def import_commands(self, module_name: str) -> None:
+ @staticmethod
+ def with_command_modules(modules: list[str], config: Config) -> CommandRegistry:
+ new_registry = CommandRegistry()
+
+ logger.debug(
+ f"The following command categories are disabled: {config.disabled_command_categories}"
+ )
+ enabled_command_modules = [
+ x for x in modules if x not in config.disabled_command_categories
+ ]
+
+ logger.debug(
+ f"The following command categories are enabled: {enabled_command_modules}"
+ )
+
+ for command_module in enabled_command_modules:
+ new_registry.import_command_module(command_module)
+
+ # Unregister commands that are incompatible with the current config
+ incompatible_commands: list[Command] = []
+ for command in new_registry.commands.values():
+ if callable(command.enabled) and not command.enabled(config):
+ command.enabled = False
+ incompatible_commands.append(command)
+
+ for command in incompatible_commands:
+ new_registry.unregister(command)
+ logger.debug(
+ f"Unregistering incompatible command: {command.name}, "
+ f"reason - {command.disabled_reason or 'Disabled by current config.'}"
+ )
+
+ return new_registry
+
+ def import_command_module(self, module_name: str) -> None:
"""
Imports the specified Python module containing command plugins.
@@ -99,16 +152,42 @@ class CommandRegistry:
module = importlib.import_module(module_name)
+ category = self.register_module_category(module)
+
for attr_name in dir(module):
attr = getattr(module, attr_name)
+
+ command = None
+
# Register decorated functions
- if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
- attr, AUTO_GPT_COMMAND_IDENTIFIER
- ):
- self.register(attr.command)
+ if getattr(attr, AUTO_GPT_COMMAND_IDENTIFIER, False):
+ command = attr.command
+
# Register command classes
elif (
inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
):
- cmd_instance = attr()
- self.register(cmd_instance)
+ command = attr()
+
+ if command:
+ self.register(command)
+ category.commands.append(command)
+
+ def register_module_category(self, module: ModuleType) -> CommandCategory:
+ if not (category_name := getattr(module, "COMMAND_CATEGORY", None)):
+ raise ValueError(f"Cannot import invalid command module {module.__name__}")
+
+ if category_name not in self.categories:
+ self.categories[category_name] = CommandRegistry.CommandCategory(
+ name=category_name,
+ title=getattr(
+ module, "COMMAND_CATEGORY_TITLE", category_name.capitalize()
+ ),
+ description=getattr(module, "__doc__", ""),
+ )
+
+ category = self.categories[category_name]
+ if module not in category.modules:
+ category.modules.append(module)
+
+ return category
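Taken together, a command module must now expose a module-level `COMMAND_CATEGORY` (and optionally `COMMAND_CATEGORY_TITLE`), and registries are built through the new factory rather than the `get_command_registry` helper removed from `benchmarks.py` below. A usage sketch, assuming a populated `Config` and module paths that define `COMMAND_CATEGORY`:

```python
from autogpt.models.command_registry import CommandRegistry

# Assumed: `config` is a Config instance and each listed module defines
# COMMAND_CATEGORY, as tests/mocks/mock_commands.py does below.
registry = CommandRegistry.with_command_modules(
    ["autogpt.commands.file_operations", "autogpt.commands.web_search"],
    config,
)
print(sorted(registry.commands))    # registered command names
print(sorted(registry.categories))  # one category per imported module
```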
diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py
index faaa50e00..dc245bb2a 100644
--- a/autogpt/processing/text.py
+++ b/autogpt/processing/text.py
@@ -10,7 +10,15 @@ from autogpt.llm.base import ChatSequence
from autogpt.llm.providers.openai import OPEN_AI_MODELS
from autogpt.llm.utils import count_string_tokens, create_chat_completion
from autogpt.logs import logger
-from autogpt.utils import batch
+
+
+def batch(iterable, max_batch_length: int, overlap: int = 0):
+    """Batch data from iterable into slices of at most max_batch_length items. The last batch may be shorter."""
+    # batched('ABCDEFG', 3) --> ABC DEF G
+    if max_batch_length < 1:
+        raise ValueError("max_batch_length must be at least one")
+    for i in range(0, len(iterable), max_batch_length - overlap):
+        yield iterable[i : i + max_batch_length]
def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
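The relocated `batch` helper also takes an `overlap` parameter, which makes consecutive slices share trailing items; `overlap` must stay below `max_batch_length`, or the iteration step would be non-positive. For example:

```python
list(batch("ABCDEFG", 3))             # ['ABC', 'DEF', 'G']
list(batch("ABCDEFG", 3, overlap=1))  # ['ABC', 'CDE', 'EFG', 'G']
```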
diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py
index bc836f30c..a8217953d 100644
--- a/autogpt/prompts/generator.py
+++ b/autogpt/prompts/generator.py
@@ -1,11 +1,8 @@
""" A module for generating custom prompt strings."""
from __future__ import annotations
-import json
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypedDict
-
-from autogpt.config import Config
-from autogpt.json_utils.utilities import llm_response_schema
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Callable, Optional
if TYPE_CHECKING:
from autogpt.models.command_registry import CommandRegistry
@@ -17,34 +14,33 @@ class PromptGenerator:
resources, and performance evaluations.
"""
- class Command(TypedDict):
+ @dataclass
+ class Command:
label: str
name: str
params: dict[str, str]
function: Optional[Callable]
+ def __str__(self) -> str:
+ """Returns a string representation of the command."""
+ params_string = ", ".join(
+ f'"{key}": "{value}"' for key, value in self.params.items()
+ )
+ return f'{self.label}: "{self.name}", params: ({params_string})'
+
constraints: list[str]
commands: list[Command]
resources: list[str]
- performance_evaluation: list[str]
+ best_practices: list[str]
command_registry: CommandRegistry | None
- # TODO: replace with AIConfig
- name: str
- role: str
- goals: list[str]
-
def __init__(self):
self.constraints = []
self.commands = []
self.resources = []
- self.performance_evaluation = []
+ self.best_practices = []
self.command_registry = None
- self.name = "Bob"
- self.role = "AI"
- self.goals = []
-
def add_constraint(self, constraint: str) -> None:
"""
Add a constraint to the constraints list.
@@ -75,31 +71,15 @@ class PromptGenerator:
function (callable, optional): A callable function to be called when
the command is executed. Defaults to None.
"""
- command_params = {name: type for name, type in params.items()}
-
- command: PromptGenerator.Command = {
- "label": command_label,
- "name": command_name,
- "params": command_params,
- "function": function,
- }
- self.commands.append(command)
-
- def _generate_command_string(self, command: Dict[str, Any]) -> str:
- """
- Generate a formatted string representation of a command.
-
- Args:
- command (dict): A dictionary containing command information.
-
- Returns:
- str: The formatted command string.
- """
- params_string = ", ".join(
- f'"{key}": "{value}"' for key, value in command["params"].items()
+ self.commands.append(
+ PromptGenerator.Command(
+ label=command_label,
+ name=command_name,
+ params={name: type for name, type in params.items()},
+ function=function,
+ )
)
- return f'{command["label"]}: "{command["name"]}", params: {params_string}'
def add_resource(self, resource: str) -> None:
"""
@@ -110,71 +90,67 @@ class PromptGenerator:
"""
self.resources.append(resource)
- def add_performance_evaluation(self, evaluation: str) -> None:
+ def add_best_practice(self, best_practice: str) -> None:
"""
- Add a performance evaluation item to the performance_evaluation list.
+ Add an item to the list of best practices.
Args:
- evaluation (str): The evaluation item to be added.
+ best_practice (str): The best practice item to be added.
"""
- self.performance_evaluation.append(evaluation)
+ self.best_practices.append(best_practice)
- def _generate_numbered_list(self, items: List[Any], item_type="list") -> str:
+ def _generate_numbered_list(self, items: list[str], start_at: int = 1) -> str:
"""
- Generate a numbered list from given items based on the item_type.
+ Generate a numbered list containing the given items.
Args:
items (list): A list of items to be numbered.
- item_type (str, optional): The type of items in the list.
- Defaults to 'list'.
+ start_at (int, optional): The number to start the sequence with; defaults to 1.
Returns:
str: The formatted numbered list.
"""
- if item_type == "command":
- command_strings = []
- if self.command_registry:
- command_strings += [
- str(item)
- for item in self.command_registry.commands.values()
- if item.enabled
- ]
- # terminate command is added manually
- command_strings += [self._generate_command_string(item) for item in items]
- return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
- else:
- return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
-
- def generate_prompt_string(self, config: Config) -> str:
+ return "\n".join(f"{i}. {item}" for i, item in enumerate(items, start_at))
+
+ def generate_prompt_string(
+ self,
+ *,
+ additional_constraints: list[str] = [],
+ additional_resources: list[str] = [],
+ additional_best_practices: list[str] = [],
+ ) -> str:
"""
Generate a prompt string based on the constraints, commands, resources,
- and performance evaluations.
+ and best practices.
Returns:
str: The generated prompt string.
"""
+
return (
- f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
- f"{generate_commands(self, config)}"
- f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
- "Performance Evaluation:\n"
- f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
- "Respond with only valid JSON conforming to the following schema: \n"
- f"{json.dumps(llm_response_schema(config))}\n"
+ "## Constraints\n"
+ "You operate within the following constraints:\n"
+ f"{self._generate_numbered_list(self.constraints + additional_constraints)}\n\n"
+ "## Commands\n"
+ "You have access to the following commands:\n"
+ f"{self._generate_commands()}\n\n"
+ "## Resources\n"
+ "You can leverage access to the following resources:\n"
+ f"{self._generate_numbered_list(self.resources + additional_resources)}\n\n"
+ "## Best practices\n"
+ f"{self._generate_numbered_list(self.best_practices + additional_best_practices)}"
)
+ def _generate_commands(self) -> str:
+ command_strings = []
+ if self.command_registry:
+ command_strings += [
+ str(cmd)
+ for cmd in self.command_registry.commands.values()
+ if cmd.enabled
+ ]
-def generate_commands(self, config: Config) -> str:
- """
- Generate a prompt string based on the constraints, commands, resources,
- and performance evaluations.
+ # Add commands from plugins etc.
+ command_strings += [str(cmd) for cmd in self.commands]
- Returns:
- str: The generated prompt string.
- """
- if config.openai_functions:
- return ""
- return (
- "Commands:\n"
- f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
- )
+ return self._generate_numbered_list(command_strings)
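With `Command` now a dataclass that renders itself via `__str__`, assembling a prompt is a handful of calls; `generate_prompt_string` no longer needs a `Config` and formats the sections under Markdown headings. A minimal sketch with illustrative strings:

```python
from autogpt.prompts.generator import PromptGenerator

generator = PromptGenerator()
generator.add_constraint("~4000 word limit for short term memory.")
generator.add_command("Read file", "read_file", {"filename": "string"})
generator.add_resource("Internet access for searches.")
generator.add_best_practice("Reflect on past decisions.")
print(generator.generate_prompt_string())  # Constraints/Commands/Resources/Best practices
```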
diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py
index b64f11f59..627b6c50f 100644
--- a/autogpt/prompts/prompt.py
+++ b/autogpt/prompts/prompt.py
@@ -2,13 +2,17 @@ from autogpt.config.config import Config
from autogpt.config.prompt_config import PromptConfig
from autogpt.prompts.generator import PromptGenerator
-DEFAULT_TRIGGERING_PROMPT = "Determine exactly one command to use, and respond using the JSON schema specified previously:"
+DEFAULT_TRIGGERING_PROMPT = (
+ "Determine exactly one command to use based on the given goals "
+ "and the progress you have made so far, "
+ "and respond using the JSON schema specified previously:"
+)
def build_default_prompt_generator(config: Config) -> PromptGenerator:
"""
This function generates a prompt string that includes various constraints,
- commands, resources, and performance evaluations.
+ commands, resources, and best practices.
Returns:
str: The generated prompt string.
@@ -28,8 +32,8 @@ def build_default_prompt_generator(config: Config) -> PromptGenerator:
for resource in prompt_config.resources:
prompt_generator.add_resource(resource)
- # Add performance evaluations to the PromptGenerator object
- for performance_evaluation in prompt_config.performance_evaluations:
- prompt_generator.add_performance_evaluation(performance_evaluation)
+ # Add best practices to the PromptGenerator object
+ for best_practice in prompt_config.best_practices:
+ prompt_generator.add_best_practice(best_practice)
return prompt_generator
diff --git a/autogpt/utils.py b/autogpt/utils.py
index 28c4be517..f69fe50f8 100644
--- a/autogpt/utils.py
+++ b/autogpt/utils.py
@@ -1,70 +1,5 @@
-import os
-import re
-
-import requests
import yaml
-from colorama import Fore, Style
-from git.repo import Repo
-from prompt_toolkit import ANSI, PromptSession
-from prompt_toolkit.history import InMemoryHistory
-
-from autogpt.config import Config
-from autogpt.logs import logger
-
-session = PromptSession(history=InMemoryHistory())
-
-
-def batch(iterable, max_batch_length: int, overlap: int = 0):
- """Batch data from iterable into slices of length N. The last batch may be shorter."""
- # batched('ABCDEFG', 3) --> ABC DEF G
- if max_batch_length < 1:
- raise ValueError("n must be at least one")
- for i in range(0, len(iterable), max_batch_length - overlap):
- yield iterable[i : i + max_batch_length]
-
-
-def clean_input(config: Config, prompt: str = "", talk=False):
- try:
- if config.chat_messages_enabled:
- for plugin in config.plugins:
- if not hasattr(plugin, "can_handle_user_input"):
- continue
- if not plugin.can_handle_user_input(user_input=prompt):
- continue
- plugin_response = plugin.user_input(user_input=prompt)
- if not plugin_response:
- continue
- if plugin_response.lower() in [
- "yes",
- "yeah",
- "y",
- "ok",
- "okay",
- "sure",
- "alright",
- ]:
- return config.authorise_key
- elif plugin_response.lower() in [
- "no",
- "nope",
- "n",
- "negative",
- ]:
- return config.exit_key
- return plugin_response
-
- # ask for input, default when just pressing Enter is y
- logger.info("Asking user via keyboard...")
-
- # handle_sigint must be set to False, so the signal handler in the
- # autogpt/main.py could be employed properly. This referes to
- # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
- answer = session.prompt(ANSI(prompt), handle_sigint=False)
- return answer
- except KeyboardInterrupt:
- logger.info("You interrupted Auto-GPT")
- logger.info("Quitting...")
- exit(0)
+from colorama import Fore
def validate_yaml_file(file: str):
@@ -80,106 +15,3 @@ def validate_yaml_file(file: str):
)
return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
-
-
-def readable_file_size(size, decimal_places=2):
- """Converts the given size in bytes to a readable format.
- Args:
- size: Size in bytes
- decimal_places (int): Number of decimal places to display
- """
- for unit in ["B", "KB", "MB", "GB", "TB"]:
- if size < 1024.0:
- break
- size /= 1024.0
- return f"{size:.{decimal_places}f} {unit}"
-
-
-def get_bulletin_from_web():
- try:
- response = requests.get(
- "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
- )
- if response.status_code == 200:
- return response.text
- except requests.exceptions.RequestException:
- pass
-
- return ""
-
-
-def get_current_git_branch() -> str:
- try:
- repo = Repo(search_parent_directories=True)
- branch = repo.active_branch
- return branch.name
- except:
- return ""
-
-
-def get_latest_bulletin() -> tuple[str, bool]:
- exists = os.path.exists("data/CURRENT_BULLETIN.md")
- current_bulletin = ""
- if exists:
- current_bulletin = open(
- "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
- ).read()
- new_bulletin = get_bulletin_from_web()
- is_new_news = new_bulletin != "" and new_bulletin != current_bulletin
-
- news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
- if new_bulletin or current_bulletin:
- news_header += (
- "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
- "If you don't wish to see this message, you "
- "can run Auto-GPT with the *--skip-news* flag.\n"
- )
-
- if new_bulletin and is_new_news:
- open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
- current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"
-
- return f"{news_header}\n{current_bulletin}", is_new_news
-
-
-def markdown_to_ansi_style(markdown: str):
- ansi_lines: list[str] = []
- for line in markdown.split("\n"):
- line_style = ""
-
- if line.startswith("# "):
- line_style += Style.BRIGHT
- else:
- line = re.sub(
- r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
- rf"{Style.BRIGHT}\1{Style.NORMAL}",
- line,
- )
-
- if re.match(r"^#+ ", line) is not None:
- line_style += Fore.CYAN
- line = re.sub(r"^#+ ", "", line)
-
- ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
- return "\n".join(ansi_lines)
-
-
-def get_legal_warning() -> str:
- legal_text = """
-## DISCLAIMER AND INDEMNIFICATION AGREEMENT
-### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.
-
-## Introduction
-AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.
-
-## No Liability for Actions of the System
-The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.
-
-## User Responsibility and Respondeat Superior Liability
-As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
-behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.
-
-## Indemnification
-By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
- """
- return legal_text
diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py
index 6e77c21ac..2176d4149 100644
--- a/autogpt/workspace/workspace.py
+++ b/autogpt/workspace/workspace.py
@@ -144,21 +144,24 @@ class Workspace:
return full_path
@staticmethod
- def set_file_logger_path(config: Config, workspace_directory: Path):
+ def build_file_logger_path(workspace_directory: Path) -> Path:
file_logger_path = workspace_directory / "file_logger.txt"
if not file_logger_path.exists():
with file_logger_path.open(mode="w", encoding="utf-8") as f:
f.write("File Operation Logger ")
- config.file_logger_path = str(file_logger_path)
+ return file_logger_path
@staticmethod
- def set_workspace_directory(
- config: Config, workspace_directory: Optional[str | Path] = None
- ) -> None:
- if workspace_directory is None:
- workspace_directory = config.workdir / "auto_gpt_workspace"
- elif type(workspace_directory) == str:
- workspace_directory = Path(workspace_directory)
+ def init_workspace_directory(
+ config: Config, override_workspace_path: Optional[str | Path] = None
+ ) -> Path:
+ if override_workspace_path is None:
+ workspace_path = config.workdir / "auto_gpt_workspace"
+ elif type(override_workspace_path) == str:
+ workspace_path = Path(override_workspace_path)
+ else:
+ workspace_path = override_workspace_path
+
# TODO: pass in the ai_settings file and the env file and have them cloned into
# the workspace directory so we can bind them to the agent.
- config.workspace_path = Workspace.make_workspace(workspace_directory)
+ return Workspace.make_workspace(workspace_path)
diff --git a/benchmarks.py b/benchmarks.py
index 04153f4b1..62f89662e 100644
--- a/benchmarks.py
+++ b/benchmarks.py
@@ -22,10 +22,10 @@ def bootstrap_agent(task):
config.continuous_mode = False
config.temperature = 0
config.plain_output = True
- command_registry = get_command_registry(config)
+ command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
config.memory_backend = "no_memory"
- Workspace.set_workspace_directory(config)
- Workspace.set_file_logger_path(config, config.workspace_path)
+ config.workspace_path = Workspace.init_workspace_directory(config)
+ config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
ai_config = AIConfig(
ai_name="Auto-GPT",
ai_role="a multi-purpose AI assistant.",
@@ -39,13 +39,3 @@ def bootstrap_agent(task):
config=config,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
)
-
-
-def get_command_registry(config: Config):
- command_registry = CommandRegistry()
- enabled_command_categories = [
- x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
- ]
- for command_category in enabled_command_categories:
- command_registry.import_commands(command_category)
- return command_registry
diff --git a/docs/_javascript/mathjax.js b/docs/_javascript/mathjax.js
new file mode 100644
index 000000000..a80ddbff7
--- /dev/null
+++ b/docs/_javascript/mathjax.js
@@ -0,0 +1,16 @@
+window.MathJax = {
+ tex: {
+ inlineMath: [["\\(", "\\)"]],
+ displayMath: [["\\[", "\\]"]],
+ processEscapes: true,
+ processEnvironments: true
+ },
+ options: {
+ ignoreHtmlClass: ".*|",
+ processHtmlClass: "arithmatex"
+ }
+};
+
+document$.subscribe(() => {
+ MathJax.typesetPromise()
+})
\ No newline at end of file
diff --git a/docs/_javascript/tablesort.js b/docs/_javascript/tablesort.js
new file mode 100644
index 000000000..ee04e9008
--- /dev/null
+++ b/docs/_javascript/tablesort.js
@@ -0,0 +1,6 @@
+document$.subscribe(function () {
+ var tables = document.querySelectorAll("article table:not([class])")
+ tables.forEach(function (table) {
+ new Tablesort(table)
+ })
+})
\ No newline at end of file
diff --git a/docs/challenges/building_challenges.md b/docs/challenges/building_challenges.md
index a4d0fa082..9caf5cdd2 100644
--- a/docs/challenges/building_challenges.md
+++ b/docs/challenges/building_challenges.md
@@ -59,7 +59,6 @@ def kubernetes_agent(
config=ai_config,
next_action_count=0,
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
- workspace_directory=workspace.root,
)
return agent
diff --git a/docs/challenges/introduction.md b/docs/challenges/introduction.md
index 63f570f26..256a82385 100644
--- a/docs/challenges/introduction.md
+++ b/docs/challenges/introduction.md
@@ -21,3 +21,15 @@ There are two main ways to get involved with challenges:
To learn more about submitting and beating challenges, please visit the [List of Challenges](list.md), [Submit a Challenge](submit.md), and [Beat a Challenge](beat.md) pages.
We look forward to your contributions and the exciting solutions that the community will develop together to make Auto-GPT even better!
+
+!!! warning
+
+    We're slowly transitioning to agbenchmark, a simpler way to improve Auto-GPT. Simply run:
+
+ ```
+ agbenchmark start
+ ```
+
+ and beat as many challenges as possible.
+
+For more agbenchmark options, look at the [readme](https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks/tree/master/agbenchmark).
diff --git a/docs/challenges/memory/challenge_b.md b/docs/challenges/memory/challenge_b.md
index abc6da6be..7df7044ff 100644
--- a/docs/challenges/memory/challenge_b.md
+++ b/docs/challenges/memory/challenge_b.md
@@ -3,9 +3,10 @@
**Status**: Current level to beat: level 3
**Command to try**:
-```
+
+```shell
pytest -s tests/challenges/memory/test_memory_challenge_b.py --level=3
-``
+```
## Description
diff --git a/docs/challenges/memory/challenge_c.md b/docs/challenges/memory/challenge_c.md
index e197ddbd2..955c823f3 100644
--- a/docs/challenges/memory/challenge_c.md
+++ b/docs/challenges/memory/challenge_c.md
@@ -3,9 +3,10 @@
**Status**: Current level to beat: level 1
**Command to try**:
-```
+
+```shell
pytest -s tests/challenges/memory/test_memory_challenge_c.py --level=2
-``
+```
## Description
@@ -20,13 +21,23 @@ The challenge involves a series of text files containing instructions and silly
- `instructions_1.txt`
-The current phrase is "The purple elephant danced on a rainbow while eating a taco.".\n[NOISE intended to confuse the agent]
-Read the file instructions_2.txt using the read_file command.
+The current phrase is
+
+```
+The purple elephant danced on a rainbow while eating a taco.\n[NOISE intended to confuse the agent]
+```
+
+Read the file `instructions_2.txt` using the read_file command.
- `instructions_2.txt`
-The current phrase is "The sneaky toaster stole my socks and ran away to Hawaii.".\n[NOISE intended to confuse the agent]
+The current phrase is
+
+```
+The sneaky toaster stole my socks and ran away to Hawaii.\n[NOISE intended to confuse the agent]
+```
+
Read the file instructions_3.txt using the read_file command.
@@ -34,7 +45,12 @@ Read the file instructions_3.txt using the read_file command.
- `instructions_n.txt`
-The current phrase is "My pet rock sings better than Beyoncé on Tuesdays."
+The current phrase is
+
+```
+My pet rock sings better than Beyoncé on Tuesdays.
+```
+
Write all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command.
diff --git a/docs/challenges/memory/challenge_d.md b/docs/challenges/memory/challenge_d.md
index 7563cce59..5ecec045e 100644
--- a/docs/challenges/memory/challenge_d.md
+++ b/docs/challenges/memory/challenge_d.md
@@ -1,11 +1,12 @@
-# Memory Challenge C
+# Memory Challenge D
**Status**: Current level to beat: level 1
**Command to try**:
-```
+
+```shell
pytest -s tests/challenges/memory/test_memory_challenge_d.py --level=1
-``
+```
## Description
@@ -30,13 +31,16 @@ The test runs for levels up to the maximum level that the AI has successfully be
- `instructions_1.txt`
-"Sally has a marble (marble A) and she puts it in her basket (basket S), then leaves the room. Anne moves marble A from Sally's basket (basket S) to her own basket (basket A).",
+```
+Sally has a marble (marble A) and she puts it in her basket (basket S), then leaves the room. Anne moves marble A from Sally's basket (basket S) to her own basket (basket A).
+```
- `instructions_2.txt`
-"Sally gives a new marble (marble B) to Bob who is outside with her. Bob goes into the room and places marble B into Anne's basket (basket A). Anne tells Bob to tell Sally that he lost the marble b. Bob leaves the room and speaks to Sally about the marble B. Meanwhile, after Bob left the room, Anne moves marble A into the green box, but tells Charlie to tell Sally that marble A is under the sofa. Charlie leaves the room and speak to Sally about the marble A as instructed by Anne.",
-
+```
+Sally gives a new marble (marble B) to Bob who is outside with her. Bob goes into the room and places marble B into Anne's basket (basket A). Anne tells Bob to tell Sally that he lost the marble b. Bob leaves the room and speaks to Sally about the marble B. Meanwhile, after Bob left the room, Anne moves marble A into the green box, but tells Charlie to tell Sally that marble A is under the sofa. Charlie leaves the room and speak to Sally about the marble A as instructed by Anne.
+```
...and so on.
@@ -44,6 +48,7 @@ The test runs for levels up to the maximum level that the AI has successfully be
The expected beliefs of each character are given in a list:
+```json
expected_beliefs = {
1: {
'Sally': {
@@ -68,7 +73,7 @@ expected_beliefs = {
'A': 'sofa', # Because Anne told him to tell Sally so
}
},...
-
+```
## Objective
diff --git a/docs/configuration/imagegen.md b/docs/configuration/imagegen.md
index 38fdcebb2..1a10d61d2 100644
--- a/docs/configuration/imagegen.md
+++ b/docs/configuration/imagegen.md
@@ -7,7 +7,8 @@
## DALL-e
In `.env`, make sure `IMAGE_PROVIDER` is commented (or set to `dalle`):
-``` ini
+
+```ini
# IMAGE_PROVIDER=dalle # this is the default
```
@@ -23,7 +24,8 @@ To use text-to-image models from Hugging Face, you need a Hugging Face API token
Link to the appropriate settings page: [Hugging Face > Settings > Tokens](https://huggingface.co/settings/tokens)
Once you have an API token, uncomment and adjust these variables in your `.env`:
-``` ini
+
+```ini
IMAGE_PROVIDER=huggingface
HUGGINGFACE_API_TOKEN=your-huggingface-api-token
```
@@ -39,7 +41,8 @@ Further optional configuration:
## Stable Diffusion WebUI
It is possible to use your own self-hosted Stable Diffusion WebUI with Auto-GPT:
-``` ini
+
+```ini
IMAGE_PROVIDER=sdwebui
```
@@ -54,6 +57,7 @@ Further optional configuration:
| `SD_WEBUI_AUTH` | `{username}:{password}` | *Note: do not copy the braces!* |
## Selenium
-``` shell
+
+```shell
sudo Xvfb :10 -ac -screen 0 1024x768x24 & DISPLAY=:10 <YOUR_CLIENT>
```
diff --git a/docs/configuration/memory.md b/docs/configuration/memory.md
index 9d18f5ba2..1a5e716ab 100644
--- a/docs/configuration/memory.md
+++ b/docs/configuration/memory.md
@@ -51,17 +51,19 @@ Links to memory backends
1. Launch Redis container
- :::shell
- docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+ ```shell
+ docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+ ```
3. Set the following settings in `.env`
- :::ini
- MEMORY_BACKEND=redis
- REDIS_HOST=localhost
- REDIS_PORT=6379
- REDIS_PASSWORD=<PASSWORD>
-
+ ```shell
+ MEMORY_BACKEND=redis
+ REDIS_HOST=localhost
+ REDIS_PORT=6379
+ REDIS_PASSWORD=<PASSWORD>
+ ```
+
Replace `<PASSWORD>` by your password, omitting the angled brackets (<>).
Optional configuration:
@@ -157,7 +159,7 @@ To enable it, set `USE_WEAVIATE_EMBEDDED` to `True` and make sure you `pip insta
Install the Weaviate client before usage.
-``` shell
+```shell
$ pip install weaviate-client
```
@@ -165,7 +167,7 @@ $ pip install weaviate-client
In your `.env` file set the following:
-``` ini
+```ini
MEMORY_BACKEND=weaviate
WEAVIATE_HOST="127.0.0.1" # the IP or domain of the running Weaviate instance
WEAVIATE_PORT="8080"
@@ -186,7 +188,7 @@ View memory usage by using the `--debug` flag :)
## 🧠 Memory pre-seeding
!!! warning
- Data ingestion is broken in v0.4.6 and possibly earlier versions. This is a known issue that will be addressed in future releases. Follow these issues for updates.
+ Data ingestion is broken in v0.4.7 and possibly earlier versions. This is a known issue that will be addressed in future releases. Follow these issues for updates.
[Issue 4435](https://github.com/Significant-Gravitas/Auto-GPT/issues/4435)
[Issue 4024](https://github.com/Significant-Gravitas/Auto-GPT/issues/4024)
[Issue 2076](https://github.com/Significant-Gravitas/Auto-GPT/issues/2076)
@@ -195,7 +197,7 @@ View memory usage by using the `--debug` flag :)
Memory pre-seeding allows you to ingest files into memory and pre-seed it before running Auto-GPT.
-``` shell
+```shell
$ python data_ingestion.py -h
usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]
diff --git a/docs/configuration/voice.md b/docs/configuration/voice.md
index 728fbaf5f..654d2ee45 100644
--- a/docs/configuration/voice.md
+++ b/docs/configuration/voice.md
@@ -2,7 +2,7 @@
Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT
-``` shell
+```shell
python -m autogpt --speak
```
diff --git a/docs/imgs/Auto_GPT_Logo.png b/docs/imgs/Auto_GPT_Logo.png
new file mode 100644
index 000000000..9c60eea98
--- /dev/null
+++ b/docs/imgs/Auto_GPT_Logo.png
Binary files differ
diff --git a/docs/setup.md b/docs/setup.md
index d0079e0f0..bd2f142e0 100644
--- a/docs/setup.md
+++ b/docs/setup.md
@@ -36,40 +36,43 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
1. Make sure you have Docker installed, see [requirements](#requirements)
2. Create a project directory for Auto-GPT
- :::shell
- mkdir Auto-GPT
- cd Auto-GPT
+ ```shell
+ mkdir Auto-GPT
+ cd Auto-GPT
+ ```
3. In the project directory, create a file called `docker-compose.yml` with the following contents:
- :::yaml
- version: "3.9"
- services:
- auto-gpt:
- image: significantgravitas/auto-gpt
- env_file:
- - .env
- profiles: ["exclude-from-up"]
- volumes:
- - ./auto_gpt_workspace:/app/auto_gpt_workspace
- - ./data:/app/data
- ## allow auto-gpt to write logs to disk
- - ./logs:/app/logs
- ## uncomment following lines if you want to make use of these files
- ## you must have them existing in the same folder as this docker-compose.yml
- #- type: bind
- # source: ./azure.yaml
- # target: /app/azure.yaml
- #- type: bind
- # source: ./ai_settings.yaml
- # target: /app/ai_settings.yaml
+ ```yaml
+ version: "3.9"
+ services:
+ auto-gpt:
+ image: significantgravitas/auto-gpt
+ env_file:
+ - .env
+ profiles: ["exclude-from-up"]
+ volumes:
+ - ./auto_gpt_workspace:/app/auto_gpt_workspace
+ - ./data:/app/data
+ ## allow auto-gpt to write logs to disk
+ - ./logs:/app/logs
+ ## uncomment following lines if you want to make use of these files
+ ## you must have them existing in the same folder as this docker-compose.yml
+ #- type: bind
+ # source: ./azure.yaml
+ # target: /app/azure.yaml
+ #- type: bind
+ # source: ./ai_settings.yaml
+ # target: /app/ai_settings.yaml
+ ```
4. Create the necessary [configuration](#configuration) files. If needed, you can find
templates in the [repository].
5. Pull the latest image from [Docker Hub]
- :::shell
- docker pull significantgravitas/auto-gpt
+ ```shell
+ docker pull significantgravitas/auto-gpt
+ ```
6. Continue to [Run with Docker](#run-with-docker)
@@ -92,14 +95,15 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
1. Clone the repository
- :::shell
- git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git
+ ```shell
+ git clone -b stable https://github.com/Significant-Gravitas/Auto-GPT.git
+ ```
2. Navigate to the directory where you downloaded the repository
- :::shell
- cd Auto-GPT
-
+ ```shell
+ cd Auto-GPT
+ ```
### Set up without Git/Docker
@@ -139,12 +143,13 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt
Example:
- :::yaml
- # Please specify all of these values as double-quoted strings
- # Replace string in angled brackets (<>) to your own deployment Name
- azure_model_map:
- fast_llm_deployment_id: "<auto-gpt-deployment>"
- ...
+ ```yaml
+ # Please specify all of these values as double-quoted strings
+      # Replace the string in angled brackets (<>) with your own deployment name
+ azure_model_map:
+ fast_llm_deployment_id: "<auto-gpt-deployment>"
+ ...
+ ```
Details can be found in the [openai-python docs], and in the [Azure OpenAI docs] for the embedding model.
If you're on Windows you may need to install an [MSVC library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
@@ -164,7 +169,9 @@ Easiest is to use `docker compose`.
Important: Docker Compose version 1.29.0 or later is required to use version 3.9 of the Compose file format.
You can check the version of Docker Compose installed on your system by running the following command:
- docker compose version
+```shell
+docker compose version
+```
This will display the version of Docker Compose that is currently installed on your system.
@@ -174,13 +181,15 @@ Once you have a recent version of Docker Compose, run the commands below in your
1. Build the image. If you have pulled the image from Docker Hub, skip this step (NOTE: You *will* need to do this if you are modifying requirements.txt to add/remove dependencies like Python libs/frameworks)
- :::shell
- docker compose build auto-gpt
-
+ ```shell
+ docker compose build auto-gpt
+ ```
+
2. Run Auto-GPT
- :::shell
- docker compose run --rm auto-gpt
+ ```shell
+ docker compose run --rm auto-gpt
+ ```
By default, this will also start and attach a Redis memory backend. If you do not
want this, comment or remove the `depends: - redis` and `redis:` sections from
@@ -189,12 +198,14 @@ Once you have a recent version of Docker Compose, run the commands below in your
For related settings, see [Memory > Redis setup](./configuration/memory.md#redis-setup).
You can pass extra arguments, e.g. running with `--gpt3only` and `--continuous`:
-``` shell
+
+```shell
docker compose run --rm auto-gpt --gpt3only --continuous
```
If you dare, you can also build and run it with "vanilla" docker commands:
-``` shell
+
+```shell
docker build -t auto-gpt .
docker run -it --env-file=.env -v $PWD:/app auto-gpt
docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuous
@@ -218,7 +229,7 @@ docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuou
Create a virtual environment to run in.
-``` shell
+```shell
python -m venv venvAutoGPT
source venvAutoGPT/bin/activate
pip3 install --upgrade pip
@@ -232,13 +243,15 @@ packages and launch Auto-GPT.
- On Linux/MacOS:
- :::shell
- ./run.sh
+ ```shell
+ ./run.sh
+ ```
- On Windows:
- :::shell
- .\run.bat
+ ```shell
+ .\run.bat
+ ```
If this gives errors, make sure you have a compatible Python version installed. See also
the [requirements](./installation.md#requirements).
diff --git a/docs/share-your-logs.md b/docs/share-your-logs.md
index f673e375c..ebcce8393 100644
--- a/docs/share-your-logs.md
+++ b/docs/share-your-logs.md
@@ -8,7 +8,7 @@ Activity, Error, and Debug logs are located in `./logs`
To print out debug logs:
-``` shell
+```shell
./run.sh --debug # on Linux / macOS
.\run.bat --debug # on Windows
diff --git a/docs/testing.md b/docs/testing.md
index 9a1735966..ef8176abf 100644
--- a/docs/testing.md
+++ b/docs/testing.md
@@ -2,12 +2,13 @@
To run all tests, use the following command:
-``` shell
+```shell
pytest
```
If `pytest` is not found:
-``` shell
+
+```shell
python -m pytest
```
@@ -15,18 +16,21 @@ python -m pytest
- To run without integration tests:
- :::shell
- pytest --without-integration
+```shell
+pytest --without-integration
+```
- To run without *slow* integration tests:
- :::shell
- pytest --without-slow-integration
+```shell
+pytest --without-slow-integration
+```
- To run tests and see coverage:
- :::shell
- pytest --cov=autogpt --without-integration --without-slow-integration
+```shell
+pytest --cov=autogpt --without-integration --without-slow-integration
+```
## Running the linter
@@ -36,11 +40,12 @@ See the [flake8 rules](https://www.flake8rules.com/) for more information.
To run the linter:
-``` shell
+```shell
flake8 .
```
Or:
-``` shell
+
+```shell
python -m flake8 .
```
diff --git a/docs/usage.md b/docs/usage.md
index cb74ef7f6..f280bc8f5 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -3,7 +3,7 @@
## Command Line Arguments
Running with `--help` lists all the possible command line arguments you can pass:
-``` shell
+```shell
./run.sh --help # on Linux / macOS
.\run.bat --help # on Windows
@@ -13,9 +13,10 @@ Running with `--help` lists all the possible command line arguments you can pass
For use with Docker, replace the script in the examples with
`docker compose run --rm auto-gpt`:
- :::shell
- docker compose run --rm auto-gpt --help
- docker compose run --rm auto-gpt --ai-settings <filename>
+ ```shell
+ docker compose run --rm auto-gpt --help
+ docker compose run --rm auto-gpt --ai-settings <filename>
+ ```
!!! note
Replace anything in angled brackets (<>) to a value you want to specify
@@ -23,18 +24,22 @@ Running with `--help` lists all the possible command line arguments you can pass
Here are some common arguments you can use when running Auto-GPT:
* Run Auto-GPT with a different AI Settings file
- ``` shell
- ./run.sh --ai-settings <filename>
- ```
+
+```shell
+./run.sh --ai-settings <filename>
+```
+
* Run Auto-GPT with a different Prompt Settings file
- ``` shell
- ./run.sh --prompt-settings <filename>
- ```
-* Specify a memory backend
- :::shell
- ./run.sh --use-memory <memory-backend>
+```shell
+./run.sh --prompt-settings <filename>
+```
+
+* Specify a memory backend
+```shell
+./run.sh --use-memory <memory-backend>
+```
!!! note
There are shorthands for some of these flags, for example `-m` for `--use-memory`.
@@ -44,7 +49,7 @@ Here are some common arguments you can use when running Auto-GPT:
Enter this command to use TTS _(Text-to-Speech)_ for Auto-GPT
-``` shell
+```shell
./run.sh --speak
```
@@ -55,9 +60,10 @@ Continuous mode is NOT recommended.
It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorize.
Use at your own risk.
-``` shell
+```shell
./run.sh --continuous
```
+
To exit the program, press ++ctrl+c++
### ♻️ Self-Feedback Mode ⚠️
@@ -68,7 +74,7 @@ Running Self-Feedback will **INCREASE** token use and thus cost more. This featu
If you don't have access to GPT-4, this mode allows you to use Auto-GPT!
-``` shell
+```shell
./run.sh --gpt3only
```
@@ -79,7 +85,7 @@ You can achieve the same by setting `SMART_LLM` in `.env` to `gpt-3.5-turbo`.
If you have access to GPT-4, this mode allows you to use Auto-GPT solely with GPT-4.
This may give your bot increased intelligence.
-``` shell
+```shell
./run.sh --gpt4only
```
@@ -97,7 +103,7 @@ Activity, Error, and Debug logs are located in `./logs`
To print out debug logs:
-``` shell
+```shell
./run.sh --debug # on Linux / macOS
.\run.bat --debug # on Windows
diff --git a/mkdocs.yml b/mkdocs.yml
index 2265a63fa..0a9bb9e12 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -7,39 +7,110 @@ nav:
- Usage: usage.md
- Plugins: plugins.md
- Configuration:
- - Options: configuration/options.md
- - Search: configuration/search.md
- - Memory: configuration/memory.md
- - Voice: configuration/voice.md
- - Image Generation: configuration/imagegen.md
+ - Options: configuration/options.md
+ - Search: configuration/search.md
+ - Memory: configuration/memory.md
+ - Voice: configuration/voice.md
+ - Image Generation: configuration/imagegen.md
- Help us improve Auto-GPT:
- - Share your debug logs with us: share-your-logs.md
- - Contribution guide: contributing.md
- - Running tests: testing.md
- - Code of Conduct: code-of-conduct.md
+ - Share your debug logs with us: share-your-logs.md
+ - Contribution guide: contributing.md
+ - Running tests: testing.md
+ - Code of Conduct: code-of-conduct.md
- Challenges:
- - Introduction: challenges/introduction.md
- - List of Challenges:
- - Memory:
- - Introduction: challenges/memory/introduction.md
- - Memory Challenge A: challenges/memory/challenge_a.md
- - Memory Challenge B: challenges/memory/challenge_b.md
- - Memory Challenge C: challenges/memory/challenge_c.md
- - Memory Challenge D: challenges/memory/challenge_d.md
- - Information retrieval:
- - Introduction: challenges/information_retrieval/introduction.md
- - Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md
- - Information Retrieval Challenge B: challenges/information_retrieval/challenge_b.md
+ - Introduction: challenges/introduction.md
+ - List of Challenges:
+ - Memory:
+ - Introduction: challenges/memory/introduction.md
+ - Memory Challenge A: challenges/memory/challenge_a.md
+ - Memory Challenge B: challenges/memory/challenge_b.md
+ - Memory Challenge C: challenges/memory/challenge_c.md
+ - Memory Challenge D: challenges/memory/challenge_d.md
+ - Information retrieval:
+ - Introduction: challenges/information_retrieval/introduction.md
+ - Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md
+ - Information Retrieval Challenge B: challenges/information_retrieval/challenge_b.md
- Submit a Challenge: challenges/submit.md
- Beat a Challenge: challenges/beat.md
- License: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/LICENSE
-theme: readthedocs
+theme:
+ name: material
+ icon:
+ logo: material/book-open-variant
+ favicon: imgs/Auto_GPT_Logo.png
+ features:
+ - navigation.sections
+ - toc.follow
+ - navigation.top
+ - content.code.copy
+ palette:
+ # Palette toggle for light mode
+ - media: "(prefers-color-scheme: light)"
+ scheme: default
+ toggle:
+ icon: material/weather-night
+ name: Switch to dark mode
+
+ # Palette toggle for dark mode
+ - media: "(prefers-color-scheme: dark)"
+ scheme: slate
+ toggle:
+ icon: material/weather-sunny
+ name: Switch to light mode
markdown_extensions:
- admonition:
- codehilite:
- pymdownx.keys:
+ # Python Markdown
+ - abbr
+ - admonition
+ - attr_list
+ - def_list
+ - footnotes
+ - md_in_html
+ - toc:
+ permalink: true
+ - tables
+
+ # Python Markdown Extensions
+ - pymdownx.arithmatex:
+ generic: true
+ - pymdownx.betterem:
+ smart_enable: all
+ - pymdownx.critic
+ - pymdownx.caret
+ - pymdownx.details
+ - pymdownx.emoji:
+ emoji_index: !!python/name:materialx.emoji.twemoji
+ emoji_generator: !!python/name:materialx.emoji.to_svg
+ - pymdownx.highlight
+ - pymdownx.inlinehilite
+ - pymdownx.keys
+ - pymdownx.mark
+ - pymdownx.smartsymbols
+ - pymdownx.snippets:
+ auto_append:
+ - includes/abbreviations.md
+ - pymdownx.superfences:
+ custom_fences:
+ - name: mermaid
+ class: mermaid
+ format: !!python/name:pymdownx.superfences.fence_code_format
+ - pymdownx.tabbed:
+ alternate_style: true
+ - pymdownx.tasklist:
+ custom_checkbox: true
+ - pymdownx.tilde
+
+plugins:
+ - table-reader
+ - search
+
+extra_javascript:
+ - https://unpkg.com/tablesort@5.3.0/dist/tablesort.min.js
+ - _javascript/tablesort.js
+ - _javascript/mathjax.js
+ - https://polyfill.io/v3/polyfill.min.js?features=es6
+ - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
diff --git a/prompt_settings.yaml b/prompt_settings.yaml
index 342d67b9e..a83ca6225 100644
--- a/prompt_settings.yaml
+++ b/prompt_settings.yaml
@@ -7,9 +7,10 @@ constraints: [
resources: [
'Internet access for searches and information gathering.',
'Long Term memory management.',
- 'File output.'
+ 'File output.',
+    'Command execution.'
]
-performance_evaluations: [
+best_practices: [
'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.',
'Constructively self-criticize your big-picture behavior constantly.',
'Reflect on past decisions and strategies to refine your approach.',
diff --git a/pyproject.toml b/pyproject.toml
index da0fcdd68..ede3e62da 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "agpt"
-version = "0.4.6"
+version = "0.4.7"
authors = [
{ name="Torantulino", email="support@agpt.co" },
]
diff --git a/requirements.txt b/requirements.txt
index 4af8bccd9..5dc87ff62 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,9 @@
beautifulsoup4>=4.12.2
colorama==0.4.6
distro==1.8.0
-openai==0.27.2
+openai==0.27.8
playsound==1.2.2
-python-dotenv==1.0.0
+python-dotenv==0.21
pyyaml==6.0
PyPDF2
python-docx
@@ -31,6 +31,8 @@ en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_
prompt_toolkit>=3.0.38
pydantic
inflection
+agbenchmark
+agent-protocol>=0.1.1
# web server
fastapi
@@ -46,6 +48,8 @@ isort
gitpython==3.1.31
auto-gpt-plugin-template @ git+https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template@0.1.0
mkdocs
+mkdocs-material
+mkdocs-table-reader-plugin
pymdown-extensions
mypy
types-Markdown
diff --git a/tests/Auto-GPT-test-cassettes b/tests/Auto-GPT-test-cassettes
-Subproject 47e262905edc1380bc0539fd298fd94d99667e8
+Subproject 0e4b46dc515585902eaae068dcbc3f182dd263b
diff --git a/tests/challenges/utils.py b/tests/challenges/utils.py
index 9d1b76e7f..67d7425c8 100644
--- a/tests/challenges/utils.py
+++ b/tests/challenges/utils.py
@@ -6,9 +6,9 @@ from typing import Any, Generator
import pytest
+from agbenchmark.benchmarks import run_specific_agent
from autogpt.logs import LogCycleHandler
from autogpt.workspace import Workspace
-from benchmarks import run_task
from tests.challenges.schema import Task
@@ -38,7 +38,9 @@ def setup_mock_input(monkeypatch: pytest.MonkeyPatch, cycle_count: int) -> None:
yield from input_sequence
gen = input_generator()
- monkeypatch.setattr("autogpt.utils.session.prompt", lambda _, **kwargs: next(gen))
+ monkeypatch.setattr(
+ "autogpt.app.utils.session.prompt", lambda _, **kwargs: next(gen)
+ )
def setup_mock_log_cycle_agent_name(
@@ -75,4 +77,4 @@ def run_challenge(
setup_mock_log_cycle_agent_name(monkeypatch, challenge_name, level_to_run)
task = Task(user_input=user_input)
with contextlib.suppress(SystemExit):
- run_task(task)
+ run_specific_agent(task.user_input)
diff --git a/tests/conftest.py b/tests/conftest.py
index c3076d545..cfcebcb7f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -52,7 +52,7 @@ def config(
if not os.environ.get("OPENAI_API_KEY"):
os.environ["OPENAI_API_KEY"] = "sk-dummy"
- Workspace.set_workspace_directory(config, workspace.root)
+ config.workspace_path = workspace.root
# HACK: this is necessary to ensure PLAIN_OUTPUT takes effect
logger.config = config
diff --git a/tests/integration/test_setup.py b/tests/integration/test_setup.py
index f4bb9a5c8..ff83eee6b 100644
--- a/tests/integration/test_setup.py
+++ b/tests/integration/test_setup.py
@@ -10,7 +10,7 @@ from autogpt.config.ai_config import AIConfig
@pytest.mark.requires_openai_api_key
def test_generate_aiconfig_automatic_default(patched_api_requestor, config):
user_inputs = [""]
- with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
+ with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user(config)
assert isinstance(ai_config, AIConfig)
@@ -43,7 +43,7 @@ def test_generate_aiconfig_automatic_fallback(patched_api_requestor, config):
"",
"",
]
- with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
+ with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user(config)
assert isinstance(ai_config, AIConfig)
@@ -64,7 +64,7 @@ def test_prompt_user_manual_mode(patched_api_requestor, config):
"",
"",
]
- with patch("autogpt.utils.session.prompt", side_effect=user_inputs):
+ with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
ai_config = prompt_user(config)
assert isinstance(ai_config, AIConfig)
diff --git a/tests/integration/test_update_user.py b/tests/integration/test_update_user.py
new file mode 100644
index 000000000..bc9206317
--- /dev/null
+++ b/tests/integration/test_update_user.py
@@ -0,0 +1,33 @@
+from unittest.mock import MagicMock, patch
+
+from colorama import Fore
+
+from autogpt.app.main import update_user
+
+
+def test_update_user_command_name_is_none() -> None:
+ # Mock necessary objects
+ config = MagicMock()
+ ai_config = MagicMock()
+ assistant_reply_dict = MagicMock()
+
+ # Mock print_assistant_thoughts and logger.typewriter_log
+ with patch(
+ "autogpt.app.main.print_assistant_thoughts"
+ ) as mock_print_assistant_thoughts, patch(
+ "autogpt.app.main.logger.typewriter_log"
+ ) as mock_logger_typewriter_log:
+ # Test the update_user function with None command_name
+ update_user(config, ai_config, None, None, assistant_reply_dict)
+
+ # Check that print_assistant_thoughts was called once
+ mock_print_assistant_thoughts.assert_called_once_with(
+ ai_config.ai_name, assistant_reply_dict, config
+ )
+
+ # Check that logger.typewriter_log was called once with expected arguments
+ mock_logger_typewriter_log.assert_called_once_with(
+ "NO ACTION SELECTED: ",
+ Fore.RED,
+        "The Agent failed to select an action.",
+ )
diff --git a/tests/mocks/mock_commands.py b/tests/mocks/mock_commands.py
index 278894c4d..3758c1da2 100644
--- a/tests/mocks/mock_commands.py
+++ b/tests/mocks/mock_commands.py
@@ -1,5 +1,7 @@
from autogpt.command_decorator import command
+COMMAND_CATEGORY = "mock"
+
@command(
"function_based",
diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py
index 2cdf8701a..57de732a6 100644
--- a/tests/unit/test_commands.py
+++ b/tests/unit/test_commands.py
@@ -193,7 +193,7 @@ def test_import_mock_commands_module():
registry = CommandRegistry()
mock_commands_module = "tests.mocks.mock_commands"
- registry.import_commands(mock_commands_module)
+ registry.import_command_module(mock_commands_module)
assert "function_based" in registry
assert registry.commands["function_based"].name == "function_based"
@@ -219,7 +219,7 @@ def test_import_temp_command_file_module(tmp_path: Path):
sys.path.append(str(tmp_path))
temp_commands_module = "mock_commands"
- registry.import_commands(temp_commands_module)
+ registry.import_command_module(temp_commands_module)
# Remove the temp directory from sys.path
sys.path.remove(str(tmp_path))
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
index 6445ae786..9d63b26a3 100644
--- a/tests/unit/test_config.py
+++ b/tests/unit/test_config.py
@@ -3,6 +3,7 @@ Test cases for the config class, which handles the configuration settings
for the AI and ensures it behaves as a singleton.
"""
import os
+from typing import Any
from unittest import mock
from unittest.mock import patch
@@ -13,7 +14,7 @@ from autogpt.config import Config, ConfigBuilder
from autogpt.workspace.workspace import Workspace
-def test_initial_values(config: Config):
+def test_initial_values(config: Config) -> None:
"""
Test if the initial values of the config class attributes are set correctly.
"""
@@ -21,10 +22,10 @@ def test_initial_values(config: Config):
assert config.continuous_mode == False
assert config.speak_mode == False
assert config.fast_llm == "gpt-3.5-turbo"
- assert config.smart_llm == "gpt-4"
+ assert config.smart_llm == "gpt-4-0314"
-def test_set_continuous_mode(config: Config):
+def test_set_continuous_mode(config: Config) -> None:
"""
Test if the set_continuous_mode() method updates the continuous_mode attribute.
"""
@@ -38,7 +39,7 @@ def test_set_continuous_mode(config: Config):
config.continuous_mode = continuous_mode
-def test_set_speak_mode(config: Config):
+def test_set_speak_mode(config: Config) -> None:
"""
Test if the set_speak_mode() method updates the speak_mode attribute.
"""
@@ -52,7 +53,7 @@ def test_set_speak_mode(config: Config):
config.speak_mode = speak_mode
-def test_set_fast_llm(config: Config):
+def test_set_fast_llm(config: Config) -> None:
"""
Test if the set_fast_llm() method updates the fast_llm attribute.
"""
@@ -66,7 +67,7 @@ def test_set_fast_llm(config: Config):
config.fast_llm = fast_llm
-def test_set_smart_llm(config: Config):
+def test_set_smart_llm(config: Config) -> None:
"""
Test if the set_smart_llm() method updates the smart_llm attribute.
"""
@@ -80,7 +81,7 @@ def test_set_smart_llm(config: Config):
config.smart_llm = smart_llm
-def test_set_debug_mode(config: Config):
+def test_set_debug_mode(config: Config) -> None:
"""
Test if the set_debug_mode() method updates the debug_mode attribute.
"""
@@ -95,7 +96,7 @@ def test_set_debug_mode(config: Config):
@patch("openai.Model.list")
-def test_smart_and_fast_llms_set_to_gpt4(mock_list_models, config: Config):
+def test_smart_and_fast_llms_set_to_gpt4(mock_list_models: Any, config: Config) -> None:
"""
Test if models update to gpt-3.5-turbo if both are set to gpt-4.
"""
@@ -132,7 +133,7 @@ def test_smart_and_fast_llms_set_to_gpt4(mock_list_models, config: Config):
config.smart_llm = smart_llm
-def test_missing_azure_config(workspace: Workspace):
+def test_missing_azure_config(workspace: Workspace) -> None:
config_file = workspace.get_path("azure_config.yaml")
with pytest.raises(FileNotFoundError):
ConfigBuilder.load_azure_config(str(config_file))
diff --git a/tests/unit/test_file_operations.py b/tests/unit/test_file_operations.py
index d7d870a59..b3f1fb8f8 100644
--- a/tests/unit/test_file_operations.py
+++ b/tests/unit/test_file_operations.py
@@ -282,24 +282,6 @@ def test_append_to_file_uses_checksum_from_appended_file(
)
-def test_delete_file(test_file_with_content_path: Path, agent: Agent):
- result = file_ops.delete_file(str(test_file_with_content_path), agent=agent)
- assert result == "File deleted successfully."
- assert os.path.exists(test_file_with_content_path) is False
-
-
-def test_delete_missing_file(agent: Agent):
- filename = "path/to/file/which/does/not/exist"
- # confuse the log
- file_ops.log_operation("write", filename, agent=agent, checksum="fake")
- try:
- os.remove(agent.workspace.get_path(filename))
- except FileNotFoundError as err:
- assert str(err) in file_ops.delete_file(filename, agent=agent)
- return
- assert False, f"Failed to test delete_file; {filename} not expected to exist"
-
-
def test_list_files(workspace: Workspace, test_directory: Path, agent: Agent):
# Case 1: Create files A and B, search for A, and ensure we don't return A and B
file_a = workspace.get_path("file_a.txt")
diff --git a/tests/unit/test_prompt_config.py b/tests/unit/test_prompt_config.py
index 4616db971..b83efd0d5 100644
--- a/tests/unit/test_prompt_config.py
+++ b/tests/unit/test_prompt_config.py
@@ -18,10 +18,10 @@ resources:
- A test resource
- Another test resource
- A third test resource
-performance_evaluations:
-- A test performance evaluation
-- Another test performance evaluation
-- A third test performance evaluation
+best_practices:
+- A test best-practice
+- Another test best-practice
+- A third test best-practice
"""
prompt_settings_file = tmp_path / "test_prompt_settings.yaml"
prompt_settings_file.write_text(yaml_content)
@@ -36,13 +36,7 @@ performance_evaluations:
assert prompt_config.resources[0] == "A test resource"
assert prompt_config.resources[1] == "Another test resource"
assert prompt_config.resources[2] == "A third test resource"
- assert len(prompt_config.performance_evaluations) == 3
- assert prompt_config.performance_evaluations[0] == "A test performance evaluation"
- assert (
- prompt_config.performance_evaluations[1]
- == "Another test performance evaluation"
- )
- assert (
- prompt_config.performance_evaluations[2]
- == "A third test performance evaluation"
- )
+ assert len(prompt_config.best_practices) == 3
+ assert prompt_config.best_practices[0] == "A test best-practice"
+ assert prompt_config.best_practices[1] == "Another test best-practice"
+ assert prompt_config.best_practices[2] == "A third test best-practice"
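The rewritten test keeps the existing tmp_path + write_text pattern and only swaps performance_evaluations for best_practices. A minimal sketch of that pattern, parsing with PyYAML directly rather than going through PromptConfig (an assumption made to keep the sketch self-contained):

    import yaml

    def test_best_practices_roundtrip(tmp_path):
        settings = tmp_path / "prompt_settings.yaml"
        settings.write_text(
            "best_practices:\n"
            "- A test best-practice\n"
            "- Another test best-practice\n"
        )
        data = yaml.safe_load(settings.read_text())
        assert len(data["best_practices"]) == 2
        assert data["best_practices"][0] == "A test best-practice"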
diff --git a/tests/unit/test_prompt_generator.py b/tests/unit/test_prompt_generator.py
index 44147e6db..d1b08f1a0 100644
--- a/tests/unit/test_prompt_generator.py
+++ b/tests/unit/test_prompt_generator.py
@@ -20,13 +20,12 @@ def test_add_command():
params = {"arg1": "value1", "arg2": "value2"}
generator = PromptGenerator()
generator.add_command(command_label, command_name, params)
- command = {
+ assert generator.commands[0].__dict__ == {
"label": command_label,
"name": command_name,
"params": params,
"function": None,
}
- assert command in generator.commands
def test_add_resource():
@@ -39,18 +38,18 @@ def test_add_resource():
assert resource in generator.resources
-def test_add_performance_evaluation():
+def test_add_best_practice():
"""
- Test if the add_performance_evaluation() method adds an evaluation to the generator's
- performance_evaluation list.
+ Test if the add_best_practice() method adds a best practice to the generator's
+ best_practices list.
"""
- evaluation = "Evaluation1"
+ practice = "Practice1"
generator = PromptGenerator()
- generator.add_performance_evaluation(evaluation)
- assert evaluation in generator.performance_evaluation
+ generator.add_best_practice(practice)
+ assert practice in generator.best_practices
-def test_generate_prompt_string(config):
+def test_generate_prompt_string():
"""
Test if the generate_prompt_string() method generates a prompt string with all the added
constraints, commands, resources, and evaluations.
@@ -82,10 +81,10 @@ def test_generate_prompt_string(config):
for resource in resources:
generator.add_resource(resource)
for evaluation in evaluations:
- generator.add_performance_evaluation(evaluation)
+ generator.add_best_practice(evaluation)
# Generate the prompt string and verify its correctness
- prompt_string = generator.generate_prompt_string(config)
+ prompt_string = generator.generate_prompt_string()
assert prompt_string is not None
# Check if all constraints, commands, resources, and evaluations are present in the prompt string
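The test_add_command hunk trades a membership check (command in generator.commands) for a direct __dict__ comparison, which fails on any extra, missing, or changed attribute rather than only on a wholesale mismatch. A sketch of that assertion style with a hypothetical dataclass whose fields mirror the dict compared above:

    from dataclasses import dataclass
    from typing import Callable, Optional

    @dataclass
    class FakeCommand:
        # Hypothetical stand-in; field names mirror the dict compared
        # in the hunk above.
        label: str
        name: str
        params: dict
        function: Optional[Callable] = None

    def test_dict_comparison_is_exact():
        cmd = FakeCommand("Search", "web_search", {"query": "str"})
        # Any extra, missing, or renamed attribute makes this fail,
        # unlike a membership check against a hand-built dict.
        assert cmd.__dict__ == {
            "label": "Search",
            "name": "web_search",
            "params": {"query": "str"},
            "function": None,
        }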
diff --git a/tests/unit/test_spinner.py b/tests/unit/test_spinner.py
index 4b22f24cb..8f894b79d 100644
--- a/tests/unit/test_spinner.py
+++ b/tests/unit/test_spinner.py
@@ -1,7 +1,7 @@
# Generated by CodiumAI
import time
-from autogpt.spinner import Spinner
+from autogpt.app.spinner import Spinner
"""
Code Analysis
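The spinner hunk is purely an import-path update after the module moved under autogpt.app. Assuming the module was moved rather than left behind as a compatibility shim, the relocation can be asserted directly (a sketch, not part of this commit):

    import importlib
    import pytest

    def test_spinner_import_path():
        # New location, per the hunk above.
        importlib.import_module("autogpt.app.spinner")
        # Assumes the old module was removed, not shimmed.
        with pytest.raises(ModuleNotFoundError):
            importlib.import_module("autogpt.spinner")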
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index eb49908f3..43d8dc39e 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -4,15 +4,14 @@ from unittest.mock import patch
import pytest
import requests
-from autogpt.config import Config
-from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
-from autogpt.utils import (
+from autogpt.app.utils import (
get_bulletin_from_web,
get_current_git_branch,
get_latest_bulletin,
- readable_file_size,
- validate_yaml_file,
)
+from autogpt.config import Config
+from autogpt.json_utils.utilities import extract_dict_from_response, validate_dict
+from autogpt.utils import validate_yaml_file
from tests.utils import skip_in_ci
@@ -77,13 +76,6 @@ def test_validate_yaml_file_invalid():
assert "There was an issue while trying to read" in message
-def test_readable_file_size():
- size_in_bytes = 1024 * 1024 * 3.5 # 3.5 MB
- readable_size = readable_file_size(size_in_bytes)
-
- assert readable_size == "3.50 MB"
-
-
@patch("requests.get")
def test_get_bulletin_from_web_success(mock_get):
expected_content = "Test bulletin from web"
@@ -127,7 +119,7 @@ def test_get_latest_bulletin_with_file():
with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
f.write(expected_content)
- with patch("autogpt.utils.get_bulletin_from_web", return_value=""):
+ with patch("autogpt.app.utils.get_bulletin_from_web", return_value=""):
bulletin, is_new = get_latest_bulletin()
assert expected_content in bulletin
assert is_new == False
@@ -140,7 +132,9 @@ def test_get_latest_bulletin_with_new_bulletin():
f.write("Old bulletin")
expected_content = "New bulletin from web"
- with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content):
+ with patch(
+ "autogpt.app.utils.get_bulletin_from_web", return_value=expected_content
+ ):
bulletin, is_new = get_latest_bulletin()
assert "::NEW BULLETIN::" in bulletin
assert expected_content in bulletin
@@ -154,7 +148,9 @@ def test_get_latest_bulletin_new_bulletin_same_as_old_bulletin():
with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
f.write(expected_content)
- with patch("autogpt.utils.get_bulletin_from_web", return_value=expected_content):
+ with patch(
+ "autogpt.app.utils.get_bulletin_from_web", return_value=expected_content
+ ):
bulletin, is_new = get_latest_bulletin()
assert expected_content in bulletin
assert is_new == False
@@ -170,7 +166,7 @@ def test_get_current_git_branch():
assert branch_name != ""
-@patch("autogpt.utils.Repo")
+@patch("autogpt.app.utils.Repo")
def test_get_current_git_branch_success(mock_repo):
mock_repo.return_value.active_branch.name = "test-branch"
branch_name = get_current_git_branch()
@@ -178,7 +174,7 @@ def test_get_current_git_branch_success(mock_repo):
assert branch_name == "test-branch"
-@patch("autogpt.utils.Repo")
+@patch("autogpt.app.utils.Repo")
def test_get_current_git_branch_failure(mock_repo):
mock_repo.side_effect = Exception()
branch_name = get_current_git_branch()
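Every re-pointed @patch target in test_utils.py follows the same unittest.mock rule: patch the name where the code under test looks it up, which after the move is autogpt.app.utils, not autogpt.utils. A self-contained sketch of that rule, using a local stand-in that shells out instead of importing GitPython:

    import subprocess
    from unittest.mock import patch

    def current_branch() -> str:
        # Stand-in for get_current_git_branch; shells out so the
        # sketch stays self-contained instead of depending on Repo.
        try:
            out = subprocess.check_output(
                ["git", "rev-parse", "--abbrev-ref", "HEAD"]
            )
            return out.decode().strip()
        except Exception:
            return ""

    # Patch the module-level name this file resolves at call time --
    # the same rule that makes "autogpt.app.utils.Repo" the correct
    # target in the hunks above, rather than "git.Repo".
    @patch(f"{__name__}.subprocess")
    def test_branch_failure(mock_subprocess):
        mock_subprocess.check_output.side_effect = Exception()
        assert current_branch() == ""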