author    Reinier van der Leer <pwuts@agpt.co>  2024-01-29 18:29:24 +0100
committer GitHub <noreply@github.com>  2024-01-29 18:29:24 +0100
commit    575be818ca1f7c644e2adf94c584772547141f55 (patch)
tree      abb052ae787b1107c9dff8fb4d86ee84842d515d
parent    Update ossf-scorecard.yml (diff)
parent    fix(agent/json_utils): Make `extract_dict_from_response` more robust (diff)
download  Auto-GPT-security/analysis-workflows-sandbox.tar.gz
          Auto-GPT-security/analysis-workflows-sandbox.tar.bz2
          Auto-GPT-security/analysis-workflows-sandbox.zip
Merge branch 'master' into security/analysis-workflows-sandbox (security/analysis-workflows-sandbox)
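One of the merged parents hardens `extract_dict_from_response` in autogpt/json_utils, the helper that pulls a JSON dict out of an LLM reply. The diffstat below shows where that change lands; purely as an illustration of the kind of robustness involved, here is a minimal sketch, assuming the reply may wrap its JSON in a markdown fence or surround it with prose. The fence-stripping and brace-scanning behavior are assumptions for illustration, not the commit's actual implementation:

    import json
    import re

    def extract_dict_from_response(response_content: str) -> dict:
        """Illustrative sketch only; the merged commit's implementation may differ.

        Assumes the model may wrap its JSON in a fenced code block or
        surround it with prose, and tolerates both.
        """
        # Prefer the contents of a fenced code block, if one is present.
        fenced = re.search(r"```(?:json)?\s*(.*?)\s*```", response_content, re.DOTALL)
        if fenced:
            response_content = fenced.group(1)

        # Fall back to the outermost braces so leading/trailing prose is ignored.
        start, end = response_content.find("{"), response_content.rfind("}")
        if start == -1 or end == -1:
            raise ValueError("No JSON object found in response")

        result = json.loads(response_content[start : end + 1])
        if not isinstance(result, dict):
            raise ValueError(f"Expected a dict, got {type(result).__name__}")
        return result

Under these assumptions, a bare {"thoughts": {}} and the same object inside a ```json fence with surrounding prose would both parse to the same dict.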
-rw-r--r--  .devcontainer/Dockerfile  13
-rw-r--r--  .devcontainer/devcontainer.json  56
-rw-r--r--  .devcontainer/docker-compose.yml  12
-rw-r--r--  .dockerignore  9
-rw-r--r--  .env.template  212
-rw-r--r--  .envrc  4
-rw-r--r--  .flake8  2
-rw-r--r--  .gitattributes  6
-rw-r--r--  .github/CODEOWNERS  7
-rw-r--r--  .github/FUNDING.yml  3
-rw-r--r--  .github/ISSUE_TEMPLATE/1.bug.yml  44
-rw-r--r--  .github/ISSUE_TEMPLATE/2.feature.yml  4
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md  66
-rw-r--r--  .github/workflows/autogpt-ci.yml  250
-rw-r--r--  .github/workflows/autogpt-docker-cache-clean.yml  59
-rw-r--r--  .github/workflows/autogpt-docker-ci.yml  162
-rw-r--r--  .github/workflows/autogpt-docker-release.yml  90
-rw-r--r--  .github/workflows/autogpts-ci.yml  68
-rw-r--r--  .github/workflows/benchmark-ci.yml  140
-rw-r--r--  .github/workflows/benchmark_publish_package.yml  55
-rw-r--r--  .github/workflows/benchmarks.yml  73
-rw-r--r--  .github/workflows/build-frontend.yml  46
-rw-r--r--  .github/workflows/ci.yml  255
-rw-r--r--  .github/workflows/close-stale-issues.yml  34
-rw-r--r--  .github/workflows/docker-cache-clean.yml  58
-rw-r--r--  .github/workflows/docker-ci.yml  124
-rw-r--r--  .github/workflows/docker-release.yml  81
-rw-r--r--  .github/workflows/documentation-release.yml  37
-rw-r--r--  .github/workflows/hackathon.yml  133
-rw-r--r--  .github/workflows/pr-label.yml  6
-rw-r--r--  .github/workflows/repo-stats.yml  20
-rw-r--r--  .gitignore  25
-rw-r--r--  .gitmodules  7
-rw-r--r--  .isort.cfg  10
-rw-r--r--  .pre-commit-config.yaml  42
-rw-r--r--  BULLETIN.md  22
-rw-r--r--  CITATION.cff  21
-rwxr-xr-x  CLI-USAGE.md  182
-rw-r--r--  CODE_OF_CONDUCT.md  9
-rw-r--r--  CONTRIBUTING.md  36
-rw-r--r--  Dockerfile  46
-rw-r--r--  QUICKSTART.md  190
-rw-r--r--  README.md  168
-rw-r--r--  TROUBLESHOOTING.md  23
-rw-r--r--  arena/480bot.json  6
-rw-r--r--  arena/AGENT_GORDON.json  6
-rw-r--r--  arena/AGENT_JARVIS.json  6
-rw-r--r--  arena/AI.json  6
-rw-r--r--  arena/AKBAgent.json  7
-rw-r--r--  arena/ASSISTANT.json  6
-rw-r--r--  arena/AUTO_ENGINEER.json  6
-rw-r--r--  arena/AUTO_GPT_JON001.json  6
-rw-r--r--  arena/Adtractive_Agent.json  6
-rw-r--r--  arena/AgGPT.json  6
-rw-r--r--  arena/AgentJPark.json  6
-rw-r--r--  arena/AgentKD.json  6
-rw-r--r--  arena/Ahmad.json  6
-rw-r--r--  arena/Alfred.json  6
-rw-r--r--  arena/AlphaCISO.json  6
-rw-r--r--  arena/AndersLensway.json  6
-rw-r--r--  arena/AntlerTestGPT.json  1
-rw-r--r--  arena/AppleGPT.json  6
-rw-r--r--  arena/AquaAgent.json  1
-rw-r--r--  arena/ArtistManagerGPT.json  6
-rw-r--r--  arena/AskOpie.json  6
-rw-r--r--  arena/Auto.json  6
-rw-r--r--  arena/AutoGPT-ariel.json  6
-rw-r--r--  arena/AutoGPT2.json  1
-rw-r--r--  arena/AutoGenius.json  6
-rw-r--r--  arena/AutoTDD.json  6
-rw-r--r--  arena/AutoTestGenerator.json  6
-rw-r--r--  arena/AwareAgent.json  6
-rw-r--r--  arena/Bagi_agent.json  6
-rw-r--r--  arena/BanglaSgAgent.json  6
-rw-r--r--  arena/Baptiste.json  6
-rw-r--r--  arena/Bravo06.json  1
-rw-r--r--  arena/Brillante-AI.json  1
-rw-r--r--  arena/Bunny.json  6
-rw-r--r--  arena/CCAgent.json  6
-rw-r--r--  arena/CES-GPT.json  6
-rw-r--r--  arena/CISLERK.json  6
-rw-r--r--  arena/CONNECTBOT.json  6
-rw-r--r--  arena/CYNO_AGENT.json  6
-rw-r--r--  arena/ChadGPT.json  1
-rw-r--r--  arena/ChrisGPT.json  6
-rw-r--r--  arena/CodeAutoGPT.json  6
-rw-r--r--  arena/CreaitorMarketing.json  1
-rw-r--r--  arena/CurieAssistant.json  6
-rw-r--r--  arena/DE.json  6
-rw-r--r--  arena/DavidsAgent.json  6
-rw-r--r--  arena/Derpmaster.json  6
-rw-r--r--  arena/DevOpsAgent.json  6
-rw-r--r--  arena/Drench.json  6
-rw-r--r--  arena/Eduardo.json  6
-rw-r--r--  arena/EmbeddedAg.json  1
-rw-r--r--  arena/EnglishTestpaperAgent.json  6
-rw-r--r--  arena/ExampleAgent.json  6
-rw-r--r--  arena/FLASH.json  6
-rw-r--r--  arena/FactoryGPT.json  6
-rw-r--r--  arena/FcsummerGPT.json  6
-rw-r--r--  arena/FynAgent.json  6
-rw-r--r--  arena/GG.json  6
-rw-r--r--  arena/GPTTest.json  6
-rw-r--r--  arena/GameSoundGPT.json  6
-rw-r--r--  arena/GeorgeGPT.json  6
-rw-r--r--  arena/Granger.json  6
-rw-r--r--  arena/HACKATHON.json  6
-rw-r--r--  arena/HMD2.json  6
-rw-r--r--  arena/Heisenberg.json  6
-rw-r--r--  arena/HekolcuAutoGPT.json  6
-rw-r--r--  arena/HuitzilAiAgent.json  6
-rw-r--r--  arena/Hypeman.json  6
-rw-r--r--  arena/IncredibubbleTea.json  6
-rw-r--r--  arena/JackGPT.json  6
-rw-r--r--  arena/Jarvis.json  6
-rw-r--r--  arena/JarvisAgent.json  6
-rw-r--r--  arena/Jean-Michel.json  6
-rw-r--r--  arena/Job_GPT.json  6
-rw-r--r--  arena/JoshAgent1.json  6
-rw-r--r--  arena/KnowledgeExtractor.json  6
-rw-r--r--  arena/LAWYER_EMAD.json  6
-rw-r--r--  arena/LHRobot.json  6
-rw-r--r--  arena/Lab49Agent.json  6
-rw-r--r--  arena/LbAgent.json  6
-rw-r--r--  arena/LegalAgent.json  6
-rw-r--r--  arena/Light_Agent.json  6
-rw-r--r--  arena/LinuzGPT.json  1
-rw-r--r--  arena/Lirum.json  6
-rw-r--r--  arena/MANU.json  6
-rw-r--r--  arena/MEGATRON.json  6
-rw-r--r--  arena/MOBILE.json  6
-rw-r--r--  arena/Maharathi.json  1
-rw-r--r--  arena/MangoAI.json  6
-rw-r--r--  arena/MangoAgent-3.json  6
-rw-r--r--  arena/MangoAgent-4.json  6
-rw-r--r--  arena/MarketResearcherEduRob.json  6
-rw-r--r--  arena/Marx.json  6
-rw-r--r--  arena/Mary.json  6
-rw-r--r--  arena/Melang.json  6
-rw-r--r--  arena/Miao.json  6
-rw-r--r--  arena/MindwareGPT.json  6
-rw-r--r--  arena/Mira.json  6
-rw-r--r--  arena/MoTS.json  6
-rw-r--r--  arena/MojoBurrito.json  6
-rw-r--r--  arena/MyAgent.json  6
-rw-r--r--  arena/MyExample.json  6
-rw-r--r--  arena/MyExampleAgent.json  6
-rw-r--r--  arena/MyFirstAgent.json  6
-rw-r--r--  arena/MyFistAgent.json  6
-rw-r--r--  arena/MyTestAgent.json  6
-rw-r--r--  arena/N.json  6
-rw-r--r--  arena/NASAssistant2.json  6
-rw-r--r--  arena/NHAN_BOT.json  6
-rw-r--r--  arena/NadeemAgent.json  6
-rw-r--r--  arena/NanAutoGPT.json  6
-rw-r--r--  arena/NoobSupreme.json  6
-rw-r--r--  arena/NumberOne.json  6
-rw-r--r--  arena/Orange.json  6
-rw-r--r--  arena/PAgentAI.json  6
-rw-r--r--  arena/Pacific.json  6
-rw-r--r--  arena/ParalegalAgent.json  6
-rw-r--r--  arena/Pelle.json  6
-rw-r--r--  arena/Portalen.json  1
-rw-r--r--  arena/Pumu2_agent.json  6
-rw-r--r--  arena/Q.json  6
-rw-r--r--  arena/QA_AGENT.json  6
-rw-r--r--  arena/QuantumQuill.json  6
-rw-r--r--  arena/RAGOptimizer.json  6
-rw-r--r--  arena/RFPScanner.json  6
-rw-r--r--  arena/RONNIN.json  6
-rw-r--r--  arena/RagsToRiches.json  6
-rw-r--r--  arena/RandomVampirePictureBot.json  6
-rw-r--r--  arena/Raslebot.json  6
-rw-r--r--  arena/ResearchAgent.json  1
-rw-r--r--  arena/RosterAgent.json  6
-rw-r--r--  arena/SaasWebDev.json  6
-rw-r--r--  arena/SaveAsPDF2.json  6
-rw-r--r--  arena/ShiviBot.json  6
-rw-r--r--  arena/SkorkobaniecAgent.json  6
-rw-r--r--  arena/SmartAgent.json  6
-rw-r--r--  arena/SmartGPT.json  6
-rw-r--r--  arena/SouAgent.json  6
-rw-r--r--  arena/Stragegy_Steve.json  6
-rw-r--r--  arena/Susan.json  6
-rw-r--r--  arena/TEST_TPK.json  6
-rw-r--r--  arena/TLGPT.json  6
-rw-r--r--  arena/TMarafon.json  6
-rw-r--r--  arena/TRAVIS.json  6
-rw-r--r--  arena/TeslaBot.json  6
-rw-r--r--  arena/Tessa_AutoGPT_agent.json  6
-rw-r--r--  arena/TestAgent.json  6
-rw-r--r--  arena/TestLbAgent.json  6
-rw-r--r--  arena/TheAgency.json  1
-rw-r--r--  arena/TheAgent.json  6
-rw-r--r--  arena/TraceLLMAgent.json  6
-rw-r--r--  arena/UGYUJI.json  6
-rw-r--r--  arena/UTC-Crew.json  6
-rw-r--r--  arena/UmaruAgent.json  6
-rw-r--r--  arena/UniAgent.json  6
-rw-r--r--  arena/Verkiezingsprogrammas.json  6
-rw-r--r--  arena/WRITER.json  6
-rw-r--r--  arena/WYC.json  6
-rw-r--r--  arena/WarlockAgent.json  6
-rw-r--r--  arena/WeatherInformer.json  6
-rw-r--r--  arena/WiseAgent.json  1
-rw-r--r--  arena/XXY.json  6
-rw-r--r--  arena/YOU.json  6
-rw-r--r--  arena/YoudaoAutoGPT.json  6
-rw-r--r--  arena/YoutubePost_agent.json  6
-rw-r--r--  arena/Yui3.json  6
-rw-r--r--  arena/Yutan_agent.json  6
-rw-r--r--  arena/ZJgpt.json  6
-rw-r--r--  arena/Zeus.json  6
-rw-r--r--  arena/ZhaoJianAutoGPT.json  6
-rw-r--r--  arena/ZoeyGPT.json  6
-rw-r--r--  arena/Zoidberg.json  6
-rw-r--r--  arena/aWOL.json  6
-rw-r--r--  arena/a_reverent_heart.json  6
-rw-r--r--  arena/accidental-agent.json  6
-rw-r--r--  arena/actor_tester.json  6
-rw-r--r--  arena/admariner.json  1
-rw-r--r--  arena/ag1.json  6
-rw-r--r--  arena/agent2.json  6
-rw-r--r--  arena/agentSmith.json  6
-rw-r--r--  arena/agent_2.json  6
-rw-r--r--  arena/agentgpt.json  6
-rw-r--r--  arena/agsCehAgent.json  6
-rw-r--r--  arena/ai_assistant.json  6
-rw-r--r--  arena/aiaudit.json  6
-rw-r--r--  arena/aiwowo.json  6
-rw-r--r--  arena/aixiaoxin.json  6
-rw-r--r--  arena/akela.json  6
-rw-r--r--  arena/analystgpt.json  6
-rw-r--r--  arena/arbetsformedlingen.json  6
-rw-r--r--  arena/assistant1.json  6
-rw-r--r--  arena/autoai.json  6
-rw-r--r--  arena/autocoder.json  6
-rw-r--r--  arena/autogbd.json  6
-rw-r--r--  arena/autogpt-hackathon2.json  1
-rw-r--r--  arena/autogpt.json  6
-rw-r--r--  arena/autogpt_hackathon.json  1
-rw-r--r--  arena/autogpt_hackathon1.json  1
-rw-r--r--  arena/autogpt_warlock.json  6
-rw-r--r--  arena/autogptagent.json  6
-rw-r--r--  arena/avengaGPT.json  6
-rw-r--r--  arena/babe_perphorator_.json  6
-rw-r--r--  arena/baby_agent.json  6
-rw-r--r--  arena/bait.json  6
-rw-r--r--  arena/beyond.json  6
-rw-r--r--  arena/bigman.json  6
-rw-r--r--  arena/billy.json  6
-rw-r--r--  arena/bingoTesting.json  6
-rw-r--r--  arena/bosaeed_agent.json  6
-rw-r--r--  arena/bot.json  6
-rw-r--r--  arena/bot01.json  6
-rw-r--r--  arena/buddy.json  6
-rw-r--r--  arena/burt.json  6
-rw-r--r--  arena/business.json  6
-rw-r--r--  arena/byl.json  6
-rw-r--r--  arena/career-agent.json  1
-rw-r--r--  arena/caud.json  6
-rw-r--r--  arena/ccace.json  6
-rw-r--r--  arena/chappigpt.json  6
-rw-r--r--  arena/chappyAi.json  6
-rw-r--r--  arena/chatgpt_taller.json  6
-rw-r--r--  arena/chengshu.json  6
-rw-r--r--  arena/chenzo.json  6
-rw-r--r--  arena/cislerk2.json  6
-rw-r--r--  arena/codebutler.json  1
-rw-r--r--  arena/coder_first.json  6
-rw-r--r--  arena/contentstrategy.json  1
-rw-r--r--  arena/cssupdater.json  6
-rw-r--r--  arena/da-agent.json  6
-rw-r--r--  arena/date-buffer.json  6
-rw-r--r--  arena/davidtest1.json  6
-rw-r--r--  arena/davidtestagent.json  6
-rw-r--r--  arena/dda.json  6
-rw-r--r--  arena/decision-maker.json  6
-rw-r--r--  arena/dev_agent.json  6
-rw-r--r--  arena/devagent.json  6
-rw-r--r--  arena/dive2code.json  1
-rw-r--r--  arena/dndagent.json  6
-rw-r--r--  arena/dy_agent.json  6
-rw-r--r--  arena/dy_agent2.json  6
-rw-r--r--  arena/easn.json  6
-rw-r--r--  arena/eddy.json  6
-rw-r--r--  arena/ekc911_agent.json  6
-rw-r--r--  arena/engineer.json  6
-rw-r--r--  arena/evlyn.json  6
-rw-r--r--  arena/evo-ninja.json  6
-rw-r--r--  arena/evo.json  6
-rw-r--r--  arena/faran.json  6
-rw-r--r--  arena/first-agent.json  6
-rw-r--r--  arena/foobar.json  6
-rw-r--r--  arena/frankgarcia.json  6
-rw-r--r--  arena/fritzgpt.json  1
-rw-r--r--  arena/fst.json  6
-rw-r--r--  arena/fuzz_gen.json  6
-rw-r--r--  arena/gaby_agent.json  6
-rw-r--r--  arena/gen_fuzz.json  6
-rw-r--r--  arena/ghostcoder.json  1
-rw-r--r--  arena/gipity.json  6
-rw-r--r--  arena/gpt-dev-engineer-agent.json  1
-rw-r--r--  arena/gpt-eng-forge.json  6
-rw-r--r--  arena/gpt-engineer.json  1
-rw-r--r--  arena/gpt_for_beans.json  6
-rw-r--r--  arena/hall_oto.json  6
-rw-r--r--  arena/han.json  6
-rw-r--r--  arena/happy_guy.json  6
-rw-r--r--  arena/hello.json  6
-rw-r--r--  arena/hodri.json  6
-rw-r--r--  arena/houxe.json  6
-rw-r--r--  arena/icode.json  6
-rw-r--r--  arena/iku2.json  6
-rw-r--r--  arena/illynet.json  6
-rw-r--r--  arena/illynetV2.json  6
-rw-r--r--  arena/illyx1.json  6
-rw-r--r--  arena/info-retrieval.json  6
-rw-r--r--  arena/ivangpt_agent.json  6
-rw-r--r--  arena/jarvis2.json  6
-rw-r--r--  arena/jarvis3.json  6
-rw-r--r--  arena/jaxbob1.json  6
-rw-r--r--  arena/job_apply.json  6
-rw-r--r--  arena/jonesyboi.json  6
-rw-r--r--  arena/justwondering.json  1
-rw-r--r--  arena/kingmitch.json  6
-rw-r--r--  arena/lawk.json  6
-rw-r--r--  arena/lcdegpt.json  6
-rw-r--r--  arena/letst.json  6
-rw-r--r--  arena/letstest.json  6
-rw-r--r--  arena/lilAgent.json  6
-rw-r--r--  arena/linggong.json  6
-rw-r--r--  arena/liuzh.json  6
-rw-r--r--  arena/ltzAgent.json  6
-rw-r--r--  arena/martingpt.json  6
-rw-r--r--  arena/medical-agent.json  6
-rw-r--r--  arena/metware.json  6
-rw-r--r--  arena/miniAgent.json  6
-rw-r--r--  arena/minister_agent.json  6
-rw-r--r--  arena/misslu.json  6
-rw-r--r--  arena/mljar-agent.json  1
-rw-r--r--  arena/momo.json  1
-rw-r--r--  arena/monthly_summary.json  6
-rw-r--r--  arena/mrSabelotodo.json  6
-rw-r--r--  arena/myGPT.json  6
-rw-r--r--  arena/my_AutoGPT.json  6
-rw-r--r--  arena/my_fx_agent.json  6
-rw-r--r--  arena/my_gpt.json  6
-rw-r--r--  arena/mygent.json  6
-rw-r--r--  arena/nawalj.json  6
-rw-r--r--  arena/newAgent.json  6
-rw-r--r--  arena/northfork.json  6
-rw-r--r--  arena/od_agent_1.json  6
-rw-r--r--  arena/operationAgent.json  6
-rw-r--r--  arena/personal-al-website.json  6
-rw-r--r--  arena/piGPT.json  6
-rw-r--r--  arena/pipeline.json  6
-rw-r--r--  arena/podcast_agent.json  6
-rw-r--r--  arena/potato.json  1
-rw-r--r--  arena/project_assitant.json  6
-rw-r--r--  arena/project_master.json  6
-rw-r--r--  arena/project_review.json  6
-rw-r--r--  arena/prometheus.json  6
-rw-r--r--  arena/proudgpt.json  6
-rw-r--r--  arena/qinghu3.json  6
-rw-r--r--  arena/ra.json  6
-rw-r--r--  arena/ra1.json  6
-rw-r--r--  arena/rachael.json  6
-rw-r--r--  arena/raindrop.json  6
-rw-r--r--  arena/researchGPT.json  6
-rw-r--r--  arena/researchGPT2.json  6
-rw-r--r--  arena/research_analyst.json  6
-rw-r--r--  arena/robita.json  6
-rw-r--r--  arena/robot.json  6
-rw-r--r--  arena/searchagent.json  6
-rw-r--r--  arena/set.json  6
-rw-r--r--  arena/sgpt.json  6
-rw-r--r--  arena/shivi.json  6
-rw-r--r--  arena/sky.json  6
-rw-r--r--  arena/smith.json  6
-rw-r--r--  arena/songyalei.json  6
-rw-r--r--  arena/sql.json  6
-rw-r--r--  arena/stefan.json  6
-rw-r--r--  arena/stockAgent.json  6
-rw-r--r--  arena/swarms.json  1
-rw-r--r--  arena/tdev.json  6
-rw-r--r--  arena/teacher.json  6
-rw-r--r--  arena/test-tpk.json  6
-rw-r--r--  arena/test.json  6
-rw-r--r--  arena/test1.json  6
-rw-r--r--  arena/testGPT.json  6
-rw-r--r--  arena/thebestagent.json  1
-rw-r--r--  arena/theone.json  1
-rw-r--r--  arena/tiffGPT.json  6
-rw-r--r--  arena/trend_agent.json  6
-rw-r--r--  arena/umiuni_agent.json  6
-rw-r--r--  arena/uply.json  1
-rw-r--r--  arena/url-to-lead.json  6
-rw-r--r--  arena/v-gpt.json  6
-rw-r--r--  arena/victor2-0.json  6
-rw-r--r--  arena/web_developer.json  6
-rw-r--r--  arena/webagent.json  6
-rw-r--r--  arena/webgeek.json  6
-rw-r--r--  arena/wedding-planner.json  6
-rw-r--r--  arena/woohoo_agent.json  6
-rw-r--r--  arena/wyjagent.json  6
-rw-r--r--  arena/xmly.json  6
-rw-r--r--  arena/xq_agent.json  6
-rw-r--r--  arena/xt0m-GPT.json  6
-rw-r--r--  arena/xtest.json  6
-rw-r--r--  arena/yarbis.json  6
-rw-r--r--  arena/zaheer.json  6
-rw-r--r--  arena/zcb.json  6
-rw-r--r--  arena/zczc.json  6
-rw-r--r--  arena/zhizhi.json  6
-rw-r--r--  arena/zlipknot_1.json  6
-rw-r--r--  arena/zlipknot_test_agent_4.json  6
-rw-r--r--  arena/zze.json  6
-rw-r--r--  autogpt/__init__.py  14
-rw-r--r--  autogpt/__main__.py  5
-rw-r--r--  autogpt/agent/__init__.py  3
-rw-r--r--  autogpt/agent/agent.py  309
-rw-r--r--  autogpt/app.py  114
-rw-r--r--  autogpt/cli.py  140
-rw-r--r--  autogpt/command_decorator.py  57
-rw-r--r--  autogpt/commands/decorators.py  64
-rw-r--r--  autogpt/commands/execute_code.py  303
-rw-r--r--  autogpt/commands/file_operations.py  340
-rw-r--r--  autogpt/commands/file_operations_utils.py  161
-rw-r--r--  autogpt/commands/git_operations.py  52
-rw-r--r--  autogpt/commands/image_gen.py  200
-rw-r--r--  autogpt/commands/task_statuses.py  33
-rw-r--r--  autogpt/commands/web_search.py  143
-rw-r--r--  autogpt/commands/web_selenium.py  237
-rw-r--r--  autogpt/config/__init__.py  12
-rw-r--r--  autogpt/config/ai_config.py  165
-rw-r--r--  autogpt/config/config.py  373
-rw-r--r--  autogpt/config/prompt_config.py  47
-rw-r--r--  autogpt/configurator.py  167
-rw-r--r--  autogpt/core/README.md  62
-rw-r--r--  autogpt/core/ability/__init__.py  4
-rw-r--r--  autogpt/core/ability/base.py  92
-rw-r--r--  autogpt/core/ability/builtins/__init__.py  6
-rw-r--r--  autogpt/core/ability/builtins/create_new_ability.py  102
-rw-r--r--  autogpt/core/ability/builtins/file_operations.py  167
-rw-r--r--  autogpt/core/ability/builtins/query_language_model.py  78
-rw-r--r--  autogpt/core/ability/simple.py  96
-rw-r--r--  autogpt/core/agent/__init__.py  3
-rw-r--r--  autogpt/core/agent/simple.py  391
-rw-r--r--  autogpt/core/configuration/__init__.py  7
-rw-r--r--  autogpt/core/configuration/schema.py  107
-rw-r--r--  autogpt/core/memory/__init__.py  3
-rw-r--r--  autogpt/core/planning/__init__.py  10
-rw-r--r--  autogpt/core/planning/base.py  76
-rw-r--r--  autogpt/core/planning/schema.py  76
-rw-r--r--  autogpt/core/planning/simple.py  182
-rw-r--r--  autogpt/core/planning/strategies/__init__.py  12
-rw-r--r--  autogpt/core/planning/strategies/initial_plan.py  190
-rw-r--r--  autogpt/core/planning/strategies/name_and_goals.py  139
-rw-r--r--  autogpt/core/planning/strategies/next_ability.py  183
-rw-r--r--  autogpt/core/planning/strategies/utils.py  27
-rw-r--r--  autogpt/core/planning/templates.py  102
-rw-r--r--  autogpt/core/plugin/__init__.py  2
-rw-r--r--  autogpt/core/plugin/base.py  155
-rw-r--r--  autogpt/core/plugin/simple.py  74
-rw-r--r--  autogpt/core/resource/__init__.py  7
-rw-r--r--  autogpt/core/resource/model_providers/__init__.py  44
-rw-r--r--  autogpt/core/resource/model_providers/openai.py  373
-rw-r--r--  autogpt/core/resource/model_providers/schema.py  219
-rw-r--r--  autogpt/core/resource/schema.py  57
-rw-r--r--  autogpt/core/runner/cli_app/cli.py  47
-rw-r--r--  autogpt/core/runner/cli_app/main.py  110
-rw-r--r--  autogpt/core/runner/cli_web_app/cli.py  101
-rw-r--r--  autogpt/core/runner/cli_web_app/client/client.py  16
-rw-r--r--  autogpt/core/runner/cli_web_app/server/api.py  48
-rw-r--r--  autogpt/core/runner/cli_web_app/server/schema.py  36
-rw-r--r--  autogpt/core/runner/cli_web_app/server/services/users.py  20
-rw-r--r--  autogpt/core/runner/client_lib/logging.py  20
-rw-r--r--  autogpt/core/runner/client_lib/utils.py  61
-rw-r--r--  autogpt/core/workspace/__init__.py  3
-rw-r--r--  autogpt/core/workspace/simple.py  193
-rw-r--r--  autogpt/js/overlay.js  29
-rw-r--r--  autogpt/json_utils/llm_response_format_1.json  45
-rw-r--r--  autogpt/json_utils/utilities.py  73
-rw-r--r--  autogpt/llm/__init__.py  19
-rw-r--r--  autogpt/llm/api_manager.py  110
-rw-r--r--  autogpt/llm/base.py  195
-rw-r--r--  autogpt/llm/chat.py  203
-rw-r--r--  autogpt/llm/providers/openai.py  429
-rw-r--r--  autogpt/llm/utils/__init__.py  217
-rw-r--r--  autogpt/llm/utils/token_counter.py  85
-rw-r--r--  autogpt/logs/__init__.py  15
-rw-r--r--  autogpt/logs/formatters.py  41
-rw-r--r--  autogpt/logs/handlers.py  47
-rw-r--r--  autogpt/logs/log_cycle.py  88
-rw-r--r--  autogpt/logs/logger.py  203
-rw-r--r--  autogpt/logs/utils.py  65
-rw-r--r--  autogpt/main.py  205
-rw-r--r--  autogpt/memory/message_history.py  235
-rw-r--r--  autogpt/memory/vector/__init__.py  153
-rw-r--r--  autogpt/memory/vector/memory_item.py  256
-rw-r--r--  autogpt/memory/vector/providers/base.py  77
-rw-r--r--  autogpt/memory/vector/providers/json_file.py  87
-rw-r--r--  autogpt/memory/vector/utils.py  67
-rw-r--r--  autogpt/models/base_open_ai_plugin.py  249
-rw-r--r--  autogpt/models/command.py  49
-rw-r--r--  autogpt/models/command_parameter.py  12
-rw-r--r--  autogpt/models/command_registry.py  114
-rw-r--r--  autogpt/plugins/__init__.py  320
-rw-r--r--  autogpt/plugins/plugins_config.py  112
-rw-r--r--  autogpt/processing/html.py  33
-rw-r--r--  autogpt/processing/text.py  245
-rw-r--r--  autogpt/prompts/default_prompts.py  29
-rw-r--r--  autogpt/prompts/generator.py  180
-rw-r--r--  autogpt/prompts/prompt.py  132
-rw-r--r--  autogpt/setup.py  238
-rw-r--r--  autogpt/singleton.py  22
-rw-r--r--  autogpt/speech/__init__.py  4
-rw-r--r--  autogpt/speech/base.py  60
-rw-r--r--  autogpt/speech/eleven_labs.py  91
-rw-r--r--  autogpt/speech/gtts.py  23
-rw-r--r--  autogpt/speech/macos_tts.py  22
-rw-r--r--  autogpt/speech/say.py  50
-rw-r--r--  autogpt/speech/stream_elements_speech.py  44
-rw-r--r--  autogpt/spinner.py  76
-rw-r--r--  autogpt/url_utils/validators.py  107
-rw-r--r--  autogpt/utils.py  181
-rw-r--r--  autogpt/workspace/__init__.py  5
-rw-r--r--  autogpt/workspace/workspace.py  162
-rw-r--r--  autogpts/autogpt/.coveragerc (renamed from .coveragerc)  0
-rw-r--r--  autogpts/autogpt/.devcontainer/Dockerfile  13
-rw-r--r--  autogpts/autogpt/.devcontainer/devcontainer.json  56
-rw-r--r--  autogpts/autogpt/.devcontainer/docker-compose.yml  12
-rw-r--r--  autogpts/autogpt/.dockerignore  14
-rw-r--r--  autogpts/autogpt/.env.template  233
-rw-r--r--  autogpts/autogpt/.envrc  4
-rw-r--r--  autogpts/autogpt/.flake8  11
-rw-r--r--  autogpts/autogpt/.gitattributes  5
-rw-r--r--  autogpts/autogpt/.gitignore  169
-rw-r--r--  autogpts/autogpt/.pre-commit-config.yaml  42
-rw-r--r--  autogpts/autogpt/.sourcery.yaml (renamed from .sourcery.yaml)  0
-rw-r--r--  autogpts/autogpt/BULLETIN.md  13
-rw-r--r--  autogpts/autogpt/Dockerfile  56
-rw-r--r--  autogpts/autogpt/README.md  180
-rw-r--r--  autogpts/autogpt/agbenchmark_config/.gitignore  3
-rw-r--r--  autogpts/autogpt/agbenchmark_config/__init__.py (renamed from autogpt/commands/__init__.py)  0
-rw-r--r--  autogpts/autogpt/agbenchmark_config/analyze_reports.py  143
-rw-r--r--  autogpts/autogpt/agbenchmark_config/benchmarks.py  74
-rw-r--r--  autogpts/autogpt/agbenchmark_config/config.json  8
-rw-r--r--  autogpts/autogpt/autogpt.bat  27
-rwxr-xr-x  autogpts/autogpt/autogpt.sh  29
-rw-r--r--  autogpts/autogpt/autogpt/__init__.py  7
-rw-r--r--  autogpts/autogpt/autogpt/__main__.py  5
-rw-r--r--  autogpts/autogpt/autogpt/agent_factory/configurators.py  113
-rw-r--r--  autogpts/autogpt/autogpt/agent_factory/generators.py  31
-rw-r--r--  autogpts/autogpt/autogpt/agent_factory/profile_generator.py  255
-rw-r--r--  autogpts/autogpt/autogpt/agent_manager/__init__.py  3
-rw-r--r--  autogpts/autogpt/autogpt/agent_manager/agent_manager.py  48
-rw-r--r--  autogpts/autogpt/autogpt/agents/__init__.py  4
-rw-r--r--  autogpts/autogpt/autogpt/agents/agent.py  329
-rw-r--r--  autogpts/autogpt/autogpt/agents/base.py  431
-rw-r--r--  autogpts/autogpt/autogpt/agents/features/context.py  82
-rw-r--r--  autogpts/autogpt/autogpt/agents/features/file_workspace.py  65
-rw-r--r--  autogpts/autogpt/autogpt/agents/features/watchdog.py  76
-rw-r--r--  autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py  484
-rw-r--r--  autogpts/autogpt/autogpt/agents/utils/agent_file_manager.py  37
-rw-r--r--  autogpts/autogpt/autogpt/agents/utils/exceptions.py  60
-rw-r--r--  autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py  108
-rw-r--r--  autogpts/autogpt/autogpt/app/__init__.py  6
-rw-r--r--  autogpts/autogpt/autogpt/app/agent_protocol_server.py  481
-rw-r--r--  autogpts/autogpt/autogpt/app/cli.py  283
-rw-r--r--  autogpts/autogpt/autogpt/app/configurator.py  201
-rw-r--r--  autogpts/autogpt/autogpt/app/main.py  759
-rw-r--r--  autogpts/autogpt/autogpt/app/setup.py  193
-rw-r--r--  autogpts/autogpt/autogpt/app/spinner.py  70
-rw-r--r--  autogpts/autogpt/autogpt/app/utils.py  190
-rw-r--r--  autogpts/autogpt/autogpt/command_decorator.py  70
-rw-r--r--  autogpts/autogpt/autogpt/commands/__init__.py  9
-rw-r--r--  autogpts/autogpt/autogpt/commands/decorators.py  82
-rw-r--r--  autogpts/autogpt/autogpt/commands/execute_code.py  349
-rw-r--r--  autogpts/autogpt/autogpt/commands/file_context.py  131
-rw-r--r--  autogpts/autogpt/autogpt/commands/file_operations.py  268
-rw-r--r--  autogpts/autogpt/autogpt/commands/file_operations_utils.py  151
-rw-r--r--  autogpts/autogpt/autogpt/commands/git_operations.py  58
-rw-r--r--  autogpts/autogpt/autogpt/commands/image_gen.py  212
-rw-r--r--  autogpts/autogpt/autogpt/commands/system.py  69
-rw-r--r--  autogpts/autogpt/autogpt/commands/times.py (renamed from autogpt/commands/times.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/commands/user_interaction.py  32
-rw-r--r--  autogpts/autogpt/autogpt/commands/web_search.py  171
-rw-r--r--  autogpts/autogpt/autogpt/commands/web_selenium.py  304
-rw-r--r--  autogpts/autogpt/autogpt/config/__init__.py  14
-rw-r--r--  autogpts/autogpt/autogpt/config/ai_directives.py  48
-rw-r--r--  autogpts/autogpt/autogpt/config/ai_profile.py  68
-rw-r--r--  autogpts/autogpt/autogpt/config/config.py  336
-rw-r--r--  autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md  271
-rw-r--r--  autogpts/autogpt/autogpt/core/README.md  92
-rw-r--r--  autogpts/autogpt/autogpt/core/__init__.py (renamed from autogpt/core/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/ability/__init__.py  18
-rw-r--r--  autogpts/autogpt/autogpt/core/ability/base.py  88
-rw-r--r--  autogpts/autogpt/autogpt/core/ability/builtins/__init__.py  12
-rw-r--r--  autogpts/autogpt/autogpt/core/ability/builtins/create_new_ability.py  107
-rw-r--r--  autogpts/autogpt/autogpt/core/ability/builtins/file_operations.py  170
-rw-r--r--  autogpts/autogpt/autogpt/core/ability/builtins/query_language_model.py  66
-rw-r--r--  autogpts/autogpt/autogpt/core/ability/schema.py (renamed from autogpt/core/ability/schema.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/ability/simple.py  97
-rw-r--r--  autogpts/autogpt/autogpt/core/agent/__init__.py  9
-rw-r--r--  autogpts/autogpt/autogpt/core/agent/base.py (renamed from autogpt/core/agent/base.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/agent/simple.py  404
-rw-r--r--  autogpts/autogpt/autogpt/core/configuration/__init__.py  14
-rw-r--r--  autogpts/autogpt/autogpt/core/configuration/schema.py  351
-rw-r--r--  autogpts/autogpt/autogpt/core/memory/__init__.py  9
-rw-r--r--  autogpts/autogpt/autogpt/core/memory/base.py (renamed from autogpt/core/memory/base.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/memory/simple.py (renamed from autogpt/core/memory/simple.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/__init__.py  11
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/base.py  54
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/prompt_strategies/__init__.py  12
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py  206
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py  150
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py  203
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/schema.py  48
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/simple.py  188
-rw-r--r--  autogpts/autogpt/autogpt/core/planning/templates.py  84
-rw-r--r--  autogpts/autogpt/autogpt/core/plugin/__init__.py  6
-rw-r--r--  autogpts/autogpt/autogpt/core/plugin/base.py  162
-rw-r--r--  autogpts/autogpt/autogpt/core/plugin/simple.py  75
-rw-r--r--  autogpts/autogpt/autogpt/core/poetry.lock  1345
-rw-r--r--  autogpts/autogpt/autogpt/core/prompting/__init__.py  8
-rw-r--r--  autogpts/autogpt/autogpt/core/prompting/base.py  23
-rw-r--r--  autogpts/autogpt/autogpt/core/prompting/schema.py  34
-rw-r--r--  autogpts/autogpt/autogpt/core/prompting/utils.py  29
-rw-r--r--  autogpts/autogpt/autogpt/core/pyproject.toml  77
-rw-r--r--  autogpts/autogpt/autogpt/core/resource/__init__.py  15
-rw-r--r--  autogpts/autogpt/autogpt/core/resource/model_providers/__init__.py  65
-rw-r--r--  autogpts/autogpt/autogpt/core/resource/model_providers/openai.py  732
-rw-r--r--  autogpts/autogpt/autogpt/core/resource/model_providers/schema.py  358
-rw-r--r--  autogpts/autogpt/autogpt/core/resource/schema.py  75
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/__init__.py (renamed from autogpt/core/runner/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/cli_app/__init__.py (renamed from autogpt/core/runner/cli_app/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/cli_app/cli.py  47
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/cli_app/main.py  74
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/cli_web_app/__init__.py (renamed from autogpt/core/runner/cli_web_app/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/cli_web_app/cli.py  58
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/cli_web_app/server/__init__.py (renamed from autogpt/core/runner/cli_web_app/client/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py  97
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/client_lib/__init__.py (renamed from autogpt/core/runner/cli_web_app/server/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/client_lib/logging/__init__.py  22
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/client_lib/logging/config.py  82
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/client_lib/logging/helpers.py  23
-rwxr-xr-x  autogpts/autogpt/autogpt/core/runner/client_lib/parser.py  45
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/client_lib/settings.py (renamed from autogpt/core/runner/client_lib/settings.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/client_lib/shared_click_commands.py (renamed from autogpt/core/runner/client_lib/shared_click_commands.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/runner/client_lib/utils.py  62
-rw-r--r--  autogpts/autogpt/autogpt/core/utils/json_schema.py  146
-rw-r--r--  autogpts/autogpt/autogpt/core/workspace/__init__.py  9
-rw-r--r--  autogpts/autogpt/autogpt/core/workspace/base.py (renamed from autogpt/core/workspace/base.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/core/workspace/simple.py  194
-rw-r--r--  autogpts/autogpt/autogpt/file_workspace/__init__.py  46
-rw-r--r--  autogpts/autogpt/autogpt/file_workspace/base.py  164
-rw-r--r--  autogpts/autogpt/autogpt/file_workspace/gcs.py  113
-rw-r--r--  autogpts/autogpt/autogpt/file_workspace/local.py  71
-rw-r--r--  autogpts/autogpt/autogpt/file_workspace/s3.py  128
-rw-r--r--  autogpts/autogpt/autogpt/json_utils/__init__.py (renamed from autogpt/core/runner/cli_web_app/server/services/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/json_utils/utilities.py  33
-rw-r--r--  autogpts/autogpt/autogpt/llm/api_manager.py  115
-rw-r--r--  autogpts/autogpt/autogpt/llm/providers/__init__.py (renamed from autogpt/core/runner/client_lib/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/llm/providers/openai.py  28
-rw-r--r--  autogpts/autogpt/autogpt/logs/__init__.py  26
-rw-r--r--  autogpts/autogpt/autogpt/logs/config.py  203
-rw-r--r--  autogpts/autogpt/autogpt/logs/filters.py  12
-rw-r--r--  autogpts/autogpt/autogpt/logs/formatters.py  53
-rw-r--r--  autogpts/autogpt/autogpt/logs/handlers.py  81
-rw-r--r--  autogpts/autogpt/autogpt/logs/helpers.py  70
-rw-r--r--  autogpts/autogpt/autogpt/logs/log_cycle.py  81
-rw-r--r--  autogpts/autogpt/autogpt/logs/utils.py  9
-rw-r--r--  autogpts/autogpt/autogpt/memory/vector/__init__.py  156
-rw-r--r--  autogpts/autogpt/autogpt/memory/vector/memory_item.py  285
-rw-r--r--  autogpts/autogpt/autogpt/memory/vector/providers/__init__.py (renamed from autogpt/memory/vector/providers/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/memory/vector/providers/base.py  79
-rw-r--r--  autogpts/autogpt/autogpt/memory/vector/providers/json_file.py  92
-rw-r--r--  autogpts/autogpt/autogpt/memory/vector/providers/no_memory.py (renamed from autogpt/memory/vector/providers/no_memory.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/memory/vector/utils.py  98
-rw-r--r--  autogpts/autogpt/autogpt/models/__init__.py (renamed from autogpt/json_utils/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/models/action_history.py  177
-rw-r--r--  autogpts/autogpt/autogpt/models/base_open_ai_plugin.py  251
-rw-r--r--  autogpts/autogpt/autogpt/models/command.py  72
-rw-r--r--  autogpts/autogpt/autogpt/models/command_parameter.py  17
-rw-r--r--  autogpts/autogpt/autogpt/models/command_registry.py  212
-rw-r--r--  autogpts/autogpt/autogpt/models/context_item.py  95
-rw-r--r--  autogpts/autogpt/autogpt/plugins/__init__.py  331
-rw-r--r--  autogpts/autogpt/autogpt/plugins/plugin_config.py (renamed from autogpt/plugins/plugin_config.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/plugins/plugins_config.py  118
-rw-r--r--  autogpts/autogpt/autogpt/processing/__init__.py (renamed from autogpt/llm/providers/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/processing/html.py  33
-rw-r--r--  autogpts/autogpt/autogpt/processing/text.py  256
-rw-r--r--  autogpts/autogpt/autogpt/prompts/__init__.py (renamed from autogpt/models/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/prompts/prompt.py  5
-rw-r--r--  autogpts/autogpt/autogpt/prompts/utils.py  11
-rw-r--r--  autogpts/autogpt/autogpt/singleton.py  16
-rw-r--r--  autogpts/autogpt/autogpt/speech/__init__.py  4
-rw-r--r--  autogpts/autogpt/autogpt/speech/base.py  54
-rw-r--r--  autogpts/autogpt/autogpt/speech/eleven_labs.py  93
-rw-r--r--  autogpts/autogpt/autogpt/speech/gtts.py  24
-rw-r--r--  autogpts/autogpt/autogpt/speech/macos_tts.py  23
-rw-r--r--  autogpts/autogpt/autogpt/speech/say.py  79
-rw-r--r--  autogpts/autogpt/autogpt/speech/stream_elements_speech.py  54
-rw-r--r--  autogpts/autogpt/autogpt/url_utils/__init__.py (renamed from autogpt/processing/__init__.py)  0
-rw-r--r--  autogpts/autogpt/autogpt/url_utils/validators.py  92
-rw-r--r--  autogpts/autogpt/autogpt/utils.py  19
-rw-r--r--  autogpts/autogpt/azure.yaml.template  7
-rw-r--r--  autogpts/autogpt/challenges_already_beaten.json  3
-rw-r--r--  autogpts/autogpt/codecov.yml (renamed from codecov.yml)  0
-rw-r--r--  autogpts/autogpt/data/.keep (renamed from data/.keep)  0
-rw-r--r--  autogpts/autogpt/docker-compose.yml  49
-rw-r--r--  autogpts/autogpt/hooks/post-checkout (renamed from hooks/post-checkout)  0
-rw-r--r--  autogpts/autogpt/hooks/post-rewrite (renamed from hooks/post-rewrite)  0
-rw-r--r--  autogpts/autogpt/plugin.png (renamed from plugin.png)  bin 33356 -> 33356 bytes
-rw-r--r--  autogpts/autogpt/plugins/.keep (renamed from autogpt/prompts/__init__.py)  0
-rw-r--r--  autogpts/autogpt/poetry.lock  7194
-rw-r--r--  autogpts/autogpt/prompt_settings.yaml  17
-rw-r--r--  autogpts/autogpt/pyproject.toml  162
-rwxr-xr-x  autogpts/autogpt/run  10
-rwxr-xr-x  autogpts/autogpt/run_benchmark  9
-rw-r--r--  autogpts/autogpt/scripts/__init__.py (renamed from autogpt/url_utils/__init__.py)  0
-rw-r--r--  autogpts/autogpt/scripts/check_requirements.py  38
-rw-r--r--  autogpts/autogpt/scripts/install_plugin_deps.py  66
-rwxr-xr-x  autogpts/autogpt/setup  8
-rw-r--r--  autogpts/autogpt/tests/__init__.py (renamed from scripts/__init__.py)  0
-rw-r--r--  autogpts/autogpt/tests/conftest.py  159
-rw-r--r--  autogpts/autogpt/tests/context.py (renamed from tests/context.py)  0
-rw-r--r--  autogpts/autogpt/tests/integration/__init__.py (renamed from tests/__init__.py)  0
-rw-r--r--  autogpts/autogpt/tests/integration/agent_factory.py  56
-rw-r--r--  autogpts/autogpt/tests/integration/memory/__init__.py (renamed from tests/challenges/__init__.py)  0
-rw-r--r--  autogpts/autogpt/tests/integration/memory/_test_json_file_memory.py  130
-rw-r--r--  autogpts/autogpt/tests/integration/memory/conftest.py (renamed from tests/integration/memory/conftest.py)  0
-rw-r--r--  autogpts/autogpt/tests/integration/memory/utils.py  44
-rw-r--r--  autogpts/autogpt/tests/integration/test_execute_code.py  118
-rw-r--r--  autogpts/autogpt/tests/integration/test_image_gen.py  235
-rw-r--r--  autogpts/autogpt/tests/integration/test_setup.py  70
-rw-r--r--  autogpts/autogpt/tests/integration/test_web_selenium.py  18
-rw-r--r--  autogpts/autogpt/tests/mocks/__init__.py (renamed from tests/challenges/basic_abilities/__init__.py)  0
-rw-r--r--  autogpts/autogpt/tests/mocks/mock_commands.py  29
-rw-r--r--  autogpts/autogpt/tests/unit/__init__.py (renamed from tests/challenges/challenge_decorator/__init__.py)  0
-rw-r--r--  autogpts/autogpt/tests/unit/data/test_ai_config.yaml (renamed from tests/unit/data/test_ai_config.yaml)  0
-rw-r--r--  autogpts/autogpt/tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip (renamed from tests/unit/data/test_plugins/Auto-GPT-Plugin-Test-master.zip)  bin 15385 -> 15385 bytes
-rw-r--r--  autogpts/autogpt/tests/unit/data/test_plugins/auto_gpt_guanaco/__init__.py  274
-rw-r--r--  autogpts/autogpt/tests/unit/models/test_base_open_api_plugin.py  81
-rw-r--r--  autogpts/autogpt/tests/unit/test_ai_profile.py  70
-rw-r--r--  autogpts/autogpt/tests/unit/test_api_manager.py  77
-rw-r--r--  autogpts/autogpt/tests/unit/test_commands.py  239
-rw-r--r--  autogpts/autogpt/tests/unit/test_config.py  223
-rw-r--r--  autogpts/autogpt/tests/unit/test_file_operations.py  331
-rw-r--r--  autogpts/autogpt/tests/unit/test_gcs_file_workspace.py  131
-rw-r--r--  autogpts/autogpt/tests/unit/test_git_commands.py  43
-rw-r--r--  autogpts/autogpt/tests/unit/test_local_file_workspace.py  92
-rw-r--r--  autogpts/autogpt/tests/unit/test_logs.py  36
-rw-r--r--  autogpts/autogpt/tests/unit/test_plugins.py  125
-rw-r--r--  autogpts/autogpt/tests/unit/test_prompt_config.py  42
-rw-r--r--  autogpts/autogpt/tests/unit/test_s3_file_workspace.py  123
-rw-r--r--  autogpts/autogpt/tests/unit/test_spinner.py  35
-rw-r--r--  autogpts/autogpt/tests/unit/test_text_file_parsers.py  170
-rw-r--r--  autogpts/autogpt/tests/unit/test_url_validation.py  157
-rw-r--r--  autogpts/autogpt/tests/unit/test_utils.py  223
-rw-r--r--  autogpts/autogpt/tests/unit/test_web_search.py  136
-rw-r--r--  autogpts/autogpt/tests/utils.py (renamed from tests/utils.py)  0
-rw-r--r--  autogpts/autogpt/tests/vcr/__init__.py  93
-rw-r--r--  autogpts/autogpt/tests/vcr/vcr_filter.py  118
m---------  autogpts/autogpt/tests/vcr_cassettes  0
-rwxr-xr-x  autogpts/evo.ninja/setup  17
-rw-r--r--  autogpts/forge/.env.example  8
-rw-r--r--  autogpts/forge/.flake8  15
-rw-r--r--  autogpts/forge/.gitignore  176
-rw-r--r--  autogpts/forge/.pre-commit-config.yaml  43
-rw-r--r--  autogpts/forge/Dockerfile  40
-rw-r--r--  autogpts/forge/README.md  24
-rw-r--r--  autogpts/forge/__init__.py (renamed from tests/challenges/memory/__init__.py)  0
-rw-r--r--  autogpts/forge/agbenchmark_config/config.json  4
-rw-r--r--  autogpts/forge/forge/__init__.py (renamed from tests/integration/__init__.py)  0
-rw-r--r--  autogpts/forge/forge/__main__.py  49
-rw-r--r--  autogpts/forge/forge/actions/__init__.py  1
-rw-r--r--  autogpts/forge/forge/actions/file_system/files.py  78
-rw-r--r--  autogpts/forge/forge/actions/finish.py  38
-rw-r--r--  autogpts/forge/forge/actions/registry.py  193
-rw-r--r--  autogpts/forge/forge/actions/web/web_search.py  74
-rw-r--r--  autogpts/forge/forge/actions/web/web_selenium.py  366
-rw-r--r--  autogpts/forge/forge/agent.py  146
-rw-r--r--  autogpts/forge/forge/app.py  13
-rw-r--r--  autogpts/forge/forge/db.py  143
-rw-r--r--  autogpts/forge/forge/llm.py  61
-rw-r--r--  autogpts/forge/forge/memory/__init__.py  2
-rw-r--r--  autogpts/forge/forge/memory/chroma_memstore.py  161
-rw-r--r--  autogpts/forge/forge/memory/memstore.py  151
-rw-r--r--  autogpts/forge/forge/memory/memstore_test.py  58
-rw-r--r--  autogpts/forge/forge/prompts/gpt-3.5-turbo/role_selection.j2  9
-rw-r--r--  autogpts/forge/forge/prompts/gpt-3.5-turbo/system-format.j2  17
-rw-r--r--  autogpts/forge/forge/prompts/gpt-3.5-turbo/task-step.j2  50
-rw-r--r--  autogpts/forge/forge/prompts/techniques/chain-of-thought.j2  2
-rw-r--r--  autogpts/forge/forge/prompts/techniques/expert.j2  1
-rw-r--r--  autogpts/forge/forge/prompts/techniques/few-shot.j2  5
-rw-r--r--  autogpts/forge/forge/sdk/__init__.py  25
-rw-r--r--  autogpts/forge/forge/sdk/agent.py  225
-rw-r--r--  autogpts/forge/forge/sdk/agent_test.py  107
-rw-r--r--  autogpts/forge/forge/sdk/db.py  505
-rw-r--r--  autogpts/forge/forge/sdk/db_test.py  331
-rw-r--r--  autogpts/forge/forge/sdk/errors.py  60
-rw-r--r--  autogpts/forge/forge/sdk/forge_log.py  203
-rw-r--r--  autogpts/forge/forge/sdk/middlewares.py  34
-rw-r--r--  autogpts/forge/forge/sdk/model.py  180
-rw-r--r--  autogpts/forge/forge/sdk/prompting.py  117
-rw-r--r--  autogpts/forge/forge/sdk/routes/__init__.py (renamed from tests/integration/memory/__init__.py)  0
-rw-r--r--  autogpts/forge/forge/sdk/routes/agent_protocol.py  595
-rw-r--r--  autogpts/forge/forge/sdk/workspace.py  133
-rw-r--r--  autogpts/forge/forge/sdk/workspace_test.py  47
-rw-r--r--  autogpts/forge/mypy.ini  13
-rw-r--r--  autogpts/forge/poetry.lock  5005
-rw-r--r--  autogpts/forge/pyproject.toml  70
-rwxr-xr-x  autogpts/forge/run  9
-rwxr-xr-x  autogpts/forge/run_benchmark  9
-rwxr-xr-x  autogpts/forge/setup  13
-rw-r--r--  autogpts/forge/tutorials/001_getting_started.md  121
-rw-r--r--  autogpts/forge/tutorials/002_blueprint_of_an_agent.md  147
-rw-r--r--  autogpts/forge/tutorials/003_crafting_agent_logic.md  513
-rw-r--r--  autogpts/forge/tutorials/004_memories.md  75
-rw-r--r--  azure.yaml.template  7
-rw-r--r--  benchmark/.env.example  4
-rw-r--r--  benchmark/.flake8  15
-rw-r--r--  benchmark/.gitignore  174
-rw-r--r--  benchmark/.pre-commit-config.yaml  36
-rw-r--r--  benchmark/.python-version  1
-rw-r--r--  benchmark/.vscode/launch.json (renamed from plugins/__PUT_PLUGIN_ZIPS_HERE__)  0
-rw-r--r--  benchmark/.vscode/settings.json  6
-rw-r--r--  benchmark/LICENSE  21
-rw-r--r--  benchmark/README.md  25
-rw-r--r--  benchmark/agbenchmark/README.md  69
-rw-r--r--  benchmark/agbenchmark/__init__.py (renamed from tests/mocks/__init__.py)  0
-rw-r--r--  benchmark/agbenchmark/__main__.py  228
-rw-r--r--  benchmark/agbenchmark/agent_api_interface.py  104
-rw-r--r--  benchmark/agbenchmark/agent_interface.py  27
-rw-r--r--  benchmark/agbenchmark/app.py  334
-rw-r--r--  benchmark/agbenchmark/challenges/CHALLENGE.md  85
-rw-r--r--  benchmark/agbenchmark/challenges/README.md  13
-rw-r--r--  benchmark/agbenchmark/challenges/__init__.py  56
-rw-r--r--  benchmark/agbenchmark/challenges/abilities/read_file/artifacts_in/file_to_read.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/abilities/read_file/artifacts_out/file_to_check.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/abilities/read_file/artifacts_out/output.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/abilities/read_file/data.json  34
-rw-r--r--  benchmark/agbenchmark/challenges/abilities/write_file/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/abilities/write_file/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/alignment/1_distraction/artifacts_in/instructions.txt  4
-rw-r--r--  benchmark/agbenchmark/challenges/alignment/1_distraction/artifacts_out/goal.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/alignment/1_distraction/data_draft.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/alignment/2_injection/artifacts_in/instructions.txt  9
-rw-r--r--  benchmark/agbenchmark/challenges/alignment/2_injection/artifacts_in/instructions_2.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/alignment/2_injection/artifacts_out/goal.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/alignment/2_injection/data_draft.json  35
-rw-r--r--  benchmark/agbenchmark/challenges/base.py  104
-rw-r--r--  benchmark/agbenchmark/challenges/builtin.py  425
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/1_tesla_revenue/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/1_tesla_revenue/data.json  30
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/2_specific/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/2_specific/data.json  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/3_formatting/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/3_formatting/data.json  30
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a1_debug/artifacts_in/__init__.py (renamed from tests/unit/__init__.py)  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a1_debug/artifacts_in/sample_code.py  12
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a1_debug/artifacts_in/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a1_debug/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a1_debug/artifacts_out/sample_code.py  11
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a1_debug/artifacts_out/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a1_debug/data.json  32
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a2_tesla_revenue/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a2_tesla_revenue/data.json  30
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a3_book_price/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/adapatability/a3_book_price/data.json  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_list_animals/artifacts_out/animal_list.html  29
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_list_animals/custom_python/test.py  48
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_list_animals/data.json  28
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_password_generator/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_password_generator/artifacts_out/password_generator.py  23
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_password_generator/custom_python/test.py  25
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_password_generator/data.json  28
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_return/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_return/artifacts_in/sample_code.py  2
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_return/artifacts_in/test.py  16
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_return/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_return/artifacts_out/sample_code.py  3
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_return/artifacts_out/test.py  16
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/1_return/data.json  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_file_organizer/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_file_organizer/artifacts_out/organize_files.py  48
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_file_organizer/custom_python/test.py  45
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_file_organizer/data.json  28
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_write/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_write/artifacts_in/sample_code.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_write/artifacts_in/test.py  16
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_write/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_write/artifacts_out/sample_code.py  3
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_write/artifacts_out/test.py  16
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/2_write/data.json  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/3_modify/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/3_modify/artifacts_in/sample_code.py  3
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/3_modify/artifacts_in/test.py  29
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/3_modify/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/3_modify/artifacts_out/sample_code.py  3
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/3_modify/artifacts_out/test.py  29
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/3_modify/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/4_tests/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/4_tests/artifacts_in/sample_code.py  3
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/4_tests/artifacts_in/testfile.py  17
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/4_tests/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/4_tests/artifacts_out/sample_code.py  3
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/4_tests/artifacts_out/testfile.py  17
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/4_tests/custom_python/test.py  29
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/4_tests/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.1_guided/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.1_guided/artifacts_in/sample_code.py  12
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.1_guided/artifacts_in/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.1_guided/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.1_guided/artifacts_out/sample_code.py  11
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.1_guided/artifacts_out/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.1_guided/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.2_vague/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.2_vague/artifacts_in/sample_code.py  12
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.2_vague/artifacts_in/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.2_vague/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.2_vague/artifacts_out/sample_code.py  11
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.2_vague/artifacts_out/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.2_vague/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.3_import/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.3_import/artifacts_in/sample_code.py  12
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.3_import/artifacts_in/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.3_import/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.3_import/artifacts_out/sample_code.py  11
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.3_import/artifacts_out/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d2.3_import/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d3.1_three_sum/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d3.1_three_sum/artifacts_out/sample_code.py  22
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d3.1_three_sum/custom_python/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d3.1_three_sum/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d3_two_sum/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d3_two_sum/artifacts_out/sample_code.py  11
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d3_two_sum/custom_python/test.py  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/code/d3_two_sum/data.json  32
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/content_gen/2_plan/artifacts_out/output.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/content_gen/2_plan/data.json  30
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/d2.1_guided/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/d2.1_guided/artifacts_in/sample_code.py (renamed from tests/challenges/debug_code/data/code.py)  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/d2.1_guided/artifacts_in/test.py  32
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/d2.1_guided/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/d2.1_guided/artifacts_out/sample_code.py  12
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/d2.1_guided/artifacts_out/test.py  32
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/d2.1_guided/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m1_id/artifacts_in/instructions_1.txt  2
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m1_id/artifacts_in/instructions_2.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m1_id/artifacts_in/instructions_3.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m1_id/artifacts_in/instructions_4.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m1_id/artifacts_in/instructions_5.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m1_id/artifacts_out/result.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m1_id/data.json  30
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m2_multiple/artifacts_in/instructions_1.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m2_multiple/artifacts_in/instructions_2.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m2_multiple/artifacts_in/instructions_3.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m2_multiple/artifacts_in/instructions_4.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m2_multiple/artifacts_in/instructions_5.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m2_multiple/artifacts_out/result.txt  4
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m2_multiple/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m3_noise/artifacts_in/instructions_1.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m3_noise/artifacts_in/instructions_2.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m3_noise/artifacts_in/instructions_3.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m3_noise/artifacts_in/instructions_4.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m3_noise/artifacts_in/instructions_5.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m3_noise/artifacts_out/result.txt  4
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m3_noise/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m4_phrases/artifacts_in/instructions_1.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m4_phrases/artifacts_in/instructions_2.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m4_phrases/artifacts_in/instructions_3.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m4_phrases/artifacts_in/instructions_4.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m4_phrases/artifacts_in/instructions_5.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m4_phrases/artifacts_out/result.txt  4
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/memory/m4_phrases/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/1_tesla_revenue/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/1_tesla_revenue/data.json  30
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/2_specific/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/2_specific/data.json  31
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/3_formatting/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/3_formatting/data.json  30
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/r1_book_price/artifacts_out/random_file.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/r1_book_price/data.json  30
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/r3/artifacts_out/random_file.txt  15
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/retrieval/r3/data.json  46
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/1_simple/artifacts_in/instructions.txt  4
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/1_simple/artifacts_out/goal.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/1_simple/data.json  34
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/2_medium/artifacts_in/instructions.txt  5
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/2_medium/artifacts_out/goal.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/2_medium/data.json  34
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/3_advanced/artifacts_in/instructions.txt  6
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/3_advanced/artifacts_in/instructions_2.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/3_advanced/artifacts_out/goal.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/3_advanced/data.json  34
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/4_hard/artifacts_in/instructions.txt  9
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/4_hard/artifacts_in/instructions_2.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/4_hard/artifacts_out/goal.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/4_hard/data.json  36
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s2_divergence/artifacts_out/1.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s2_divergence/artifacts_out/2.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s2_divergence/artifacts_out/3.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s2_divergence/artifacts_out/4.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s2_divergence/artifacts_out/5.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s2_divergence/custom_python/test.py  53
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s2_divergence/data.json  34
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s3_instructions/artifacts_out/1.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s3_instructions/artifacts_out/2.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s3_instructions/artifacts_out/3.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s3_instructions/artifacts_out/4.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s3_instructions/artifacts_out/5.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s3_instructions/artifacts_out/6.txt  1
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s3_instructions/custom_python/test.py  42
-rw-r--r--  benchmark/agbenchmark/challenges/deprecated/safety/s3_instructions/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/library/README.md  1
-rw-r--r--  benchmark/agbenchmark/challenges/library/ethereum/check_price/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/library/ethereum/check_price/artifacts_in/sample_code.py  12
-rw-r--r--  benchmark/agbenchmark/challenges/library/ethereum/check_price/artifacts_in/test.py  34
-rw-r--r--  benchmark/agbenchmark/challenges/library/ethereum/check_price/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/library/ethereum/check_price/artifacts_out/sample_code.py  12
-rw-r--r--  benchmark/agbenchmark/challenges/library/ethereum/check_price/artifacts_out/test.py  34
-rw-r--r--  benchmark/agbenchmark/challenges/library/ethereum/check_price/data.json  32
-rw-r--r--  benchmark/agbenchmark/challenges/optional_categories.json  3
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/1_three_sum/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/1_three_sum/artifacts_out/sample_code.py  23
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/1_three_sum/custom_python/test.py  32
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/1_three_sum/data.json  33
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/2_password_generator/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/2_password_generator/artifacts_out/password_generator.py  27
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/2_password_generator/custom_python/test.py  25
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/2_password_generator/data.json  28
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/3_file_organizer/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/3_file_organizer/artifacts_out/organize_files.py  48
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/3_file_organizer/custom_python/test.py  45
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/3_file_organizer/data.json  29
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/4_url_shortener/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/4_url_shortener/artifacts_out/test.py  22
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/4_url_shortener/artifacts_out/url_shortener.py  40
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/4_url_shortener/custom_python/test.py  22
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/4_url_shortener/data.json  28
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/5_tic_tac_toe/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/5_tic_tac_toe/artifacts_out/tic_tac_toe.py  99
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/5_tic_tac_toe/custom_python/test.py  41
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/5_tic_tac_toe/data.json  29
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_in/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_in/abstract_class.py  107
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_in/conftest.py  61
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_in/product_requirements.txt  30
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_in/test_negative.py  100
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_in/test_positive.py  149
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_in/user_stories.txt  31
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_out/__init__.py  0
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_out/abstract_class.py  107
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_out/battleship.py  149
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_out/conftest.py  61
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_out/test_negative.py  100
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/artifacts_out/test_positive.py  149
-rw-r--r--  benchmark/agbenchmark/challenges/verticals/code/6_battleship/data.json  28
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/1_sort_csv/artifacts_in/input.csv5
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/1_sort_csv/artifacts_out/output.csv5
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/1_sort_csv/data.json32
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/2_label_csv/artifacts_in/input.csv12
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/2_label_csv/artifacts_out/output.csv12
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/2_label_csv/data.json32
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/3_combine_csv/artifacts_in/file1.csv4
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/3_combine_csv/artifacts_in/file2.csv4
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/3_combine_csv/artifacts_out/output.csv4
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/3_combine_csv/data.json32
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/4_answer_question_small_csv/artifacts_in/file1.csv12
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/4_answer_question_small_csv/artifacts_out/output.txt1
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/4_answer_question_small_csv/data.json32
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/5_answer_question_csv/artifacts_in/file1.csv305
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/5_answer_question_csv/artifacts_out/output.txt1
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/5_answer_question_csv/data.json31
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/6_answer_question_combine_csv/artifacts_in/file1.csv305
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/6_answer_question_combine_csv/artifacts_in/file2.csv305
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/6_answer_question_combine_csv/artifacts_out/output.txt1
-rw-r--r--benchmark/agbenchmark/challenges/verticals/data/6_answer_question_combine_csv/data.json33
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/1_search/artifacts_out/random_file.txt2
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/1_search/data.json37
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/2_book_price/artifacts_out/random_file.txt1
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/2_book_price/data.json31
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/3_revenue_retrieval/artifacts_out/random_file.txt1
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/3_revenue_retrieval/data.json31
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/4_revenue_retrieval_2/artifacts_out/random_file.txt15
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/4_revenue_retrieval_2/data.json46
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/5_get_information/artifacts_out/output.txt2
-rw-r--r--benchmark/agbenchmark/challenges/verticals/scrape/5_get_information/data.json35
-rw-r--r--benchmark/agbenchmark/challenges/verticals/synthesize/1_basic_content_gen/artifacts_in/challenges.txt5
-rw-r--r--benchmark/agbenchmark/challenges/verticals/synthesize/1_basic_content_gen/artifacts_in/companies.txt5
-rw-r--r--benchmark/agbenchmark/challenges/verticals/synthesize/1_basic_content_gen/artifacts_out/output.txt24
-rw-r--r--benchmark/agbenchmark/challenges/verticals/synthesize/1_basic_content_gen/data.json33
-rw-r--r--benchmark/agbenchmark/challenges/webarena.py478
-rw-r--r--benchmark/agbenchmark/challenges/webarena_selection.json523
-rw-r--r--benchmark/agbenchmark/config.py119
-rw-r--r--benchmark/agbenchmark/conftest.py334
-rw-r--r--benchmark/agbenchmark/generate_test.py26
-rw-r--r--benchmark/agbenchmark/main.py158
-rw-r--r--benchmark/agbenchmark/reports/ReportManager.py213
-rw-r--r--benchmark/agbenchmark/reports/processing/gen_combined_chart.py45
-rw-r--r--benchmark/agbenchmark/reports/processing/get_files.py34
-rw-r--r--benchmark/agbenchmark/reports/processing/graphs.py205
-rw-r--r--benchmark/agbenchmark/reports/processing/process_report.py66
-rw-r--r--benchmark/agbenchmark/reports/processing/report_types.py87
-rw-r--r--benchmark/agbenchmark/reports/processing/report_types_v2.py48
-rw-r--r--benchmark/agbenchmark/reports/reports.py143
-rw-r--r--benchmark/agbenchmark/schema.py23
-rw-r--r--benchmark/agbenchmark/utils/data_types.py46
-rw-r--r--benchmark/agbenchmark/utils/dependencies/__init__.py194
-rw-r--r--benchmark/agbenchmark/utils/dependencies/constants.py10
-rw-r--r--benchmark/agbenchmark/utils/dependencies/graphs.py445
-rw-r--r--benchmark/agbenchmark/utils/dependencies/main.py253
-rw-r--r--benchmark/agbenchmark/utils/dependencies/util.py85
-rw-r--r--benchmark/agbenchmark/utils/get_data_from_helicone.py83
-rw-r--r--benchmark/agbenchmark/utils/logging.py74
-rw-r--r--benchmark/agbenchmark/utils/prompts.py68
-rw-r--r--benchmark/agbenchmark/utils/utils.py136
-rw-r--r--benchmark/agbenchmark_config/config.json4
-rw-r--r--benchmark/agbenchmark_config/reports/20230912T190004_full_run/report.json289
-rw-r--r--benchmark/agbenchmark_config/reports/20230912T190012_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T174917_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175341_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175642_full_run/report.json1
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175706_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175706_full_run/report.json291
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175736_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175736_full_run/report.json291
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175743_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175811_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T175811_full_run/report.json291
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T180141_full_run/radar_chart.pngbin0 -> 149931 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T180141_full_run/report.json305
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T180202_full_run/radar_chart.pngbin0 -> 149931 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T180202_full_run/report.json305
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T180607_full_run/radar_chart.pngbin0 -> 149931 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T180607_full_run/report.json305
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T180913_full_run/report.json22
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T181409_full_run/radar_chart.pngbin0 -> 149931 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T181409_full_run/report.json305
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T181418_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T181537_full_run/report.json63
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T181613_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T181654_full_run/radar_chart.pngbin0 -> 149931 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T181654_full_run/report.json305
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T184327_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T185526_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T185545_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T185553_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T185602_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T185737_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T185758_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T185811_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T185817_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T190232_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T212614_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T212640_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T222833_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T222946_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T223330_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T223509_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T223644_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T223716_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T223845_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T223853_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T223908_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T223916_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224003_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224204_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224236_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224405_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224422_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224453_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224557_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224620_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224724_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224742_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T224756_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225007_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225230_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225239_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225334_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225351_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225404_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225446_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225523_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225537_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225620_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225652_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T225715_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231008_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231128_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231221_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231245_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231328_full_run/report.json42
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231557_full_run/report.json21
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231813_full_run/report.json1
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231835_full_run/report.json41
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231852_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T231852_full_run/report.json294
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T233016_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T233016_full_run/report.json292
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T233024_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T233024_full_run/report.json292
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T233031_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T233031_full_run/report.json292
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T234542_full_run/report.json38
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T234605_full_run/report.json38
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T234632_full_run/report.json37
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T234658_full_run/report.json37
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T234707_full_run/report.json37
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T234851_full_run/report.json37
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T234903_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230913T234903_full_run/report.json287
-rw-r--r--benchmark/agbenchmark_config/reports/20230914T014354_full_run/radar_chart.pngbin0 -> 163335 bytes
-rw-r--r--benchmark/agbenchmark_config/reports/20230914T014354_full_run/report.json287
-rw-r--r--benchmark/agbenchmark_config/reports/regression_tests.json1
-rw-r--r--benchmark/agbenchmark_config/reports/success_rate.json71
-rw-r--r--benchmark/agents_to_benchmark.json47
-rw-r--r--benchmark/backend/__init__.py0
-rw-r--r--benchmark/frontend/.env.example14
-rw-r--r--benchmark/frontend/.gitignore42
-rw-r--r--benchmark/frontend/README.md7
-rw-r--r--benchmark/frontend/_eslintrc.cjs30
-rw-r--r--benchmark/frontend/next.config.mjs22
-rw-r--r--benchmark/frontend/package-lock.json4379
-rw-r--r--benchmark/frontend/package.json47
-rw-r--r--benchmark/frontend/postcss.config.cjs8
-rw-r--r--benchmark/frontend/prettier.config.cjs6
-rw-r--r--benchmark/frontend/public/favicon.icobin0 -> 15406 bytes
-rw-r--r--benchmark/frontend/public/graph.json897
-rw-r--r--benchmark/frontend/src/components/data/Dashboard.tsx45
-rw-r--r--benchmark/frontend/src/components/data/Reports.tsx28
-rw-r--r--benchmark/frontend/src/components/data/dashboard/CategorySuccess.tsx16
-rw-r--r--benchmark/frontend/src/components/data/dashboard/CurrentEnv.tsx68
-rw-r--r--benchmark/frontend/src/components/data/dashboard/RadarChart.tsx16
-rw-r--r--benchmark/frontend/src/components/index/Graph.tsx112
-rw-r--r--benchmark/frontend/src/components/index/MockCheckbox.tsx39
-rw-r--r--benchmark/frontend/src/components/index/RunButton.tsx80
-rw-r--r--benchmark/frontend/src/components/index/RunData.tsx129
-rw-r--r--benchmark/frontend/src/components/index/SelectedTask.tsx112
-rw-r--r--benchmark/frontend/src/components/index/TaskInfo.tsx164
-rw-r--r--benchmark/frontend/src/env.mjs37
-rw-r--r--benchmark/frontend/src/pages/_app.tsx9
-rw-r--r--benchmark/frontend/src/pages/data.tsx41
-rw-r--r--benchmark/frontend/src/pages/index.tsx63
-rw-r--r--benchmark/frontend/src/server/db.ts15
-rw-r--r--benchmark/frontend/src/styles/globals.css3
-rw-r--r--benchmark/frontend/tailwind.config.ts9
-rw-r--r--benchmark/frontend/tsconfig.json33
-rw-r--r--benchmark/mypy.ini13
-rw-r--r--benchmark/notebooks/LLM Score Experimentation.ipynb367
-rw-r--r--benchmark/notebooks/Visualization.ipynb431
-rw-r--r--benchmark/notebooks/combined_data.ipynb685
-rw-r--r--benchmark/notebooks/selected_logs.json30
-rw-r--r--benchmark/notebooks/selected_logs_nested.json74
-rw-r--r--benchmark/poetry.lock2763
-rw-r--r--benchmark/pyproject.toml94
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230824T032419_full_run/radar_chart.pngbin0 -> 217831 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230824T032419_full_run/report.json889
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230824T081333_full_run/radar_chart.pngbin0 -> 224555 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230824T081333_full_run/report.json886
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230825T081419_full_run/radar_chart.pngbin0 -> 228039 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230825T081419_full_run/report.json888
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230826T081326_full_run/radar_chart.pngbin0 -> 228039 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230826T081326_full_run/report.json885
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230827T081254_full_run/radar_chart.pngbin0 -> 239912 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230827T081254_full_run/report.json885
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230828T081413_full_run/radar_chart.pngbin0 -> 228039 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230828T081413_full_run/report.json888
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230829T081447_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230829T081447_full_run/report.json895
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230830T081439_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230830T081439_full_run/report.json895
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230830T130202_full_run/radar_chart.pngbin0 -> 248247 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230830T130202_full_run/report.json877
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230830T231152_full_run/radar_chart.pngbin0 -> 237700 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230830T231152_full_run/report.json876
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230831T081441_full_run/radar_chart.pngbin0 -> 178964 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230831T081441_full_run/report.json419
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230831T145222_full_run/radar_chart.pngbin0 -> 169202 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230831T145222_full_run/report.json420
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230831T153506_full_run/radar_chart.pngbin0 -> 169202 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230831T153506_full_run/report.json420
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230901T081341_full_run/radar_chart.pngbin0 -> 169202 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230901T081341_full_run/report.json422
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230901T170512_full_run/radar_chart.pngbin0 -> 169202 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230901T170512_full_run/report.json420
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230902T081222_full_run/radar_chart.pngbin0 -> 169202 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230902T081222_full_run/report.json420
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230903T081239_full_run/radar_chart.pngbin0 -> 236295 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230903T081239_full_run/report.json862
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230904T081450_full_run/radar_chart.pngbin0 -> 233896 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230904T081450_full_run/report.json869
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230905T081456_full_run/radar_chart.pngbin0 -> 228571 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230905T081456_full_run/report.json883
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230908T011647_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230908T011647_full_run/report.json315
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230909T080459_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230909T080459_full_run/report.json315
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230911T080438_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/20230911T080438_full_run/report.json315
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/regression_tests.json26
-rw-r--r--benchmark/reports/Auto-GPT-Turbo/success_rate.json721
-rw-r--r--benchmark/reports/Auto-GPT/20230817T000126_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230817T000126_full_run/report.json891
-rw-r--r--benchmark/reports/Auto-GPT/20230817T081335_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230817T081335_full_run/report.json891
-rw-r--r--benchmark/reports/Auto-GPT/20230818T081400_full_run/radar_chart.pngbin0 -> 215697 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230818T081400_full_run/report.json890
-rw-r--r--benchmark/reports/Auto-GPT/20230819T081239_full_run/radar_chart.pngbin0 -> 226812 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230819T081239_full_run/report.json883
-rw-r--r--benchmark/reports/Auto-GPT/20230820T081235_full_run/radar_chart.pngbin0 -> 226812 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230820T081235_full_run/report.json881
-rw-r--r--benchmark/reports/Auto-GPT/20230821T081455_full_run/radar_chart.pngbin0 -> 217484 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230821T081455_full_run/report.json887
-rw-r--r--benchmark/reports/Auto-GPT/20230822T081337_full_run/radar_chart.pngbin0 -> 217446 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230822T081337_full_run/report.json889
-rw-r--r--benchmark/reports/Auto-GPT/20230823T081341_full_run/radar_chart.pngbin0 -> 215697 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230823T081341_full_run/report.json890
-rw-r--r--benchmark/reports/Auto-GPT/20230824T032421_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230824T032421_full_run/report.json892
-rw-r--r--benchmark/reports/Auto-GPT/20230824T081422_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230824T081422_full_run/report.json891
-rw-r--r--benchmark/reports/Auto-GPT/20230825T081408_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230825T081408_full_run/report.json892
-rw-r--r--benchmark/reports/Auto-GPT/20230826T081228_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230826T081228_full_run/report.json892
-rw-r--r--benchmark/reports/Auto-GPT/20230827T081337_full_run/radar_chart.pngbin0 -> 223681 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230827T081337_full_run/report.json887
-rw-r--r--benchmark/reports/Auto-GPT/20230828T081454_full_run/radar_chart.pngbin0 -> 222745 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230828T081454_full_run/report.json886
-rw-r--r--benchmark/reports/Auto-GPT/20230829T081453_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230829T081453_full_run/report.json892
-rw-r--r--benchmark/reports/Auto-GPT/20230830T081508_full_run/radar_chart.pngbin0 -> 236664 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230830T081508_full_run/report.json882
-rw-r--r--benchmark/reports/Auto-GPT/20230831T081434_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230831T081434_full_run/report.json424
-rw-r--r--benchmark/reports/Auto-GPT/20230831T153538_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230831T153538_full_run/report.json424
-rw-r--r--benchmark/reports/Auto-GPT/20230901T081405_full_run/radar_chart.pngbin0 -> 152054 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230901T081405_full_run/report.json422
-rw-r--r--benchmark/reports/Auto-GPT/20230902T081212_full_run/radar_chart.pngbin0 -> 161319 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230902T081212_full_run/report.json421
-rw-r--r--benchmark/reports/Auto-GPT/20230903T081320_full_run/radar_chart.pngbin0 -> 240401 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230903T081320_full_run/report.json866
-rw-r--r--benchmark/reports/Auto-GPT/20230904T081516_full_run/radar_chart.pngbin0 -> 245259 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230904T081516_full_run/report.json860
-rw-r--r--benchmark/reports/Auto-GPT/20230905T081454_full_run/radar_chart.pngbin0 -> 229101 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230905T081454_full_run/report.json880
-rw-r--r--benchmark/reports/Auto-GPT/20230908T011709_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230908T011709_full_run/report.json314
-rw-r--r--benchmark/reports/Auto-GPT/20230908T080530_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230908T080530_full_run/report.json314
-rw-r--r--benchmark/reports/Auto-GPT/20230910T080422_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230910T080422_full_run/report.json314
-rw-r--r--benchmark/reports/Auto-GPT/20230911T080442_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/Auto-GPT/20230911T080442_full_run/report.json314
-rw-r--r--benchmark/reports/Auto-GPT/file11_07-20-23-18.json269
-rw-r--r--benchmark/reports/Auto-GPT/file12_07-20-23-45.json269
-rw-r--r--benchmark/reports/Auto-GPT/file13_07-21-00-20.json268
-rw-r--r--benchmark/reports/Auto-GPT/file14_07-21-08-18.json265
-rw-r--r--benchmark/reports/Auto-GPT/file15_07-21-18-18.json269
-rw-r--r--benchmark/reports/Auto-GPT/file16_07-22-08-16.json269
-rw-r--r--benchmark/reports/Auto-GPT/file17_07-22-15-10.json269
-rw-r--r--benchmark/reports/Auto-GPT/file18_07-23-08-17.json269
-rw-r--r--benchmark/reports/Auto-GPT/file19_07-23-16-22.json269
-rw-r--r--benchmark/reports/Auto-GPT/file1_07-18-00-18.json177
-rw-r--r--benchmark/reports/Auto-GPT/file20_07-23-19-08.json269
-rw-r--r--benchmark/reports/Auto-GPT/file21_07-23-19-27.json268
-rw-r--r--benchmark/reports/Auto-GPT/file22_07-23-19-35.json269
-rw-r--r--benchmark/reports/Auto-GPT/file23_07-23-19-53.json269
-rw-r--r--benchmark/reports/Auto-GPT/file24_07-23-21-03.json269
-rw-r--r--benchmark/reports/Auto-GPT/file24_07-23-21-06.json268
-rw-r--r--benchmark/reports/Auto-GPT/file26_07-23-22-25.json269
-rw-r--r--benchmark/reports/Auto-GPT/file26_07-23-22-26.json269
-rw-r--r--benchmark/reports/Auto-GPT/file28_07-24-08-19.json268
-rw-r--r--benchmark/reports/Auto-GPT/file29_07-24-22-11.json269
-rw-r--r--benchmark/reports/Auto-GPT/file2_07-18-02-45.json268
-rw-r--r--benchmark/reports/Auto-GPT/file30_07-24-23-51.json528
-rw-r--r--benchmark/reports/Auto-GPT/file31_07-25-01-05.json527
-rw-r--r--benchmark/reports/Auto-GPT/file32_07-25-01-35.json527
-rw-r--r--benchmark/reports/Auto-GPT/file33_07-25-03-14.json527
-rw-r--r--benchmark/reports/Auto-GPT/file34_07-25-03-35.json532
-rw-r--r--benchmark/reports/Auto-GPT/file35_07-25-03-59.json528
-rw-r--r--benchmark/reports/Auto-GPT/file36_07-25-04-20.json528
-rw-r--r--benchmark/reports/Auto-GPT/file37_07-25-08-18.json528
-rw-r--r--benchmark/reports/Auto-GPT/file38_07-25-18-10.json185
-rw-r--r--benchmark/reports/Auto-GPT/file38_07-25-18-12.json189
-rw-r--r--benchmark/reports/Auto-GPT/file38_07-25-18-14.json189
-rw-r--r--benchmark/reports/Auto-GPT/file3_07-18-08-19.json267
-rw-r--r--benchmark/reports/Auto-GPT/file41_07-26-00-53.json186
-rw-r--r--benchmark/reports/Auto-GPT/file42_07-26-03-15.json189
-rw-r--r--benchmark/reports/Auto-GPT/file43_07-26-08-18.json189
-rw-r--r--benchmark/reports/Auto-GPT/file46_07-27-18-44.json33
-rw-r--r--benchmark/reports/Auto-GPT/file47_07-27-13-31.json33
-rw-r--r--benchmark/reports/Auto-GPT/file47_07-27-19-24.json33
-rw-r--r--benchmark/reports/Auto-GPT/file48_07-27-13-38.json32
-rw-r--r--benchmark/reports/Auto-GPT/file48_07-27-19-56.json186
-rw-r--r--benchmark/reports/Auto-GPT/file49_07-28-03-53.json619
-rw-r--r--benchmark/reports/Auto-GPT/file4_07-18-16-20.json268
-rw-r--r--benchmark/reports/Auto-GPT/file50_07-28-04-10.json613
-rw-r--r--benchmark/reports/Auto-GPT/file51_07-29-08-12.json613
-rw-r--r--benchmark/reports/Auto-GPT/file52_07-29-09-24.json637
-rw-r--r--benchmark/reports/Auto-GPT/file53_07-29-09-29.json637
-rw-r--r--benchmark/reports/Auto-GPT/file54_07-29-10-18.json634
-rw-r--r--benchmark/reports/Auto-GPT/file55_07-29-10-45.json637
-rw-r--r--benchmark/reports/Auto-GPT/file56_07-29-16-09.json624
-rw-r--r--benchmark/reports/Auto-GPT/file57_07-29-17-21.json631
-rw-r--r--benchmark/reports/Auto-GPT/file59_07-30-03-06.json637
-rw-r--r--benchmark/reports/Auto-GPT/file59_07-30-08-12.json637
-rw-r--r--benchmark/reports/Auto-GPT/file5_07-19-08-18.json267
-rw-r--r--benchmark/reports/Auto-GPT/file6_07-19-20-40.json268
-rw-r--r--benchmark/reports/Auto-GPT/file7_07-19-21-56.json267
-rw-r--r--benchmark/reports/Auto-GPT/file8_07-20-20-12.json267
-rw-r--r--benchmark/reports/Auto-GPT/file9_07-20-22-44.json264
-rw-r--r--benchmark/reports/Auto-GPT/file9_07-20-22-49.json268
-rw-r--r--benchmark/reports/Auto-GPT/folder10_08-01-02-43/radar_chart.pngbin0 -> 145168 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder10_08-01-02-43/report.json195
-rw-r--r--benchmark/reports/Auto-GPT/folder10_08-01-12-47/radar_chart.pngbin0 -> 40127 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder10_08-01-12-47/report.json653
-rw-r--r--benchmark/reports/Auto-GPT/folder11_08-01-03-21/radar_chart.pngbin0 -> 162004 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder11_08-01-03-21/report.json654
-rw-r--r--benchmark/reports/Auto-GPT/folder11_08-01-13-38/radar_chart.pngbin0 -> 40882 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder11_08-01-13-38/report.json654
-rw-r--r--benchmark/reports/Auto-GPT/folder12_08-01-16-18/radar_chart.pngbin0 -> 209974 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder12_08-01-16-18/report.json656
-rw-r--r--benchmark/reports/Auto-GPT/folder13_08-01-16-58/radar_chart.pngbin0 -> 212627 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder13_08-01-16-58/report.json654
-rw-r--r--benchmark/reports/Auto-GPT/folder14_08-01-19-52/radar_chart.pngbin0 -> 212627 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder14_08-01-19-52/report.json654
-rw-r--r--benchmark/reports/Auto-GPT/folder18_08-02-02-37/radar_chart.pngbin0 -> 226536 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder18_08-02-02-37/report.json652
-rw-r--r--benchmark/reports/Auto-GPT/folder18_08-02-03-12/radar_chart.pngbin0 -> 227316 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder18_08-02-03-12/report.json651
-rw-r--r--benchmark/reports/Auto-GPT/folder19_08-02-03-58/radar_chart.pngbin0 -> 209700 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder19_08-02-03-58/report.json659
-rw-r--r--benchmark/reports/Auto-GPT/folder1_07-31-02-07/report.json646
-rw-r--r--benchmark/reports/Auto-GPT/folder20_08-02-08-13/radar_chart.pngbin0 -> 209700 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder20_08-02-08-13/report.json659
-rw-r--r--benchmark/reports/Auto-GPT/folder21_08-02-15-18/radar_chart.pngbin0 -> 208769 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder21_08-02-15-18/report.json659
-rw-r--r--benchmark/reports/Auto-GPT/folder22_08-02-17-21/radar_chart.pngbin0 -> 215006 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder22_08-02-17-21/report.json654
-rw-r--r--benchmark/reports/Auto-GPT/folder23_08-02-17-38/radar_chart.pngbin0 -> 213691 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder23_08-02-17-38/report.json654
-rw-r--r--benchmark/reports/Auto-GPT/folder24_08-02-00-08/radar_chart.pngbin0 -> 222106 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder24_08-02-00-08/report.json651
-rw-r--r--benchmark/reports/Auto-GPT/folder24_08-03-08-13/radar_chart.pngbin0 -> 226536 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder24_08-03-08-13/report.json688
-rw-r--r--benchmark/reports/Auto-GPT/folder25_08-02-01-35/radar_chart.pngbin0 -> 217492 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder25_08-02-01-35/report.json653
-rw-r--r--benchmark/reports/Auto-GPT/folder25_08-03-23-51/radar_chart.pngbin0 -> 227231 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder25_08-03-23-51/report.json686
-rw-r--r--benchmark/reports/Auto-GPT/folder26_08-04-03-24/radar_chart.pngbin0 -> 221519 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder26_08-04-03-24/report.json689
-rw-r--r--benchmark/reports/Auto-GPT/folder27_08-04-08-14/radar_chart.pngbin0 -> 225544 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder27_08-04-08-14/report.json687
-rw-r--r--benchmark/reports/Auto-GPT/folder28_08-05-08-12/radar_chart.pngbin0 -> 220670 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder28_08-05-08-12/report.json687
-rw-r--r--benchmark/reports/Auto-GPT/folder29_08-06-08-12/radar_chart.pngbin0 -> 227231 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder29_08-06-08-12/report.json759
-rw-r--r--benchmark/reports/Auto-GPT/folder2_07-31-03-06/report.json622
-rw-r--r--benchmark/reports/Auto-GPT/folder31_08-08-08-14/radar_chart.pngbin0 -> 226150 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder31_08-08-08-14/report.json874
-rw-r--r--benchmark/reports/Auto-GPT/folder31_08-09-08-15/radar_chart.pngbin0 -> 232761 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder31_08-09-08-15/report.json909
-rw-r--r--benchmark/reports/Auto-GPT/folder32_08-10-08-14/radar_chart.pngbin0 -> 234081 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder32_08-10-08-14/report.json908
-rw-r--r--benchmark/reports/Auto-GPT/folder33_08-11-08-13/radar_chart.pngbin0 -> 219337 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder33_08-11-08-13/report.json915
-rw-r--r--benchmark/reports/Auto-GPT/folder34_08-12-02-19/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder34_08-12-02-19/report.json915
-rw-r--r--benchmark/reports/Auto-GPT/folder35_08-12-02-51/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder35_08-12-02-51/report.json915
-rw-r--r--benchmark/reports/Auto-GPT/folder36_08-12-03-04/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder36_08-12-03-04/report.json915
-rw-r--r--benchmark/reports/Auto-GPT/folder37_08-12-03-45/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder37_08-12-03-45/report.json915
-rw-r--r--benchmark/reports/Auto-GPT/folder38_08-12-08-12/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder38_08-12-08-12/report.json915
-rw-r--r--benchmark/reports/Auto-GPT/folder39_08-12-17-24/radar_chart.pngbin0 -> 221277 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder39_08-12-17-24/report.json905
-rw-r--r--benchmark/reports/Auto-GPT/folder3_07-31-12-44/report.json14
-rw-r--r--benchmark/reports/Auto-GPT/folder40_08-13-01-10/radar_chart.pngbin0 -> 230411 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder40_08-13-01-10/report.json882
-rw-r--r--benchmark/reports/Auto-GPT/folder41_08-13-01-53/radar_chart.pngbin0 -> 230533 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder41_08-13-01-53/report.json881
-rw-r--r--benchmark/reports/Auto-GPT/folder42_08-13-02-38/radar_chart.pngbin0 -> 221655 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder42_08-13-02-38/report.json885
-rw-r--r--benchmark/reports/Auto-GPT/folder43_08-13-08-13/radar_chart.pngbin0 -> 229511 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder43_08-13-08-13/report.json880
-rw-r--r--benchmark/reports/Auto-GPT/folder44_08-14-08-14/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder44_08-14-08-14/report.json920
-rw-r--r--benchmark/reports/Auto-GPT/folder45_08-14-21-38/radar_chart.pngbin0 -> 217625 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder45_08-14-21-38/report.json919
-rw-r--r--benchmark/reports/Auto-GPT/folder46_08-15-08-15/radar_chart.pngbin0 -> 212835 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder46_08-15-08-15/report.json920
-rw-r--r--benchmark/reports/Auto-GPT/folder47_08-16-08-14/radar_chart.pngbin0 -> 212835 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder47_08-16-08-14/report.json920
-rw-r--r--benchmark/reports/Auto-GPT/folder4_07-31-13-05/radar_chart.pngbin0 -> 198260 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder4_07-31-13-05/report.json649
-rw-r--r--benchmark/reports/Auto-GPT/folder5_07-31-16-10/radar_chart.pngbin0 -> 179855 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder5_07-31-16-10/report.json653
-rw-r--r--benchmark/reports/Auto-GPT/folder6_07-31-19-06/radar_chart.pngbin0 -> 162004 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder6_07-31-19-06/report.json654
-rw-r--r--benchmark/reports/Auto-GPT/folder7_07-31-19-39/radar_chart.pngbin0 -> 179855 bytes
-rw-r--r--benchmark/reports/Auto-GPT/folder7_07-31-19-39/report.json653
-rw-r--r--benchmark/reports/Auto-GPT/regression_tests.json26
-rw-r--r--benchmark/reports/Auto-GPT/success_rate.json2366
-rw-r--r--benchmark/reports/PolyGPT/20230817T000100_full_run/radar_chart.pngbin0 -> 225869 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230817T000100_full_run/report.json884
-rw-r--r--benchmark/reports/PolyGPT/20230817T081344_full_run/radar_chart.pngbin0 -> 228411 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230817T081344_full_run/report.json884
-rw-r--r--benchmark/reports/PolyGPT/20230818T081347_full_run/radar_chart.pngbin0 -> 214382 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230818T081347_full_run/report.json888
-rw-r--r--benchmark/reports/PolyGPT/20230819T081303_full_run/radar_chart.pngbin0 -> 225869 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230819T081303_full_run/report.json884
-rw-r--r--benchmark/reports/PolyGPT/20230820T081253_full_run/radar_chart.pngbin0 -> 225869 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230820T081253_full_run/report.json884
-rw-r--r--benchmark/reports/PolyGPT/20230821T081430_full_run/radar_chart.pngbin0 -> 224555 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230821T081430_full_run/report.json886
-rw-r--r--benchmark/reports/PolyGPT/20230822T081318_full_run/radar_chart.pngbin0 -> 231334 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230822T081318_full_run/report.json885
-rw-r--r--benchmark/reports/PolyGPT/20230823T081326_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230823T081326_full_run/report.json891
-rw-r--r--benchmark/reports/PolyGPT/20230824T032533_full_run/radar_chart.pngbin0 -> 225869 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230824T032533_full_run/report.json884
-rw-r--r--benchmark/reports/PolyGPT/20230824T081402_full_run/radar_chart.pngbin0 -> 214533 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230824T081402_full_run/report.json887
-rw-r--r--benchmark/reports/PolyGPT/20230825T081411_full_run/radar_chart.pngbin0 -> 232691 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230825T081411_full_run/report.json882
-rw-r--r--benchmark/reports/PolyGPT/20230826T081258_full_run/radar_chart.pngbin0 -> 232691 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230826T081258_full_run/report.json882
-rw-r--r--benchmark/reports/PolyGPT/20230827T081204_full_run/radar_chart.pngbin0 -> 224962 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230827T081204_full_run/report.json881
-rw-r--r--benchmark/reports/PolyGPT/20230828T081533_full_run/radar_chart.pngbin0 -> 231407 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230828T081533_full_run/report.json880
-rw-r--r--benchmark/reports/PolyGPT/20230829T081440_full_run/radar_chart.pngbin0 -> 232691 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230829T081440_full_run/report.json878
-rw-r--r--benchmark/reports/PolyGPT/20230830T081320_full_run/radar_chart.pngbin0 -> 237669 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230830T081320_full_run/report.json877
-rw-r--r--benchmark/reports/PolyGPT/20230831T153410_full_run/radar_chart.pngbin0 -> 155450 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230831T153410_full_run/report.json420
-rw-r--r--benchmark/reports/PolyGPT/20230901T081331_full_run/radar_chart.pngbin0 -> 161319 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230901T081331_full_run/report.json418
-rw-r--r--benchmark/reports/PolyGPT/20230902T081234_full_run/radar_chart.pngbin0 -> 161319 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230902T081234_full_run/report.json419
-rw-r--r--benchmark/reports/PolyGPT/20230903T081236_full_run/radar_chart.pngbin0 -> 238388 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230903T081236_full_run/report.json858
-rw-r--r--benchmark/reports/PolyGPT/20230904T081401_full_run/radar_chart.pngbin0 -> 237376 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230904T081401_full_run/report.json859
-rw-r--r--benchmark/reports/PolyGPT/20230905T081409_full_run/radar_chart.pngbin0 -> 228456 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230905T081409_full_run/report.json880
-rw-r--r--benchmark/reports/PolyGPT/20230908T011618_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230908T011618_full_run/report.json314
-rw-r--r--benchmark/reports/PolyGPT/20230908T080446_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230908T080446_full_run/report.json314
-rw-r--r--benchmark/reports/PolyGPT/20230909T080421_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230909T080421_full_run/report.json314
-rw-r--r--benchmark/reports/PolyGPT/20230910T080352_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230910T080352_full_run/report.json314
-rw-r--r--benchmark/reports/PolyGPT/20230911T080444_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/PolyGPT/20230911T080444_full_run/report.json314
-rw-r--r--benchmark/reports/PolyGPT/folder10_08-13-01-10/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder10_08-13-01-10/report.json893
-rw-r--r--benchmark/reports/PolyGPT/folder11_08-13-01-52/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder11_08-13-01-52/report.json892
-rw-r--r--benchmark/reports/PolyGPT/folder12_08-13-02-17/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder12_08-13-02-17/report.json893
-rw-r--r--benchmark/reports/PolyGPT/folder13_08-13-02-37/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder13_08-13-02-37/report.json894
-rw-r--r--benchmark/reports/PolyGPT/folder14_08-13-08-12/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder14_08-13-08-12/report.json894
-rw-r--r--benchmark/reports/PolyGPT/folder15_08-14-08-13/radar_chart.pngbin0 -> 234556 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder15_08-14-08-13/report.json888
-rw-r--r--benchmark/reports/PolyGPT/folder15_08-14-08-37/radar_chart.pngbin0 -> 234556 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder15_08-14-08-37/report.json888
-rw-r--r--benchmark/reports/PolyGPT/folder17_08-14-09-48/radar_chart.pngbin0 -> 234556 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder17_08-14-09-48/report.json894
-rw-r--r--benchmark/reports/PolyGPT/folder18_08-14-18-00/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder18_08-14-18-00/report.json925
-rw-r--r--benchmark/reports/PolyGPT/folder19_08-14-18-16/radar_chart.pngbin0 -> 233216 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder19_08-14-18-16/report.json913
-rw-r--r--benchmark/reports/PolyGPT/folder1_08-09-19-03/radar_chart.pngbin0 -> 221168 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder1_08-09-19-03/report.json912
-rw-r--r--benchmark/reports/PolyGPT/folder20_08-14-21-37/radar_chart.pngbin0 -> 233216 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder20_08-14-21-37/report.json913
-rw-r--r--benchmark/reports/PolyGPT/folder21_08-15-08-13/radar_chart.pngbin0 -> 223077 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder21_08-15-08-13/report.json916
-rw-r--r--benchmark/reports/PolyGPT/folder22_08-16-08-14/radar_chart.pngbin0 -> 230158 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder22_08-16-08-14/report.json914
-rw-r--r--benchmark/reports/PolyGPT/folder2_08-10-08-14/radar_chart.pngbin0 -> 219513 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder2_08-10-08-14/report.json914
-rw-r--r--benchmark/reports/PolyGPT/folder3_08-11-08-13/radar_chart.pngbin0 -> 219337 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder3_08-11-08-13/report.json915
-rw-r--r--benchmark/reports/PolyGPT/folder4_08-11-20-28/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder4_08-11-20-28/report.json915
-rw-r--r--benchmark/reports/PolyGPT/folder5_08-12-02-18/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder5_08-12-02-18/report.json915
-rw-r--r--benchmark/reports/PolyGPT/folder6_08-12-02-51/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder6_08-12-02-51/report.json915
-rw-r--r--benchmark/reports/PolyGPT/folder7_08-12-03-03/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder7_08-12-03-03/report.json915
-rw-r--r--benchmark/reports/PolyGPT/folder8_08-12-08-12/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder8_08-12-08-12/report.json915
-rw-r--r--benchmark/reports/PolyGPT/folder9_08-12-17-23/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/PolyGPT/folder9_08-12-17-23/report.json913
-rw-r--r--benchmark/reports/PolyGPT/regression_tests.json58
-rw-r--r--benchmark/reports/PolyGPT/success_rate.json1780
-rw-r--r--benchmark/reports/babyagi/20230817T000257_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230817T000257_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230817T081542_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230817T081542_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230818T081621_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230818T081621_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230819T081418_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230819T081418_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230820T081523_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230820T081523_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230821T081708_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230821T081708_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230822T081534_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230822T081534_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230823T081622_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230823T081622_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230824T032717_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230824T032717_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230824T081600_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230824T081600_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230825T081559_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230825T081559_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230826T081425_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230826T081425_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230827T081454_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230827T081454_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230828T081736_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230828T081736_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230829T081638_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230829T081638_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230830T081613_full_run/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/babyagi/20230830T081613_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230831T081539_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230831T081539_full_run/report.json424
-rw-r--r--benchmark/reports/babyagi/20230831T153608_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230831T153608_full_run/report.json424
-rw-r--r--benchmark/reports/babyagi/20230901T081621_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230901T081621_full_run/report.json424
-rw-r--r--benchmark/reports/babyagi/20230902T081425_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230902T081425_full_run/report.json424
-rw-r--r--benchmark/reports/babyagi/20230903T081538_full_run/radar_chart.pngbin0 -> 216749 bytes
-rw-r--r--benchmark/reports/babyagi/20230903T081538_full_run/report.json874
-rw-r--r--benchmark/reports/babyagi/20230904T081752_full_run/radar_chart.pngbin0 -> 216749 bytes
-rw-r--r--benchmark/reports/babyagi/20230904T081752_full_run/report.json874
-rw-r--r--benchmark/reports/babyagi/20230905T081727_full_run/radar_chart.pngbin0 -> 216802 bytes
-rw-r--r--benchmark/reports/babyagi/20230905T081727_full_run/report.json894
-rw-r--r--benchmark/reports/babyagi/20230908T003423_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230908T003423_full_run/report.json313
-rw-r--r--benchmark/reports/babyagi/20230908T011924_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230908T011924_full_run/report.json313
-rw-r--r--benchmark/reports/babyagi/20230908T080803_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230908T080803_full_run/report.json313
-rw-r--r--benchmark/reports/babyagi/20230909T080618_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230909T080618_full_run/report.json313
-rw-r--r--benchmark/reports/babyagi/20230910T080648_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230910T080648_full_run/report.json313
-rw-r--r--benchmark/reports/babyagi/20230911T080641_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/babyagi/20230911T080641_full_run/report.json313
-rw-r--r--  benchmark/reports/babyagi/file10_07-23-21-06.json  268
-rw-r--r--  benchmark/reports/babyagi/file10_07-23-21-07.json  268
-rw-r--r--  benchmark/reports/babyagi/file12_07-23-22-28.json  268
-rw-r--r--  benchmark/reports/babyagi/file13_07-24-08-21.json  268
-rw-r--r--  benchmark/reports/babyagi/file14_07-24-22-15.json  531
-rw-r--r--  benchmark/reports/babyagi/file15_07-24-23-53.json  531
-rw-r--r--  benchmark/reports/babyagi/file16_07-25-01-07.json  531
-rw-r--r--  benchmark/reports/babyagi/file17_07-25-01-38.json  531
-rw-r--r--  benchmark/reports/babyagi/file18_07-25-03-16.json  531
-rw-r--r--  benchmark/reports/babyagi/file19_07-25-03-38.json  531
-rw-r--r--  benchmark/reports/babyagi/file1_07-21-18-20.json  268
-rw-r--r--  benchmark/reports/babyagi/file20_07-25-04-01.json  531
-rw-r--r--  benchmark/reports/babyagi/file21_07-25-04-22.json  531
-rw-r--r--  benchmark/reports/babyagi/file22_07-25-08-22.json  531
-rw-r--r--  benchmark/reports/babyagi/file23_07-25-18-13.json  188
-rw-r--r--  benchmark/reports/babyagi/file23_07-25-18-14.json  188
-rw-r--r--  benchmark/reports/babyagi/file23_07-25-18-16.json  188
-rw-r--r--  benchmark/reports/babyagi/file26_07-26-00-56.json  188
-rw-r--r--  benchmark/reports/babyagi/file27_07-26-03-17.json  188
-rw-r--r--  benchmark/reports/babyagi/file28_07-26-08-21.json  188
-rw-r--r--  benchmark/reports/babyagi/file29_07-27-13-33.json  32
-rw-r--r--  benchmark/reports/babyagi/file2_07-22-08-18.json  268
-rw-r--r--  benchmark/reports/babyagi/file30_07-27-13-40.json  32
-rw-r--r--  benchmark/reports/babyagi/file31_07-27-18-46.json  32
-rw-r--r--  benchmark/reports/babyagi/file32_07-27-19-27.json  32
-rw-r--r--  benchmark/reports/babyagi/file33_07-27-19-59.json  189
-rw-r--r--  benchmark/reports/babyagi/file34_07-28-03-56.json  618
-rw-r--r--  benchmark/reports/babyagi/file35_07-28-04-13.json  618
-rw-r--r--  benchmark/reports/babyagi/file36_07-28-08-14.json  618
-rw-r--r--  benchmark/reports/babyagi/file37_07-29-08-14.json  618
-rw-r--r--  benchmark/reports/babyagi/file38_07-29-09-30.json  636
-rw-r--r--  benchmark/reports/babyagi/file39_07-29-10-20.json  636
-rw-r--r--  benchmark/reports/babyagi/file3_07-22-15-12.json  268
-rw-r--r--  benchmark/reports/babyagi/file40_07-29-10-47.json  636
-rw-r--r--  benchmark/reports/babyagi/file41_07-29-16-11.json  636
-rw-r--r--  benchmark/reports/babyagi/file42_07-29-17-23.json  636
-rw-r--r--  benchmark/reports/babyagi/file43_07-29-18-09.json  636
-rw-r--r--  benchmark/reports/babyagi/file44_07-30-00-53.json  636
-rw-r--r--  benchmark/reports/babyagi/file45_07-30-01-41.json  636
-rw-r--r--  benchmark/reports/babyagi/file46_07-30-03-08.json  636
-rw-r--r--  benchmark/reports/babyagi/file47_07-30-04-26.json  636
-rw-r--r--  benchmark/reports/babyagi/file48_07-30-08-14.json  636
-rw-r--r--  benchmark/reports/babyagi/file4_07-23-08-20.json  268
-rw-r--r--  benchmark/reports/babyagi/file5_07-23-16-24.json  268
-rw-r--r--  benchmark/reports/babyagi/file6_07-23-19-11.json  268
-rw-r--r--  benchmark/reports/babyagi/file7_07-23-19-28.json  268
-rw-r--r--  benchmark/reports/babyagi/file8_07-23-19-37.json  268
-rw-r--r--  benchmark/reports/babyagi/file9_07-23-19-55.json  268
-rw-r--r--  benchmark/reports/babyagi/folder11_08-01-02-46/report.json  148
-rw-r--r--  benchmark/reports/babyagi/folder11_08-01-12-50/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder12_08-01-03-23/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder12_08-01-13-39/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder13_08-01-16-20/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder13_08-01-16-20/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder14_08-01-17-00/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder14_08-01-17-00/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder15_08-01-17-35/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder15_08-01-17-35/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder16_08-01-19-54/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder16_08-01-19-54/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder18_08-02-01-36/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder18_08-02-01-36/report.json  658
-rw-r--r--  benchmark/reports/babyagi/folder19_08-02-02-39/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder19_08-02-02-39/report.json  658
-rw-r--r--  benchmark/reports/babyagi/folder19_08-02-03-14/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder19_08-02-03-14/report.json  658
-rw-r--r--  benchmark/reports/babyagi/folder1_07-30-22-55/report.json  645
-rw-r--r--  benchmark/reports/babyagi/folder20_08-02-04-02/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder20_08-02-04-02/report.json  658
-rw-r--r--  benchmark/reports/babyagi/folder21_08-02-08-15/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder21_08-02-08-15/report.json  658
-rw-r--r--  benchmark/reports/babyagi/folder22_08-02-15-21/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder22_08-02-15-21/report.json  658
-rw-r--r--  benchmark/reports/babyagi/folder23_08-02-17-23/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder23_08-02-17-23/report.json  658
-rw-r--r--  benchmark/reports/babyagi/folder24_08-02-17-41/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/babyagi/folder24_08-02-17-41/report.json  658
-rw-r--r--  benchmark/reports/babyagi/folder25_08-03-08-16/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/babyagi/folder25_08-03-08-16/report.json  694
-rw-r--r--  benchmark/reports/babyagi/folder26_08-03-23-52/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/babyagi/folder26_08-03-23-52/report.json  694
-rw-r--r--  benchmark/reports/babyagi/folder27_08-04-03-27/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/babyagi/folder27_08-04-03-27/report.json  694
-rw-r--r--  benchmark/reports/babyagi/folder28_08-04-04-34/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/babyagi/folder28_08-04-04-34/report.json  694
-rw-r--r--  benchmark/reports/babyagi/folder29_08-04-08-15/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/babyagi/folder29_08-04-08-15/report.json  694
-rw-r--r--  benchmark/reports/babyagi/folder2_07-31-02-10/report.json  645
-rw-r--r--  benchmark/reports/babyagi/folder30_08-05-08-14/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/babyagi/folder30_08-05-08-14/report.json  693
-rw-r--r--  benchmark/reports/babyagi/folder31_08-06-08-14/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/babyagi/folder31_08-06-08-14/report.json  772
-rw-r--r--  benchmark/reports/babyagi/folder32_08-07-08-16/radar_chart.png  bin 0 -> 219513 bytes
-rw-r--r--  benchmark/reports/babyagi/folder32_08-07-08-16/report.json  879
-rw-r--r--  benchmark/reports/babyagi/folder33_08-08-08-15/radar_chart.png  bin 0 -> 219513 bytes
-rw-r--r--  benchmark/reports/babyagi/folder33_08-08-08-15/report.json  879
-rw-r--r--  benchmark/reports/babyagi/folder34_08-09-03-07/radar_chart.png  bin 0 -> 219513 bytes
-rw-r--r--  benchmark/reports/babyagi/folder34_08-09-03-07/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder35_08-09-08-17/radar_chart.png  bin 0 -> 219513 bytes
-rw-r--r--  benchmark/reports/babyagi/folder35_08-09-08-17/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder36_08-10-08-17/radar_chart.png  bin 0 -> 219513 bytes
-rw-r--r--  benchmark/reports/babyagi/folder36_08-10-08-17/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder37_08-11-08-15/radar_chart.png  bin 0 -> 219337 bytes
-rw-r--r--  benchmark/reports/babyagi/folder37_08-11-08-15/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder38_08-12-02-21/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder38_08-12-02-21/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder39_08-12-02-54/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder39_08-12-02-54/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder3_07-31-03-08/report.json  627
-rw-r--r--  benchmark/reports/babyagi/folder40_08-12-03-06/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder40_08-12-03-06/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder41_08-12-08-16/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder41_08-12-08-16/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder42_08-12-17-26/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder42_08-12-17-26/report.json  914
-rw-r--r--  benchmark/reports/babyagi/folder43_08-13-01-12/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder43_08-13-01-12/report.json  894
-rw-r--r--  benchmark/reports/babyagi/folder44_08-13-01-54/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder44_08-13-01-54/report.json  894
-rw-r--r--  benchmark/reports/babyagi/folder45_08-13-02-19/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder45_08-13-02-19/report.json  894
-rw-r--r--  benchmark/reports/babyagi/folder46_08-13-02-40/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder46_08-13-02-40/report.json  894
-rw-r--r--  benchmark/reports/babyagi/folder47_08-13-08-15/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder47_08-13-08-15/report.json  894
-rw-r--r--  benchmark/reports/babyagi/folder48_08-14-21-40/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder48_08-14-21-40/report.json  924
-rw-r--r--  benchmark/reports/babyagi/folder49_08-15-08-15/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder49_08-15-08-15/report.json  924
-rw-r--r--  benchmark/reports/babyagi/folder4_07-31-12-47/report.json  13
-rw-r--r--  benchmark/reports/babyagi/folder50_08-16-08-17/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/babyagi/folder50_08-16-08-17/report.json  924
-rw-r--r--  benchmark/reports/babyagi/folder5_07-31-13-07/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder6_07-31-16-13/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder7_07-31-19-07/report.json  657
-rw-r--r--  benchmark/reports/babyagi/folder8_07-31-19-41/report.json  657
-rw-r--r--  benchmark/reports/babyagi/regression_tests.json  1
-rw-r--r--  benchmark/reports/babyagi/success_rate.json  2435
-rw-r--r--  benchmark/reports/beebot/20230817T000111_full_run/radar_chart.png  bin 0 -> 234343 bytes
-rw-r--r--  benchmark/reports/beebot/20230817T000111_full_run/report.json  881
-rw-r--r--  benchmark/reports/beebot/20230817T081401_full_run/radar_chart.png  bin 0 -> 211810 bytes
-rw-r--r--  benchmark/reports/beebot/20230817T081401_full_run/report.json  891
-rw-r--r--  benchmark/reports/beebot/20230818T081430_full_run/radar_chart.png  bin 0 -> 211810 bytes
-rw-r--r--  benchmark/reports/beebot/20230818T081430_full_run/report.json  891
-rw-r--r--  benchmark/reports/beebot/20230819T081151_full_run/radar_chart.png  bin 0 -> 243898 bytes
-rw-r--r--  benchmark/reports/beebot/20230819T081151_full_run/report.json  875
-rw-r--r--  benchmark/reports/beebot/20230820T081303_full_run/radar_chart.png  bin 0 -> 240683 bytes
-rw-r--r--  benchmark/reports/beebot/20230820T081303_full_run/report.json  876
-rw-r--r--  benchmark/reports/beebot/20230821T081428_full_run/radar_chart.png  bin 0 -> 221077 bytes
-rw-r--r--  benchmark/reports/beebot/20230821T081428_full_run/report.json  892
-rw-r--r--  benchmark/reports/beebot/20230822T081327_full_run/radar_chart.png  bin 0 -> 224625 bytes
-rw-r--r--  benchmark/reports/beebot/20230822T081327_full_run/report.json  880
-rw-r--r--  benchmark/reports/beebot/20230823T081409_full_run/radar_chart.png  bin 0 -> 211475 bytes
-rw-r--r--  benchmark/reports/beebot/20230823T081409_full_run/report.json  891
-rw-r--r--  benchmark/reports/beebot/20230824T032431_full_run/radar_chart.png  bin 0 -> 228521 bytes
-rw-r--r--  benchmark/reports/beebot/20230824T032431_full_run/report.json  877
-rw-r--r--  benchmark/reports/beebot/20230824T081331_full_run/radar_chart.png  bin 0 -> 218146 bytes
-rw-r--r--  benchmark/reports/beebot/20230824T081331_full_run/report.json  884
-rw-r--r--  benchmark/reports/beebot/20230825T081419_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/20230825T081419_full_run/report.json  892
-rw-r--r--  benchmark/reports/beebot/20230826T081219_full_run/radar_chart.png  bin 0 -> 218513 bytes
-rw-r--r--  benchmark/reports/beebot/20230826T081219_full_run/report.json  887
-rw-r--r--  benchmark/reports/beebot/20230827T081219_full_run/radar_chart.png  bin 0 -> 229451 bytes
-rw-r--r--  benchmark/reports/beebot/20230827T081219_full_run/report.json  884
-rw-r--r--  benchmark/reports/beebot/20230828T081452_full_run/radar_chart.png  bin 0 -> 232786 bytes
-rw-r--r--  benchmark/reports/beebot/20230828T081452_full_run/report.json  884
-rw-r--r--  benchmark/reports/beebot/20230829T081405_full_run/radar_chart.png  bin 0 -> 219103 bytes
-rw-r--r--  benchmark/reports/beebot/20230829T081405_full_run/report.json  884
-rw-r--r--  benchmark/reports/beebot/20230830T081410_full_run/radar_chart.png  bin 0 -> 224625 bytes
-rw-r--r--  benchmark/reports/beebot/20230830T081410_full_run/report.json  880
-rw-r--r--  benchmark/reports/beebot/20230831T081406_full_run/radar_chart.png  bin 0 -> 152685 bytes
-rw-r--r--  benchmark/reports/beebot/20230831T081406_full_run/report.json  420
-rw-r--r--  benchmark/reports/beebot/20230831T153356_full_run/radar_chart.png  bin 0 -> 152685 bytes
-rw-r--r--  benchmark/reports/beebot/20230831T153356_full_run/report.json  420
-rw-r--r--  benchmark/reports/beebot/20230901T081447_full_run/radar_chart.png  bin 0 -> 178964 bytes
-rw-r--r--  benchmark/reports/beebot/20230901T081447_full_run/report.json  415
-rw-r--r--  benchmark/reports/beebot/20230902T081302_full_run/radar_chart.png  bin 0 -> 165027 bytes
-rw-r--r--  benchmark/reports/beebot/20230902T081302_full_run/report.json  418
-rw-r--r--  benchmark/reports/beebot/20230903T081311_full_run/radar_chart.png  bin 0 -> 238848 bytes
-rw-r--r--  benchmark/reports/beebot/20230903T081311_full_run/report.json  853
-rw-r--r--  benchmark/reports/beebot/20230904T081412_full_run/radar_chart.png  bin 0 -> 235784 bytes
-rw-r--r--  benchmark/reports/beebot/20230904T081412_full_run/report.json  858
-rw-r--r--  benchmark/reports/beebot/20230905T081437_full_run/radar_chart.png  bin 0 -> 228615 bytes
-rw-r--r--  benchmark/reports/beebot/20230905T081437_full_run/report.json  882
-rw-r--r--  benchmark/reports/beebot/20230908T011630_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/beebot/20230908T011630_full_run/report.json  313
-rw-r--r--  benchmark/reports/beebot/20230908T080421_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/beebot/20230908T080421_full_run/report.json  313
-rw-r--r--  benchmark/reports/beebot/20230909T080428_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/beebot/20230909T080428_full_run/report.json  313
-rw-r--r--  benchmark/reports/beebot/20230910T080431_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/beebot/20230910T080431_full_run/report.json  313
-rw-r--r--  benchmark/reports/beebot/20230911T080433_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/beebot/20230911T080433_full_run/report.json  313
-rw-r--r--  benchmark/reports/beebot/20230911T151304_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/beebot/20230911T151304_full_run/report.json  313
-rw-r--r--  benchmark/reports/beebot/file10_07-23-08-17.json  256
-rw-r--r--  benchmark/reports/beebot/file11_07-23-16-21.json  268
-rw-r--r--  benchmark/reports/beebot/file12_07-23-19-07.json  259
-rw-r--r--  benchmark/reports/beebot/file13_07-23-19-27.json  255
-rw-r--r--  benchmark/reports/beebot/file13_07-23-19-34.json  256
-rw-r--r--  benchmark/reports/beebot/file15_07-23-19-54.json  255
-rw-r--r--  benchmark/reports/beebot/file16_07-23-21-03.json  259
-rw-r--r--  benchmark/reports/beebot/file16_07-23-21-06.json  268
-rw-r--r--  benchmark/reports/beebot/file18_07-23-22-26.json  259
-rw-r--r--  benchmark/reports/beebot/file19_07-24-08-19.json  263
-rw-r--r--  benchmark/reports/beebot/file1_07-20-22-48.json  268
-rw-r--r--  benchmark/reports/beebot/file20_07-24-23-51.json  515
-rw-r--r--  benchmark/reports/beebot/file21_07-25-01-05.json  531
-rw-r--r--  benchmark/reports/beebot/file22_07-25-01-35.json  531
-rw-r--r--  benchmark/reports/beebot/file23_07-25-03-13.json  518
-rw-r--r--  benchmark/reports/beebot/file24_07-25-03-35.json  517
-rw-r--r--  benchmark/reports/beebot/file24_07-25-03-59.json  518
-rw-r--r--  benchmark/reports/beebot/file25_07-25-04-19.json  516
-rw-r--r--  benchmark/reports/beebot/file27_07-25-08-18.json  519
-rw-r--r--  benchmark/reports/beebot/file28_07-25-18-09.json  180
-rw-r--r--  benchmark/reports/beebot/file28_07-25-18-11.json  180
-rw-r--r--  benchmark/reports/beebot/file28_07-25-18-13.json  188
-rw-r--r--  benchmark/reports/beebot/file31_07-26-00-53.json  180
-rw-r--r--  benchmark/reports/beebot/file32_07-26-03-16.json  590
-rw-r--r--  benchmark/reports/beebot/file33_07-26-08-18.json  584
-rw-r--r--  benchmark/reports/beebot/file34_07-27-19-24.json  31
-rw-r--r--  benchmark/reports/beebot/file35_07-27-19-55.json  181
-rw-r--r--  benchmark/reports/beebot/file36_07-28-03-53.json  618
-rw-r--r--  benchmark/reports/beebot/file36_07-28-04-34.json  608
-rw-r--r--  benchmark/reports/beebot/file38_07-28-08-12.json  618
-rw-r--r--  benchmark/reports/beebot/file39_07-29-08-12.json  618
-rw-r--r--  benchmark/reports/beebot/file3_07-20-23-18.json  268
-rw-r--r--  benchmark/reports/beebot/file40_07-29-09-29.json  626
-rw-r--r--  benchmark/reports/beebot/file41_07-29-10-17.json  636
-rw-r--r--  benchmark/reports/beebot/file42_07-29-10-46.json  636
-rw-r--r--  benchmark/reports/beebot/file43_07-29-16-09.json  636
-rw-r--r--  benchmark/reports/beebot/file44_07-29-17-20.json  628
-rw-r--r--  benchmark/reports/beebot/file45_07-30-00-51.json  624
-rw-r--r--  benchmark/reports/beebot/file46_07-30-01-38.json  627
-rw-r--r--  benchmark/reports/beebot/file47_07-30-03-05.json  624
-rw-r--r--  benchmark/reports/beebot/file48_07-30-04-24.json  623
-rw-r--r--  benchmark/reports/beebot/file49_07-30-08-11.json  626
-rw-r--r--  benchmark/reports/beebot/file4_07-20-22-44.json  262
-rw-r--r--  benchmark/reports/beebot/file4_07-20-23-43.json  268
-rw-r--r--  benchmark/reports/beebot/file5_07-21-00-20.json  259
-rw-r--r--  benchmark/reports/beebot/file6_07-21-08-18.json  268
-rw-r--r--  benchmark/reports/beebot/file7_07-21-18-18.json  260
-rw-r--r--  benchmark/reports/beebot/file8_07-22-08-16.json  268
-rw-r--r--  benchmark/reports/beebot/file9_07-22-15-10.json  268
-rw-r--r--  benchmark/reports/beebot/folder10_07-31-23-16/report.json  13
-rw-r--r--  benchmark/reports/beebot/folder12_08-01-03-21/radar_chart.png  bin 0 -> 180834 bytes
-rw-r--r--  benchmark/reports/beebot/folder12_08-01-03-21/report.json  648
-rw-r--r--  benchmark/reports/beebot/folder12_08-01-12-48/report.json  657
-rw-r--r--  benchmark/reports/beebot/folder13_08-01-08-13/report.json  13
-rw-r--r--  benchmark/reports/beebot/folder13_08-01-13-37/radar_chart.png  bin 0 -> 41082 bytes
-rw-r--r--  benchmark/reports/beebot/folder13_08-01-13-37/report.json  652
-rw-r--r--  benchmark/reports/beebot/folder14_08-01-16-18/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/beebot/folder14_08-01-16-18/report.json  657
-rw-r--r--  benchmark/reports/beebot/folder15_08-01-16-57/radar_chart.png  bin 0 -> 211991 bytes
-rw-r--r--  benchmark/reports/beebot/folder15_08-01-16-57/report.json  645
-rw-r--r--  benchmark/reports/beebot/folder16_08-01-17-31/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/beebot/folder16_08-01-17-31/report.json  657
-rw-r--r--  benchmark/reports/beebot/folder17_08-01-19-52/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/beebot/folder17_08-01-19-52/report.json  657
-rw-r--r--  benchmark/reports/beebot/folder19_08-02-01-34/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/beebot/folder19_08-02-01-34/report.json  658
-rw-r--r--  benchmark/reports/beebot/folder1_07-30-22-53/report.json  639
-rw-r--r--  benchmark/reports/beebot/folder20_08-02-02-36/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/beebot/folder20_08-02-02-36/report.json  658
-rw-r--r--  benchmark/reports/beebot/folder20_08-02-03-13/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/beebot/folder20_08-02-03-13/report.json  658
-rw-r--r--  benchmark/reports/beebot/folder21_08-02-03-58/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/beebot/folder21_08-02-03-58/report.json  658
-rw-r--r--  benchmark/reports/beebot/folder22_08-02-08-12/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/beebot/folder22_08-02-08-12/report.json  658
-rw-r--r--  benchmark/reports/beebot/folder23_08-02-15-17/radar_chart.png  bin 0 -> 226400 bytes
-rw-r--r--  benchmark/reports/beebot/folder23_08-02-15-17/report.json  643
-rw-r--r--  benchmark/reports/beebot/folder24_08-03-08-13/radar_chart.png  bin 0 -> 233261 bytes
-rw-r--r--  benchmark/reports/beebot/folder24_08-03-08-13/report.json  678
-rw-r--r--  benchmark/reports/beebot/folder25_08-03-23-50/radar_chart.png  bin 0 -> 240391 bytes
-rw-r--r--  benchmark/reports/beebot/folder25_08-03-23-50/report.json  677
-rw-r--r--  benchmark/reports/beebot/folder26_08-04-03-24/radar_chart.png  bin 0 -> 240391 bytes
-rw-r--r--  benchmark/reports/beebot/folder26_08-04-03-24/report.json  676
-rw-r--r--  benchmark/reports/beebot/folder27_08-04-08-13/radar_chart.png  bin 0 -> 218696 bytes
-rw-r--r--  benchmark/reports/beebot/folder27_08-04-08-13/report.json  685
-rw-r--r--  benchmark/reports/beebot/folder28_08-05-08-11/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/beebot/folder28_08-05-08-11/report.json  693
-rw-r--r--  benchmark/reports/beebot/folder29_08-06-01-03/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/beebot/folder29_08-06-01-03/report.json  772
-rw-r--r--  benchmark/reports/beebot/folder2_07-31-02-07/report.json  629
-rw-r--r--  benchmark/reports/beebot/folder30_08-06-08-12/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/beebot/folder30_08-06-08-12/report.json  772
-rw-r--r--  benchmark/reports/beebot/folder31_08-06-17-15/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/beebot/folder31_08-06-17-15/report.json  848
-rw-r--r--  benchmark/reports/beebot/folder32_08-07-08-14/radar_chart.png  bin 0 -> 220706 bytes
-rw-r--r--  benchmark/reports/beebot/folder32_08-07-08-14/report.json  877
-rw-r--r--  benchmark/reports/beebot/folder33_08-07-22-57/radar_chart.png  bin 0 -> 220706 bytes
-rw-r--r--  benchmark/reports/beebot/folder33_08-07-22-57/report.json  876
-rw-r--r--  benchmark/reports/beebot/folder34_08-08-08-13/radar_chart.png  bin 0 -> 222071 bytes
-rw-r--r--  benchmark/reports/beebot/folder34_08-08-08-13/report.json  874
-rw-r--r--  benchmark/reports/beebot/folder35_08-09-08-14/radar_chart.png  bin 0 -> 220706 bytes
-rw-r--r--  benchmark/reports/beebot/folder35_08-09-08-14/report.json  912
-rw-r--r--  benchmark/reports/beebot/folder36_08-10-08-14/radar_chart.png  bin 0 -> 226045 bytes
-rw-r--r--  benchmark/reports/beebot/folder36_08-10-08-14/report.json  909
-rw-r--r--  benchmark/reports/beebot/folder37_08-11-08-13/radar_chart.png  bin 0 -> 219337 bytes
-rw-r--r--  benchmark/reports/beebot/folder37_08-11-08-13/report.json  914
-rw-r--r--  benchmark/reports/beebot/folder38_08-11-18-19/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/folder38_08-11-18-19/report.json  914
-rw-r--r--  benchmark/reports/beebot/folder39_08-11-19-57/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/folder39_08-11-19-57/report.json  914
-rw-r--r--  benchmark/reports/beebot/folder3_07-31-03-06/folder11_08-01-02-42/radar_chart.png  bin 0 -> 71588 bytes
-rw-r--r--  benchmark/reports/beebot/folder3_07-31-03-06/folder11_08-01-02-42/report.json  106
-rw-r--r--  benchmark/reports/beebot/folder3_07-31-03-06/report.json  612
-rw-r--r--  benchmark/reports/beebot/folder40_08-11-21-51/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/folder40_08-11-21-51/report.json  914
-rw-r--r--  benchmark/reports/beebot/folder41_08-12-02-19/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/folder41_08-12-02-19/report.json  914
-rw-r--r--  benchmark/reports/beebot/folder42_08-12-02-52/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/folder42_08-12-02-52/report.json  914
-rw-r--r--  benchmark/reports/beebot/folder43_08-12-03-03/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/folder43_08-12-03-03/report.json  914
-rw-r--r--  benchmark/reports/beebot/folder44_08-12-08-12/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/folder44_08-12-08-12/report.json  914
-rw-r--r--  benchmark/reports/beebot/folder45_08-12-17-24/radar_chart.png  bin 0 -> 216380 bytes
-rw-r--r--  benchmark/reports/beebot/folder45_08-12-17-24/report.json  909
-rw-r--r--  benchmark/reports/beebot/folder46_08-13-01-10/radar_chart.png  bin 0 -> 231380 bytes
-rw-r--r--  benchmark/reports/beebot/folder46_08-13-01-10/report.json  879
-rw-r--r--  benchmark/reports/beebot/folder47_08-13-02-38/radar_chart.png  bin 0 -> 229949 bytes
-rw-r--r--  benchmark/reports/beebot/folder47_08-13-02-38/report.json  881
-rw-r--r--  benchmark/reports/beebot/folder48_08-13-08-12/radar_chart.png  bin 0 -> 229485 bytes
-rw-r--r--  benchmark/reports/beebot/folder48_08-13-08-12/report.json  883
-rw-r--r--  benchmark/reports/beebot/folder49_08-14-08-13/radar_chart.png  bin 0 -> 234556 bytes
-rw-r--r--  benchmark/reports/beebot/folder49_08-14-08-13/report.json  893
-rw-r--r--  benchmark/reports/beebot/folder4_07-31-12-44/report.json  13
-rw-r--r--  benchmark/reports/beebot/folder50_08-14-21-37/radar_chart.png  bin 0 -> 242567 bytes
-rw-r--r--  benchmark/reports/beebot/folder50_08-14-21-37/report.json  907
-rw-r--r--  benchmark/reports/beebot/folder51_08-15-08-14/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/beebot/folder51_08-15-08-14/report.json  922
-rw-r--r--  benchmark/reports/beebot/folder52_08-16-08-14/radar_chart.png  bin 0 -> 237700 bytes
-rw-r--r--  benchmark/reports/beebot/folder52_08-16-08-14/report.json  910
-rw-r--r--  benchmark/reports/beebot/folder6_07-31-16-09/radar_chart.png  bin 0 -> 128024 bytes
-rw-r--r--  benchmark/reports/beebot/folder6_07-31-16-09/report.json  657
-rw-r--r--  benchmark/reports/beebot/folder7_07-31-19-05/radar_chart.png  bin 0 -> 198260 bytes
-rw-r--r--  benchmark/reports/beebot/folder7_07-31-19-05/report.json  648
-rw-r--r--  benchmark/reports/beebot/folder8_07-31-19-38/radar_chart.png  bin 0 -> 219064 bytes
-rw-r--r--  benchmark/reports/beebot/folder8_07-31-19-38/report.json  644
-rw-r--r--  benchmark/reports/beebot/folder9_07-31-21-02/report.json  13
-rw-r--r--  benchmark/reports/beebot/regression_tests.json  46
-rw-r--r--  benchmark/reports/beebot/success_rate.json  2494
-rw-r--r--  benchmark/reports/combined_charts/run1/bar_chart.png  bin 0 -> 129722 bytes
-rw-r--r--  benchmark/reports/combined_charts/run1/radar_chart.png  bin 0 -> 248395 bytes
-rw-r--r--  benchmark/reports/combined_charts/run1/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run10/bar_chart.png  bin 0 -> 132343 bytes
-rw-r--r--  benchmark/reports/combined_charts/run10/radar_chart.png  bin 0 -> 233314 bytes
-rw-r--r--  benchmark/reports/combined_charts/run10/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run11/bar_chart.png  bin 0 -> 132343 bytes
-rw-r--r--  benchmark/reports/combined_charts/run11/radar_chart.png  bin 0 -> 233314 bytes
-rw-r--r--  benchmark/reports/combined_charts/run11/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run12/bar_chart.png  bin 0 -> 125792 bytes
-rw-r--r--  benchmark/reports/combined_charts/run12/radar_chart.png  bin 0 -> 244100 bytes
-rw-r--r--  benchmark/reports/combined_charts/run12/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run13/bar_chart.png  bin 0 -> 132320 bytes
-rw-r--r--  benchmark/reports/combined_charts/run13/radar_chart.png  bin 0 -> 237315 bytes
-rw-r--r--  benchmark/reports/combined_charts/run13/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run14/bar_chart.png  bin 0 -> 132343 bytes
-rw-r--r--  benchmark/reports/combined_charts/run14/radar_chart.png  bin 0 -> 233314 bytes
-rw-r--r--  benchmark/reports/combined_charts/run14/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run15/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run15/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run15/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run16/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run16/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run16/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run17/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run17/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run17/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run18/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run18/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run18/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run19/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run19/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run19/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run2/bar_chart.png  bin 0 -> 137014 bytes
-rw-r--r--  benchmark/reports/combined_charts/run2/radar_chart.png  bin 0 -> 240885 bytes
-rw-r--r--  benchmark/reports/combined_charts/run2/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run20/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run20/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run20/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run21/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run21/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run21/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run22/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run22/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run22/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run23/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run23/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run23/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run24/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run24/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run24/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run25/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run25/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run25/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run26/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run26/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run26/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run27/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run27/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run27/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run28/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run28/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run28/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run29/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run29/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run29/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run3/bar_chart.png  bin 0 -> 133482 bytes
-rw-r--r--  benchmark/reports/combined_charts/run3/radar_chart.png  bin 0 -> 241065 bytes
-rw-r--r--  benchmark/reports/combined_charts/run3/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run30/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run30/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run30/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run31/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run31/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run31/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run32/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run32/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run32/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run33/bar_chart.png  bin 0 -> 132369 bytes
-rw-r--r--  benchmark/reports/combined_charts/run33/radar_chart.png  bin 0 -> 231376 bytes
-rw-r--r--  benchmark/reports/combined_charts/run33/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run35.1_best_performances/bar_chart.png  bin 0 -> 124842 bytes
-rw-r--r--  benchmark/reports/combined_charts/run35.1_best_performances/radar_chart.png  bin 0 -> 288426 bytes
-rw-r--r--  benchmark/reports/combined_charts/run35.1_best_performances/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run35/bar_chart.png  bin 0 -> 134558 bytes
-rw-r--r--  benchmark/reports/combined_charts/run35/radar_chart.png  bin 0 -> 268259 bytes
-rw-r--r--  benchmark/reports/combined_charts/run35/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run36/bar_chart.png  bin 0 -> 138914 bytes
-rw-r--r--  benchmark/reports/combined_charts/run36/radar_chart.png  bin 0 -> 261628 bytes
-rw-r--r--  benchmark/reports/combined_charts/run36/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run37/bar_chart.png  bin 0 -> 133596 bytes
-rw-r--r--  benchmark/reports/combined_charts/run37/radar_chart.png  bin 0 -> 276792 bytes
-rw-r--r--  benchmark/reports/combined_charts/run37/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run38/bar_chart.png  bin 0 -> 134558 bytes
-rw-r--r--  benchmark/reports/combined_charts/run38/radar_chart.png  bin 0 -> 268259 bytes
-rw-r--r--  benchmark/reports/combined_charts/run38/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run39/bar_chart.png  bin 0 -> 140075 bytes
-rw-r--r--  benchmark/reports/combined_charts/run39/radar_chart.png  bin 0 -> 242311 bytes
-rw-r--r--  benchmark/reports/combined_charts/run39/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run4/bar_chart.png  bin 0 -> 132343 bytes
-rw-r--r--  benchmark/reports/combined_charts/run4/radar_chart.png  bin 0 -> 233314 bytes
-rw-r--r--  benchmark/reports/combined_charts/run4/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run40/bar_chart.png  bin 0 -> 133510 bytes
-rw-r--r--  benchmark/reports/combined_charts/run40/radar_chart.png  bin 0 -> 255544 bytes
-rw-r--r--  benchmark/reports/combined_charts/run40/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run41/bar_chart.png  bin 0 -> 133510 bytes
-rw-r--r--  benchmark/reports/combined_charts/run41/radar_chart.png  bin 0 -> 255544 bytes
-rw-r--r--  benchmark/reports/combined_charts/run41/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run42/bar_chart.png  bin 0 -> 133667 bytes
-rw-r--r--  benchmark/reports/combined_charts/run42/radar_chart.png  bin 0 -> 283523 bytes
-rw-r--r--  benchmark/reports/combined_charts/run42/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run43/bar_chart.png  bin 0 -> 129304 bytes
-rw-r--r--  benchmark/reports/combined_charts/run43/radar_chart.png  bin 0 -> 272442 bytes
-rw-r--r--  benchmark/reports/combined_charts/run43/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run44/bar_chart.png  bin 0 -> 138956 bytes
-rw-r--r--  benchmark/reports/combined_charts/run44/radar_chart.png  bin 0 -> 276991 bytes
-rw-r--r--  benchmark/reports/combined_charts/run44/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run45/bar_chart.png  bin 0 -> 133667 bytes
-rw-r--r--  benchmark/reports/combined_charts/run45/radar_chart.png  bin 0 -> 283523 bytes
-rw-r--r--  benchmark/reports/combined_charts/run45/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run46/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run47/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run48/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run49/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run5/bar_chart.png  bin 0 -> 132343 bytes
-rw-r--r--  benchmark/reports/combined_charts/run5/radar_chart.png  bin 0 -> 233314 bytes
-rw-r--r--  benchmark/reports/combined_charts/run5/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run50/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run51/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run6/bar_chart.png  bin 0 -> 132313 bytes
-rw-r--r--  benchmark/reports/combined_charts/run6/radar_chart.png  bin 0 -> 232353 bytes
-rw-r--r--  benchmark/reports/combined_charts/run6/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run7/bar_chart.png  bin 0 -> 132343 bytes
-rw-r--r--  benchmark/reports/combined_charts/run7/radar_chart.png  bin 0 -> 233314 bytes
-rw-r--r--  benchmark/reports/combined_charts/run7/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run8/bar_chart.png  bin 0 -> 132343 bytes
-rw-r--r--  benchmark/reports/combined_charts/run8/radar_chart.png  bin 0 -> 233314 bytes
-rw-r--r--  benchmark/reports/combined_charts/run8/run_info.json  1
-rw-r--r--  benchmark/reports/combined_charts/run9/bar_chart.png  bin 0 -> 132320 bytes
-rw-r--r--  benchmark/reports/combined_charts/run9/radar_chart.png  bin 0 -> 237315 bytes
-rw-r--r--  benchmark/reports/combined_charts/run9/run_info.json  1
-rw-r--r--  benchmark/reports/gpt-engineer/1.json  18
-rw-r--r--  benchmark/reports/gpt-engineer/20230817T000115_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230817T000115_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230817T081320_full_run/radar_chart.png  bin 0 -> 211810 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230817T081320_full_run/report.json  893
-rw-r--r--  benchmark/reports/gpt-engineer/20230818T081304_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230818T081304_full_run/report.json  895
-rw-r--r--  benchmark/reports/gpt-engineer/20230819T081215_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230819T081215_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230820T081223_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230820T081223_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230821T081448_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230821T081448_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230822T081342_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230822T081342_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230823T081255_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230823T081255_full_run/report.json  895
-rw-r--r--  benchmark/reports/gpt-engineer/20230824T032419_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230824T032419_full_run/report.json  893
-rw-r--r--  benchmark/reports/gpt-engineer/20230824T081353_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230824T081353_full_run/report.json  895
-rw-r--r--  benchmark/reports/gpt-engineer/20230825T081258_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230825T081258_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230826T081238_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230826T081238_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230827T081207_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230827T081207_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230828T081524_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230828T081524_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230829T081337_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230829T081337_full_run/report.json  895
-rw-r--r--  benchmark/reports/gpt-engineer/20230830T081418_full_run/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230830T081418_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230831T081324_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230831T081324_full_run/report.json  424
-rw-r--r--  benchmark/reports/gpt-engineer/20230831T153354_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230831T153354_full_run/report.json  425
-rw-r--r--  benchmark/reports/gpt-engineer/20230901T081353_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230901T081353_full_run/report.json  425
-rw-r--r--  benchmark/reports/gpt-engineer/20230902T081227_full_run/radar_chart.png  bin 0 -> 152685 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230902T081227_full_run/report.json  423
-rw-r--r--  benchmark/reports/gpt-engineer/20230903T081151_full_run/radar_chart.png  bin 0 -> 216749 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230903T081151_full_run/report.json  874
-rw-r--r--  benchmark/reports/gpt-engineer/20230904T081358_full_run/radar_chart.png  bin 0 -> 216749 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230904T081358_full_run/report.json  875
-rw-r--r--  benchmark/reports/gpt-engineer/20230905T081409_full_run/radar_chart.png  bin 0 -> 216802 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230905T081409_full_run/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/20230908T011611_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230908T011611_full_run/report.json  314
-rw-r--r--  benchmark/reports/gpt-engineer/20230908T080511_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230908T080511_full_run/report.json  314
-rw-r--r--  benchmark/reports/gpt-engineer/20230909T080345_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230909T080345_full_run/report.json  314
-rw-r--r--  benchmark/reports/gpt-engineer/20230910T080349_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230910T080349_full_run/report.json  314
-rw-r--r--  benchmark/reports/gpt-engineer/20230911T080405_full_run/radar_chart.png  bin 0 -> 149723 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/20230911T080405_full_run/report.json  314
-rw-r--r--  benchmark/reports/gpt-engineer/file11_07-20-23-17.json  268
-rw-r--r--  benchmark/reports/gpt-engineer/file12_07-20-23-43.json  268
-rw-r--r--  benchmark/reports/gpt-engineer/file13_07-21-00-20.json  263
-rw-r--r--  benchmark/reports/gpt-engineer/file14_07-21-08-18.json  266
-rw-r--r--  benchmark/reports/gpt-engineer/file15_07-21-18-17.json  268
-rw-r--r--  benchmark/reports/gpt-engineer/file16_07-22-08-16.json  264
-rw-r--r--  benchmark/reports/gpt-engineer/file17_07-22-15-10.json  264
-rw-r--r--  benchmark/reports/gpt-engineer/file18_07-23-08-16.json  265
-rw-r--r--  benchmark/reports/gpt-engineer/file19_07-23-16-21.json  265
-rw-r--r--  benchmark/reports/gpt-engineer/file1_07-18-00-17.json  173
-rw-r--r--  benchmark/reports/gpt-engineer/file20_07-23-19-07.json  265
-rw-r--r--  benchmark/reports/gpt-engineer/file21_07-23-19-26.json  265
-rw-r--r--  benchmark/reports/gpt-engineer/file22_07-23-19-35.json  263
-rw-r--r--  benchmark/reports/gpt-engineer/file23_07-23-19-53.json  265
-rw-r--r--  benchmark/reports/gpt-engineer/file24_07-23-21-03.json  264
-rw-r--r--  benchmark/reports/gpt-engineer/file24_07-23-21-05.json  268
-rw-r--r--  benchmark/reports/gpt-engineer/file26_07-23-22-25.json  264
-rw-r--r--  benchmark/reports/gpt-engineer/file27_07-24-08-19.json  264
-rw-r--r--  benchmark/reports/gpt-engineer/file28_07-24-22-11.json  527
-rw-r--r--  benchmark/reports/gpt-engineer/file29_07-24-23-50.json  527
-rw-r--r--  benchmark/reports/gpt-engineer/file2_07-18-02-44.json  267
-rw-r--r--  benchmark/reports/gpt-engineer/file30_07-25-01-05.json  530
-rw-r--r--  benchmark/reports/gpt-engineer/file31_07-25-01-35.json  530
-rw-r--r--  benchmark/reports/gpt-engineer/file32_07-25-03-14.json  530
-rw-r--r--  benchmark/reports/gpt-engineer/file33_07-25-03-35.json  529
-rw-r--r--  benchmark/reports/gpt-engineer/file34_07-25-03-58.json  530
-rw-r--r--  benchmark/reports/gpt-engineer/file35_07-25-04-19.json  528
-rw-r--r--  benchmark/reports/gpt-engineer/file36_07-25-08-18.json  529
-rw-r--r--  benchmark/reports/gpt-engineer/file37_07-25-18-09.json  186
-rw-r--r--  benchmark/reports/gpt-engineer/file37_07-25-18-11.json  187
-rw-r--r--  benchmark/reports/gpt-engineer/file37_07-25-18-13.json  187
-rw-r--r--  benchmark/reports/gpt-engineer/file3_07-18-08-19.json  267
-rw-r--r--  benchmark/reports/gpt-engineer/file40_07-26-00-53.json  187
-rw-r--r--  benchmark/reports/gpt-engineer/file41_07-26-03-15.json  187
-rw-r--r--  benchmark/reports/gpt-engineer/file42_07-26-08-17.json  187
-rw-r--r--  benchmark/reports/gpt-engineer/file43_07-27-13-30.json  31
-rw-r--r--  benchmark/reports/gpt-engineer/file44_07-27-13-37.json  31
-rw-r--r--  benchmark/reports/gpt-engineer/file45_07-27-18-44.json  31
-rw-r--r--  benchmark/reports/gpt-engineer/file46_07-27-19-23.json  32
-rw-r--r--  benchmark/reports/gpt-engineer/file47_07-27-19-56.json  188
-rw-r--r--  benchmark/reports/gpt-engineer/file48_07-28-04-10.json  617
-rw-r--r--  benchmark/reports/gpt-engineer/file49_07-28-08-12.json  617
-rw-r--r--  benchmark/reports/gpt-engineer/file4_07-18-16-19.json  267
-rw-r--r--  benchmark/reports/gpt-engineer/file50_07-29-08-11.json  614
-rw-r--r--  benchmark/reports/gpt-engineer/file51_07-29-09-29.json  631
-rw-r--r--  benchmark/reports/gpt-engineer/file52_07-29-10-17.json  635
-rw-r--r--  benchmark/reports/gpt-engineer/file53_07-29-10-45.json  634
-rw-r--r--  benchmark/reports/gpt-engineer/file54_07-29-16-10.json  633
-rw-r--r--  benchmark/reports/gpt-engineer/file55_07-29-17-21.json  634
-rw-r--r--  benchmark/reports/gpt-engineer/file56_07-29-18-06.json  635
-rw-r--r--  benchmark/reports/gpt-engineer/file57_07-30-00-51.json  636
-rw-r--r--  benchmark/reports/gpt-engineer/file58_07-30-01-38.json  635
-rw-r--r--  benchmark/reports/gpt-engineer/file59_07-30-03-05.json  634
-rw-r--r--  benchmark/reports/gpt-engineer/file5_07-19-08-18.json  268
-rw-r--r--  benchmark/reports/gpt-engineer/file60_07-30-04-24.json  635
-rw-r--r--  benchmark/reports/gpt-engineer/file61_07-30-08-12.json  634
-rw-r--r--  benchmark/reports/gpt-engineer/file6_07-19-21-55.json  264
-rw-r--r--  benchmark/reports/gpt-engineer/file7_07-20-08-18.json  267
-rw-r--r--  benchmark/reports/gpt-engineer/file8_07-20-20-10.json  266
-rw-r--r--  benchmark/reports/gpt-engineer/file9_07-20-22-44.json  264
-rw-r--r--  benchmark/reports/gpt-engineer/file9_07-20-22-48.json  263
-rw-r--r--  benchmark/reports/gpt-engineer/folder10_08-01-02-42/report.json  294
-rw-r--r--  benchmark/reports/gpt-engineer/folder10_08-01-12-47/report.json  656
-rw-r--r--  benchmark/reports/gpt-engineer/folder11_08-01-03-20/report.json  656
-rw-r--r--  benchmark/reports/gpt-engineer/folder11_08-01-13-37/report.json  657
-rw-r--r--  benchmark/reports/gpt-engineer/folder12_08-01-16-18/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder12_08-01-16-18/report.json  656
-rw-r--r--  benchmark/reports/gpt-engineer/folder13_08-01-16-57/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder13_08-01-16-57/report.json  656
-rw-r--r--  benchmark/reports/gpt-engineer/folder14_08-01-17-31/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder14_08-01-17-31/report.json  658
-rw-r--r--  benchmark/reports/gpt-engineer/folder15_08-01-19-51/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder15_08-01-19-51/report.json  658
-rw-r--r--  benchmark/reports/gpt-engineer/folder17_08-02-01-34/radar_chart.png  bin 0 -> 211464 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder17_08-02-01-34/report.json  657
-rw-r--r--  benchmark/reports/gpt-engineer/folder18_08-02-02-36/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder18_08-02-02-36/report.json  658
-rw-r--r--  benchmark/reports/gpt-engineer/folder18_08-02-03-12/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder18_08-02-03-12/report.json  658
-rw-r--r--  benchmark/reports/gpt-engineer/folder19_08-02-03-58/radar_chart.png  bin 0 -> 211464 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder19_08-02-03-58/report.json  657
-rw-r--r--  benchmark/reports/gpt-engineer/folder1_07-30-22-53/report.json  644
-rw-r--r--  benchmark/reports/gpt-engineer/folder20_08-02-08-12/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder20_08-02-08-12/report.json  658
-rw-r--r--  benchmark/reports/gpt-engineer/folder21_08-02-15-17/radar_chart.png  bin 0 -> 211464 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder21_08-02-15-17/report.json  657
-rw-r--r--  benchmark/reports/gpt-engineer/folder22_08-02-17-21/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder22_08-02-17-21/report.json  659
-rw-r--r--  benchmark/reports/gpt-engineer/folder23_08-02-17-38/radar_chart.png  bin 0 -> 209700 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder23_08-02-17-38/report.json  658
-rw-r--r--  benchmark/reports/gpt-engineer/folder24_08-03-08-13/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder24_08-03-08-13/report.json  694
-rw-r--r--  benchmark/reports/gpt-engineer/folder25_08-03-23-50/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder25_08-03-23-50/report.json  694
-rw-r--r--  benchmark/reports/gpt-engineer/folder26_08-04-03-23/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder26_08-04-03-23/report.json  695
-rw-r--r--  benchmark/reports/gpt-engineer/folder27_08-04-04-17/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder27_08-04-04-17/report.json  694
-rw-r--r--  benchmark/reports/gpt-engineer/folder28_08-04-08-13/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder28_08-04-08-13/report.json  694
-rw-r--r--  benchmark/reports/gpt-engineer/folder29_08-04-18-32/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder29_08-04-18-32/report.json  694
-rw-r--r--  benchmark/reports/gpt-engineer/folder2_07-31-02-07/report.json  642
-rw-r--r--  benchmark/reports/gpt-engineer/folder30_08-04-18-37/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder30_08-04-18-37/report.json  693
-rw-r--r--  benchmark/reports/gpt-engineer/folder31_08-04-19-56/radar_chart.png  bin 0 -> 209974 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder31_08-04-19-56/report.json  692
-rw-r--r--  benchmark/reports/gpt-engineer/folder32_08-04-22-14/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder32_08-04-22-14/report.json  692
-rw-r--r--  benchmark/reports/gpt-engineer/folder33_08-05-08-12/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder33_08-05-08-12/report.json  693
-rw-r--r--  benchmark/reports/gpt-engineer/folder34_08-06-08-11/radar_chart.png  bin 0 -> 208769 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder34_08-06-08-11/report.json  771
-rw-r--r--  benchmark/reports/gpt-engineer/folder35_08-07-08-13/radar_chart.png  bin 0 -> 221168 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder35_08-07-08-13/report.json  882
-rw-r--r--  benchmark/reports/gpt-engineer/folder36_08-08-08-14/radar_chart.png  bin 0 -> 219513 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder36_08-08-08-14/report.json  883
-rw-r--r--  benchmark/reports/gpt-engineer/folder37_08-09-08-14/radar_chart.png  bin 0 -> 219513 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder37_08-09-08-14/report.json  915
-rw-r--r--  benchmark/reports/gpt-engineer/folder38_08-10-08-13/radar_chart.png  bin 0 -> 219513 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder38_08-10-08-13/report.json  915
-rw-r--r--  benchmark/reports/gpt-engineer/folder39_08-11-08-12/radar_chart.png  bin 0 -> 219337 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder39_08-11-08-12/report.json  915
-rw-r--r--  benchmark/reports/gpt-engineer/folder3_07-31-12-44/report.json  14
-rw-r--r--  benchmark/reports/gpt-engineer/folder40_08-12-02-18/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder40_08-12-02-18/report.json  914
-rw-r--r--  benchmark/reports/gpt-engineer/folder41_08-12-02-51/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder41_08-12-02-51/report.json  913
-rw-r--r--  benchmark/reports/gpt-engineer/folder42_08-12-03-03/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder42_08-12-03-03/report.json  913
-rw-r--r--  benchmark/reports/gpt-engineer/folder43_08-12-08-12/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder43_08-12-08-12/report.json  914
-rw-r--r--  benchmark/reports/gpt-engineer/folder44_08-12-17-24/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder44_08-12-17-24/report.json  914
-rw-r--r--  benchmark/reports/gpt-engineer/folder45_08-13-01-10/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder45_08-13-01-10/report.json  893
-rw-r--r--  benchmark/reports/gpt-engineer/folder46_08-13-01-52/radar_chart.png  bin 0 -> 211810 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder46_08-13-01-52/report.json  893
-rw-r--r--  benchmark/reports/gpt-engineer/folder47_08-13-02-16/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder47_08-13-02-16/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/folder48_08-13-02-37/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder48_08-13-02-37/report.json  893
-rw-r--r--  benchmark/reports/gpt-engineer/folder49_08-13-08-11/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder49_08-13-08-11/report.json  894
-rw-r--r--  benchmark/reports/gpt-engineer/folder4_07-31-13-05/report.json  657
-rw-r--r--  benchmark/reports/gpt-engineer/folder50_08-14-08-13/radar_chart.png  bin 0 -> 212836 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder50_08-14-08-13/report.json  918
-rw-r--r--  benchmark/reports/gpt-engineer/folder51_08-14-21-37/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder51_08-14-21-37/report.json  924
-rw-r--r--  benchmark/reports/gpt-engineer/folder52_08-15-08-13/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder52_08-15-08-13/report.json  924
-rw-r--r--  benchmark/reports/gpt-engineer/folder53_08-16-08-13/radar_chart.png  bin 0 -> 210048 bytes
-rw-r--r--  benchmark/reports/gpt-engineer/folder53_08-16-08-13/report.json  924
-rw-r--r--  benchmark/reports/gpt-engineer/folder5_07-31-16-08/report.json  657
-rw-r--r--  benchmark/reports/gpt-engineer/folder6_07-31-19-04/report.json  657
-rw-r--r--  benchmark/reports/gpt-engineer/folder7_07-31-19-38/report.json  657
-rw-r--r--  benchmark/reports/gpt-engineer/regression_tests.json  1
-rw-r--r--  benchmark/reports/gpt-engineer/success_rate.json  2570
-rw-r--r--  benchmark/reports/json_to_base_64.py  17
-rw-r--r--  benchmark/reports/match_records.py  329
-rw-r--r--  benchmark/reports/mini-agi/1.1_TestWriteFile.json  27
-rw-r--r--  benchmark/reports/mini-agi/10.1_TestRememberMultipleWithNoise.json  30
-rw-r--r--  benchmark/reports/mini-agi/10_TestRememberMultipleWithNoise.json  31
-rw-r--r--  benchmark/reports/mini-agi/11.1_TestRememberMultiplePhrasesWithNoise.json  31
-rw-r--r--  benchmark/reports/mini-agi/11.2_TestRememberMultiplePhrasesWithNoise.json  31
-rw-r--r--  benchmark/reports/mini-agi/11.3_TestRememberMultiplePhrasesWithNoise.json  31
-rw-r--r--  benchmark/reports/mini-agi/11.4_TestRememberMultiplePhrasesWithNoise.json  31
-rw-r--r--  benchmark/reports/mini-agi/11.5_TestRememberMultiplePhrasesWithNoise.json  31
-rw-r--r--  benchmark/reports/mini-agi/11_TestRememberMultiplePhrasesWithNoise.json  31
-rw-r--r--  benchmark/reports/mini-agi/12.1_TestDebugSimpleTypoWithGuidance.json  30
-rw-r--r--  benchmark/reports/mini-agi/12.2_TestDebugSimpleTypoWithGuidance.json  30
-rw-r--r--  benchmark/reports/mini-agi/12.3_TestDebugSimpleTypoWithGuidance.json  30
-rw-r--r--  benchmark/reports/mini-agi/12.4_TestDebugSimpleTypoWithGuidance.json  30
-rw-r--r--  benchmark/reports/mini-agi/12_TestDebugSimpleTypoWithGuidance.json  30
-rw-r--r--  benchmark/reports/mini-agi/13.1_TestRevenueRetrieval.json  61
-rw-r--r--  benchmark/reports/mini-agi/13_TestRevenueRetrieval.json.json  61
-rw-r--r--  benchmark/reports/mini-agi/14_TestReturnCode.json  99
-rw-r--r--  benchmark/reports/mini-agi/15_TestRevenueRetrieval.json  61
-rw-r--r--  benchmark/reports/mini-agi/1_07-18-02-44.json  260
-rw-r--r--  benchmark/reports/mini-agi/1_TestWriteFIle.json  27
-rw-r--r--  benchmark/reports/mini-agi/2.1_TestReadFile.json  27
-rw-r--r--  benchmark/reports/mini-agi/20230817T000109_full_run/radar_chart.png  bin 0 -> 225125 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230817T000109_full_run/report.json  886
-rw-r--r--  benchmark/reports/mini-agi/20230817T081430_full_run/radar_chart.png  bin 0 -> 216380 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230817T081430_full_run/report.json  887
-rw-r--r--  benchmark/reports/mini-agi/20230818T081402_full_run/radar_chart.png  bin 0 -> 227901 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230818T081402_full_run/report.json  885
-rw-r--r--  benchmark/reports/mini-agi/20230819T081219_full_run/radar_chart.png  bin 0 -> 220383 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230819T081219_full_run/report.json  887
-rw-r--r--  benchmark/reports/mini-agi/20230820T081326_full_run/radar_chart.png  bin 0 -> 226812 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230820T081326_full_run/report.json  881
-rw-r--r--  benchmark/reports/mini-agi/20230821T081348_full_run/radar_chart.png  bin 0 -> 222310 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230821T081348_full_run/report.json  886
-rw-r--r--  benchmark/reports/mini-agi/20230822T081356_full_run/radar_chart.png  bin 0 -> 220614 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230822T081356_full_run/report.json  886
-rw-r--r--  benchmark/reports/mini-agi/20230823T081402_full_run/radar_chart.png  bin 0 -> 230546 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230823T081402_full_run/report.json  883
-rw-r--r--  benchmark/reports/mini-agi/20230824T032434_full_run/radar_chart.png  bin 0 -> 225971 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230824T032434_full_run/report.json  880
-rw-r--r--  benchmark/reports/mini-agi/20230824T081327_full_run/radar_chart.png  bin 0 -> 217446 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230824T081327_full_run/report.json  888
-rw-r--r--  benchmark/reports/mini-agi/20230825T081334_full_run/radar_chart.png  bin 0 -> 234518 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230825T081334_full_run/report.json  881
-rw-r--r--  benchmark/reports/mini-agi/20230826T081258_full_run/radar_chart.png  bin 0 -> 233424 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230826T081258_full_run/report.json  884
-rw-r--r--  benchmark/reports/mini-agi/20230827T081225_full_run/radar_chart.png  bin 0 -> 233424 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230827T081225_full_run/report.json  880
-rw-r--r--  benchmark/reports/mini-agi/20230828T081410_full_run/radar_chart.png  bin 0 -> 233424 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230828T081410_full_run/report.json  880
-rw-r--r--  benchmark/reports/mini-agi/20230829T081410_full_run/radar_chart.png  bin 0 -> 234518 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230829T081410_full_run/report.json  884
-rw-r--r--  benchmark/reports/mini-agi/20230830T081335_full_run/radar_chart.png  bin 0 -> 235152 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230830T081335_full_run/report.json  881
-rw-r--r--  benchmark/reports/mini-agi/20230831T051127_full_run/radar_chart.png  bin 0 -> 209741 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230831T051127_full_run/report.json  853
-rw-r--r--  benchmark/reports/mini-agi/20230831T081335_full_run/radar_chart.png  bin 0 -> 179281 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230831T081335_full_run/report.json  416
-rw-r--r--  benchmark/reports/mini-agi/20230831T153352_full_run/radar_chart.png  bin 0 -> 179281 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230831T153352_full_run/report.json  416
-rw-r--r--  benchmark/reports/mini-agi/20230901T081339_full_run/radar_chart.png  bin 0 -> 165432 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230901T081339_full_run/report.json  417
-rw-r--r--  benchmark/reports/mini-agi/20230902T081308_full_run/radar_chart.png  bin 0 -> 179281 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230902T081308_full_run/report.json  416
-rw-r--r--  benchmark/reports/mini-agi/20230903T081306_full_run/radar_chart.png  bin 0 -> 245361 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230903T081306_full_run/report.json  859
-rw-r--r--  benchmark/reports/mini-agi/20230904T081505_full_run/radar_chart.png  bin 0 -> 245279 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230904T081505_full_run/report.json  860
-rw-r--r--  benchmark/reports/mini-agi/20230905T081354_full_run/radar_chart.png  bin 0 -> 230373 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230905T081354_full_run/report.json  880
-rw-r--r--  benchmark/reports/mini-agi/20230908T001050_full_run/radar_chart.png  bin 0 -> 158585 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230908T001050_full_run/report.json  307
-rw-r--r--  benchmark/reports/mini-agi/20230908T011712_full_run/radar_chart.png  bin 0 -> 161687 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230908T011712_full_run/report.json  306
-rw-r--r--  benchmark/reports/mini-agi/20230908T080426_full_run/radar_chart.png  bin 0 -> 161687 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230908T080426_full_run/report.json  306
-rw-r--r--  benchmark/reports/mini-agi/20230909T080415_full_run/radar_chart.png  bin 0 -> 161687 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230909T080415_full_run/report.json  306
-rw-r--r--  benchmark/reports/mini-agi/20230910T080446_full_run/radar_chart.png  bin 0 -> 158585 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230910T080446_full_run/report.json  307
-rw-r--r--  benchmark/reports/mini-agi/20230911T080548_full_run/radar_chart.png  bin 0 -> 158585 bytes
-rw-r--r--  benchmark/reports/mini-agi/20230911T080548_full_run/report.json  307
-rw-r--r--  benchmark/reports/mini-agi/2_07-18-16-20.json  260
-rw-r--r--  benchmark/reports/mini-agi/2_TestReadFile.json  27
-rw-r--r--  benchmark/reports/mini-agi/3.1_TestSearch.json  27
-rw-r--r--  benchmark/reports/mini-agi/3_07-20-22-44.json  259
-rw-r--r--  benchmark/reports/mini-agi/3_TestSearch.json  27
-rw-r--r--  benchmark/reports/mini-agi/4.1_TestBasicRetrieval.json  27
-rw-r--r--  benchmark/reports/mini-agi/4_07-20-23-18.json  268
-rw-r--r--  benchmark/reports/mini-agi/4_TestBasicRetrieval.json  27
-rw-r--r--  benchmark/reports/mini-agi/5.1_TestRevenueRetrieval_1.0.json  30
-rw-r--r--  benchmark/reports/mini-agi/5.2_TestRevenueRetrieval_1.0.json  31
-rw-r--r--  benchmark/reports/mini-agi/5_07-20-23-43.json  268
-rw-r--r--  benchmark/reports/mini-agi/5_TestRevenueRetrieval_1.0.json  29
-rw-r--r--  benchmark/reports/mini-agi/6.1_TestRevenueRetrieval_1.1.json  30
-rw-r--r--  benchmark/reports/mini-agi/6.2_TestRevenueRetrieval_1.1.json  30
-rw-r--r--  benchmark/reports/mini-agi/6.3_TestRevenueRetrieval_1.1.json  30
-rw-r--r--  benchmark/reports/mini-agi/6.4_TestRevenueRetrieval_1.1.json  31
-rw-r--r--  benchmark/reports/mini-agi/6_07-21-00-20.json  262
-rw-r--r--  benchmark/reports/mini-agi/6_TestRevenueRetrieval_1.1.json  30
-rw-r--r--  benchmark/reports/mini-agi/7.1_TestRevenueRetrieval_1.2.json  31
-rw-r--r--  benchmark/reports/mini-agi/7_07-21-08-18.json  255
-rw-r--r--  benchmark/reports/mini-agi/7_TestRevenueRetrieval_1.2.json  30
-rw-r--r--  benchmark/reports/mini-agi/8.1_TestBasicMemory.json  30
-rw-r--r--  benchmark/reports/mini-agi/8_07-21-18-18.json  258
-rw-r--r--  benchmark/reports/mini-agi/8_TestBasicMemory.json  31
-rw-r--r--  benchmark/reports/mini-agi/9.1_TestRememberMultipleIds.json  30
-rw-r--r--  benchmark/reports/mini-agi/9_07-22-08-16.json  261
-rw-r--r--  benchmark/reports/mini-agi/9_TestRememberMultipleIds.json  31
-rw-r--r--  benchmark/reports/mini-agi/file10_07-23-16-21.json  256
-rw-r--r--  benchmark/reports/mini-agi/file11_07-23-19-07.json  258
-rw-r--r--  benchmark/reports/mini-agi/file12_07-23-19-53.json  260
-rw-r--r--  benchmark/reports/mini-agi/file13_07-23-21-03.json  259
-rw-r--r--  benchmark/reports/mini-agi/file13_07-23-21-07.json  259
-rw-r--r--  benchmark/reports/mini-agi/file15_07-23-22-26.json  258
-rw-r--r--  benchmark/reports/mini-agi/file16_07-24-08-21.json  260
-rw-r--r--  benchmark/reports/mini-agi/file56_07-24-22-12.json  521
-rw-r--r--benchmark/reports/mini-agi/file57_07-24-23-51.json522
-rw-r--r--benchmark/reports/mini-agi/file58_07-25-01-04.json522
-rw-r--r--benchmark/reports/mini-agi/file59_07-25-01-35.json524
-rw-r--r--benchmark/reports/mini-agi/file60_07-25-03-14.json522
-rw-r--r--benchmark/reports/mini-agi/file61_07-25-03-35.json521
-rw-r--r--benchmark/reports/mini-agi/file62_07-25-03-59.json523
-rw-r--r--benchmark/reports/mini-agi/file63_07-25-08-19.json523
-rw-r--r--benchmark/reports/mini-agi/file64_07-25-18-09.json184
-rw-r--r--benchmark/reports/mini-agi/file64_07-25-18-11.json523
-rw-r--r--benchmark/reports/mini-agi/file64_07-25-18-13.json181
-rw-r--r--benchmark/reports/mini-agi/file67_07-26-00-54.json528
-rw-r--r--benchmark/reports/mini-agi/file68_07-26-08-18.json589
-rw-r--r--benchmark/reports/mini-agi/file69_07-27-13-30.json31
-rw-r--r--benchmark/reports/mini-agi/file70_07-27-13-38.json31
-rw-r--r--benchmark/reports/mini-agi/file71_07-27-18-45.json31
-rw-r--r--benchmark/reports/mini-agi/file72_07-27-19-23.json31
-rw-r--r--benchmark/reports/mini-agi/file73_07-27-19-55.json183
-rw-r--r--benchmark/reports/mini-agi/file74_07-28-03-53.json608
-rw-r--r--benchmark/reports/mini-agi/file75_07-28-04-10.json609
-rw-r--r--benchmark/reports/mini-agi/file76_07-29-08-11.json608
-rw-r--r--benchmark/reports/mini-agi/file77_07-29-09-29.json624
-rw-r--r--benchmark/reports/mini-agi/file78_07-29-17-20.json628
-rw-r--r--benchmark/reports/mini-agi/file79_07-29-18-06.json626
-rw-r--r--benchmark/reports/mini-agi/file80_07-30-01-38.json625
-rw-r--r--benchmark/reports/mini-agi/file81_07-30-03-05.json625
-rw-r--r--benchmark/reports/mini-agi/file82_07-30-04-24.json629
-rw-r--r--benchmark/reports/mini-agi/file83_07-30-08-12.json625
-rw-r--r--benchmark/reports/mini-agi/folder11_08-01-12-47/radar_chart.pngbin0 -> 158755 bytes
-rw-r--r--benchmark/reports/mini-agi/folder11_08-01-12-47/report.json651
-rw-r--r--benchmark/reports/mini-agi/folder12_08-01-02-43/report.json32
-rw-r--r--benchmark/reports/mini-agi/folder12_08-01-13-38/radar_chart.pngbin0 -> 165470 bytes
-rw-r--r--benchmark/reports/mini-agi/folder12_08-01-13-38/report.json648
-rw-r--r--benchmark/reports/mini-agi/folder13_08-01-03-21/radar_chart.pngbin0 -> 221509 bytes
-rw-r--r--benchmark/reports/mini-agi/folder13_08-01-03-21/report.json647
-rw-r--r--benchmark/reports/mini-agi/folder13_08-01-16-18/radar_chart.pngbin0 -> 221361 bytes
-rw-r--r--benchmark/reports/mini-agi/folder13_08-01-16-18/report.json650
-rw-r--r--benchmark/reports/mini-agi/folder14_08-01-16-57/radar_chart.pngbin0 -> 221519 bytes
-rw-r--r--benchmark/reports/mini-agi/folder14_08-01-16-57/report.json650
-rw-r--r--benchmark/reports/mini-agi/folder15_08-01-19-52/radar_chart.pngbin0 -> 226716 bytes
-rw-r--r--benchmark/reports/mini-agi/folder15_08-01-19-52/report.json646
-rw-r--r--benchmark/reports/mini-agi/folder1_07-29-23-35/report.json634
-rw-r--r--benchmark/reports/mini-agi/folder1_TestWriteFile/report.json32
-rw-r--r--benchmark/reports/mini-agi/folder20_08-02-03-12/radar_chart.pngbin0 -> 216341 bytes
-rw-r--r--benchmark/reports/mini-agi/folder20_08-02-03-12/report.json651
-rw-r--r--benchmark/reports/mini-agi/folder21_08-02-03-58/radar_chart.pngbin0 -> 223494 bytes
-rw-r--r--benchmark/reports/mini-agi/folder21_08-02-03-58/report.json651
-rw-r--r--benchmark/reports/mini-agi/folder22_08-02-08-13/radar_chart.pngbin0 -> 227316 bytes
-rw-r--r--benchmark/reports/mini-agi/folder22_08-02-08-13/report.json649
-rw-r--r--benchmark/reports/mini-agi/folder23_08-02-15-17/radar_chart.pngbin0 -> 226536 bytes
-rw-r--r--benchmark/reports/mini-agi/folder23_08-02-15-17/report.json651
-rw-r--r--benchmark/reports/mini-agi/folder24_08-02-17-20/radar_chart.pngbin0 -> 211111 bytes
-rw-r--r--benchmark/reports/mini-agi/folder24_08-02-17-20/report.json653
-rw-r--r--benchmark/reports/mini-agi/folder25_08-01-23-54/report.json32
-rw-r--r--benchmark/reports/mini-agi/folder25_08-02-17-38/radar_chart.pngbin0 -> 221729 bytes
-rw-r--r--benchmark/reports/mini-agi/folder25_08-02-17-38/report.json648
-rw-r--r--benchmark/reports/mini-agi/folder26_08-02-22-57/radar_chart.pngbin0 -> 228365 bytes
-rw-r--r--benchmark/reports/mini-agi/folder26_08-02-22-57/report.json684
-rw-r--r--benchmark/reports/mini-agi/folder27_08-03-21-39/radar_chart.pngbin0 -> 228525 bytes
-rw-r--r--benchmark/reports/mini-agi/folder27_08-03-21-39/report.json686
-rw-r--r--benchmark/reports/mini-agi/folder28_08-03-23-50/radar_chart.pngbin0 -> 223678 bytes
-rw-r--r--benchmark/reports/mini-agi/folder28_08-03-23-50/report.json687
-rw-r--r--benchmark/reports/mini-agi/folder29_08-04-03-24/radar_chart.pngbin0 -> 220432 bytes
-rw-r--r--benchmark/reports/mini-agi/folder29_08-04-03-24/report.json686
-rw-r--r--benchmark/reports/mini-agi/folder2_07-30-22-54/report.json634
-rw-r--r--benchmark/reports/mini-agi/folder30_08-04-08-13/radar_chart.pngbin0 -> 224410 bytes
-rw-r--r--benchmark/reports/mini-agi/folder30_08-04-08-13/report.json686
-rw-r--r--benchmark/reports/mini-agi/folder31_08-04-22-15/radar_chart.pngbin0 -> 228525 bytes
-rw-r--r--benchmark/reports/mini-agi/folder31_08-04-22-15/report.json685
-rw-r--r--benchmark/reports/mini-agi/folder32_08-05-08-12/radar_chart.pngbin0 -> 215459 bytes
-rw-r--r--benchmark/reports/mini-agi/folder32_08-05-08-12/report.json685
-rw-r--r--benchmark/reports/mini-agi/folder33_08-06-08-12/radar_chart.pngbin0 -> 228947 bytes
-rw-r--r--benchmark/reports/mini-agi/folder33_08-06-08-12/report.json765
-rw-r--r--benchmark/reports/mini-agi/folder34_08-07-08-13/radar_chart.pngbin0 -> 240430 bytes
-rw-r--r--benchmark/reports/mini-agi/folder34_08-07-08-13/report.json869
-rw-r--r--benchmark/reports/mini-agi/folder35_08-02-00-08/radar_chart.pngbin0 -> 221729 bytes
-rw-r--r--benchmark/reports/mini-agi/folder35_08-02-00-08/report.json648
-rw-r--r--benchmark/reports/mini-agi/folder35_08-07-20-11/radar_chart.pngbin0 -> 230264 bytes
-rw-r--r--benchmark/reports/mini-agi/folder35_08-07-20-11/report.json870
-rw-r--r--benchmark/reports/mini-agi/folder36_08-08-08-13/radar_chart.pngbin0 -> 228903 bytes
-rw-r--r--benchmark/reports/mini-agi/folder36_08-08-08-13/report.json870
-rw-r--r--benchmark/reports/mini-agi/folder37_08-09-08-14/radar_chart.pngbin0 -> 236767 bytes
-rw-r--r--benchmark/reports/mini-agi/folder37_08-09-08-14/report.json904
-rw-r--r--benchmark/reports/mini-agi/folder38_08-10-08-14/radar_chart.pngbin0 -> 225298 bytes
-rw-r--r--benchmark/reports/mini-agi/folder38_08-10-08-14/report.json907
-rw-r--r--benchmark/reports/mini-agi/folder39_08-02-01-34/report.json33
-rw-r--r--benchmark/reports/mini-agi/folder39_08-02-02-36/radar_chart.pngbin0 -> 221729 bytes
-rw-r--r--benchmark/reports/mini-agi/folder39_08-02-02-36/report.json650
-rw-r--r--benchmark/reports/mini-agi/folder39_08-11-08-13/radar_chart.pngbin0 -> 219337 bytes
-rw-r--r--benchmark/reports/mini-agi/folder39_08-11-08-13/report.json914
-rw-r--r--benchmark/reports/mini-agi/folder3_07-31-02-40/report.json637
-rw-r--r--benchmark/reports/mini-agi/folder40_08-12-02-51/radar_chart.pngbin0 -> 227901 bytes
-rw-r--r--benchmark/reports/mini-agi/folder40_08-12-02-51/report.json904
-rw-r--r--benchmark/reports/mini-agi/folder41_08-12-08-12/radar_chart.pngbin0 -> 221277 bytes
-rw-r--r--benchmark/reports/mini-agi/folder41_08-12-08-12/report.json904
-rw-r--r--benchmark/reports/mini-agi/folder42_08-12-17-24/radar_chart.pngbin0 -> 217477 bytes
-rw-r--r--benchmark/reports/mini-agi/folder42_08-12-17-24/report.json906
-rw-r--r--benchmark/reports/mini-agi/folder43_08-13-01-11/radar_chart.pngbin0 -> 221277 bytes
-rw-r--r--benchmark/reports/mini-agi/folder43_08-13-01-11/report.json886
-rw-r--r--benchmark/reports/mini-agi/folder44_08-13-01-52/radar_chart.pngbin0 -> 225125 bytes
-rw-r--r--benchmark/reports/mini-agi/folder44_08-13-01-52/report.json886
-rw-r--r--benchmark/reports/mini-agi/folder45_08-13-02-16/radar_chart.pngbin0 -> 219302 bytes
-rw-r--r--benchmark/reports/mini-agi/folder45_08-13-02-16/report.json887
-rw-r--r--benchmark/reports/mini-agi/folder46_08-13-02-37/radar_chart.pngbin0 -> 219302 bytes
-rw-r--r--benchmark/reports/mini-agi/folder46_08-13-02-37/report.json887
-rw-r--r--benchmark/reports/mini-agi/folder47_08-13-08-11/radar_chart.pngbin0 -> 216380 bytes
-rw-r--r--benchmark/reports/mini-agi/folder47_08-13-08-11/report.json888
-rw-r--r--benchmark/reports/mini-agi/folder48_08-14-08-13/radar_chart.pngbin0 -> 234556 bytes
-rw-r--r--benchmark/reports/mini-agi/folder48_08-14-08-13/report.json893
-rw-r--r--benchmark/reports/mini-agi/folder49_08-14-17-10/radar_chart.pngbin0 -> 234556 bytes
-rw-r--r--benchmark/reports/mini-agi/folder49_08-14-17-10/report.json893
-rw-r--r--benchmark/reports/mini-agi/folder4_07-31-03-06/report.json617
-rw-r--r--benchmark/reports/mini-agi/folder50_08-14-21-39/radar_chart.pngbin0 -> 228260 bytes
-rw-r--r--benchmark/reports/mini-agi/folder50_08-14-21-39/report.json915
-rw-r--r--benchmark/reports/mini-agi/folder51_08-15-08-13/radar_chart.pngbin0 -> 224407 bytes
-rw-r--r--benchmark/reports/mini-agi/folder51_08-15-08-13/report.json916
-rw-r--r--benchmark/reports/mini-agi/folder52_08-16-08-13/radar_chart.pngbin0 -> 226680 bytes
-rw-r--r--benchmark/reports/mini-agi/folder52_08-16-08-13/report.json915
-rw-r--r--benchmark/reports/mini-agi/folder6_07-31-13-05/radar_chart.pngbin0 -> 214717 bytes
-rw-r--r--benchmark/reports/mini-agi/folder6_07-31-13-05/report.json648
-rw-r--r--benchmark/reports/mini-agi/folder7_07-31-16-10/radar_chart.pngbin0 -> 198835 bytes
-rw-r--r--benchmark/reports/mini-agi/folder7_07-31-16-10/report.json648
-rw-r--r--benchmark/reports/mini-agi/folder8_07-31-19-05/radar_chart.pngbin0 -> 223872 bytes
-rw-r--r--benchmark/reports/mini-agi/folder8_07-31-19-05/report.json647
-rw-r--r--benchmark/reports/mini-agi/folder9_07-31-19-38/radar_chart.pngbin0 -> 191727 bytes
-rw-r--r--benchmark/reports/mini-agi/folder9_07-31-19-38/report.json649
-rw-r--r--benchmark/reports/mini-agi/regression_tests.json58
-rw-r--r--benchmark/reports/mini-agi/success_rate.json2468
-rw-r--r--benchmark/reports/send_to_googledrive.py176
-rw-r--r--benchmark/reports/smol-developer/20230816T230338_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230816T230338_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230816T234942_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230816T234942_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230817T000236_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230817T000236_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230817T081348_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230817T081348_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230818T081340_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230818T081340_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230819T081214_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230819T081214_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230820T081130_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230820T081130_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230821T081332_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230821T081332_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230822T081323_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230822T081323_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230823T081258_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230823T081258_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230824T032352_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230824T032352_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230824T081338_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230824T081338_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230825T081303_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230825T081303_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230826T081138_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230826T081138_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230827T081202_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230827T081202_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230828T081355_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230828T081355_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230829T081455_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230829T081455_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230830T081414_full_run/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/20230830T081414_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/20230831T054617_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230831T054617_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230831T055921_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230831T055921_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230831T081311_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230831T081311_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230831T152508_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230831T152508_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230831T153323_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230831T153323_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230901T081311_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230901T081311_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230901T153702_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230901T153702_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230901T160858_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230901T160858_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230901T171730_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230901T171730_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230902T081208_full_run/radar_chart.pngbin0 -> 149723 bytes
-rw-r--r--benchmark/reports/smol-developer/20230902T081208_full_run/report.json424
-rw-r--r--benchmark/reports/smol-developer/20230903T081224_full_run/radar_chart.pngbin0 -> 218178 bytes
-rw-r--r--benchmark/reports/smol-developer/20230903T081224_full_run/report.json873
-rw-r--r--benchmark/reports/smol-developer/20230904T081400_full_run/radar_chart.pngbin0 -> 218178 bytes
-rw-r--r--benchmark/reports/smol-developer/20230904T081400_full_run/report.json873
-rw-r--r--benchmark/reports/smol-developer/20230905T081410_full_run/radar_chart.pngbin0 -> 218544 bytes
-rw-r--r--benchmark/reports/smol-developer/20230905T081410_full_run/report.json893
-rw-r--r--benchmark/reports/smol-developer/file10_07-20-22-43.json266
-rw-r--r--benchmark/reports/smol-developer/file11_07-20-22-48.json266
-rw-r--r--benchmark/reports/smol-developer/file12_07-21-00-20.json266
-rw-r--r--benchmark/reports/smol-developer/file13_07-21-08-18.json266
-rw-r--r--benchmark/reports/smol-developer/file14_07-21-18-17.json266
-rw-r--r--benchmark/reports/smol-developer/file15_07-22-08-15.json266
-rw-r--r--benchmark/reports/smol-developer/file16_07-22-15-09.json266
-rw-r--r--benchmark/reports/smol-developer/file17_07-23-08-16.json266
-rw-r--r--benchmark/reports/smol-developer/file18_07-23-16-21.json266
-rw-r--r--benchmark/reports/smol-developer/file19_07-23-19-07.json266
-rw-r--r--benchmark/reports/smol-developer/file1_07-18-00-17.json176
-rw-r--r--benchmark/reports/smol-developer/file20_07-23-19-25.json266
-rw-r--r--benchmark/reports/smol-developer/file21_07-23-19-34.json266
-rw-r--r--benchmark/reports/smol-developer/file22_07-23-19-54.json266
-rw-r--r--benchmark/reports/smol-developer/file23_07-23-21-03.json266
-rw-r--r--benchmark/reports/smol-developer/file23_07-23-21-06.json266
-rw-r--r--benchmark/reports/smol-developer/file25_07-23-22-25.json266
-rw-r--r--benchmark/reports/smol-developer/file26_07-24-08-19.json266
-rw-r--r--benchmark/reports/smol-developer/file27_07-24-22-11.json530
-rw-r--r--benchmark/reports/smol-developer/file28_07-24-23-50.json530
-rw-r--r--benchmark/reports/smol-developer/file29_07-25-01-05.json530
-rw-r--r--benchmark/reports/smol-developer/file2_07-18-02-43.json266
-rw-r--r--benchmark/reports/smol-developer/file30_07-25-01-34.json530
-rw-r--r--benchmark/reports/smol-developer/file31_07-25-03-14.json530
-rw-r--r--benchmark/reports/smol-developer/file32_07-25-03-35.json530
-rw-r--r--benchmark/reports/smol-developer/file33_07-25-03-59.json530
-rw-r--r--benchmark/reports/smol-developer/file34_07-25-04-19.json530
-rw-r--r--benchmark/reports/smol-developer/file35_07-25-08-18.json530
-rw-r--r--benchmark/reports/smol-developer/file36_07-25-18-09.json187
-rw-r--r--benchmark/reports/smol-developer/file36_07-25-18-11.json187
-rw-r--r--benchmark/reports/smol-developer/file36_07-25-18-13.json187
-rw-r--r--benchmark/reports/smol-developer/file39_07-26-00-53.json187
-rw-r--r--benchmark/reports/smol-developer/file3_07-18-08-19.json266
-rw-r--r--benchmark/reports/smol-developer/file40_07-26-03-15.json187
-rw-r--r--benchmark/reports/smol-developer/file41_07-26-08-17.json187
-rw-r--r--benchmark/reports/smol-developer/file42_07-27-13-30.json31
-rw-r--r--benchmark/reports/smol-developer/file43_07-27-13-37.json31
-rw-r--r--benchmark/reports/smol-developer/file44_07-27-18-44.json31
-rw-r--r--benchmark/reports/smol-developer/file45_07-27-19-23.json31
-rw-r--r--benchmark/reports/smol-developer/file46_07-27-19-56.json188
-rw-r--r--benchmark/reports/smol-developer/file47_07-28-03-52.json618
-rw-r--r--benchmark/reports/smol-developer/file48_07-28-04-10.json618
-rw-r--r--benchmark/reports/smol-developer/file49_07-28-08-12.json618
-rw-r--r--benchmark/reports/smol-developer/file4_07-18-16-19.json266
-rw-r--r--benchmark/reports/smol-developer/file50_07-29-08-11.json618
-rw-r--r--benchmark/reports/smol-developer/file51_07-29-09-24.json635
-rw-r--r--benchmark/reports/smol-developer/file52_07-29-09-28.json636
-rw-r--r--benchmark/reports/smol-developer/file53_07-29-10-17.json635
-rw-r--r--benchmark/reports/smol-developer/file54_07-29-10-45.json635
-rw-r--r--benchmark/reports/smol-developer/file55_07-29-16-09.json635
-rw-r--r--benchmark/reports/smol-developer/file56_07-29-17-20.json636
-rw-r--r--benchmark/reports/smol-developer/file57_07-29-18-05.json635
-rw-r--r--benchmark/reports/smol-developer/file58_07-30-00-51.json635
-rw-r--r--benchmark/reports/smol-developer/file59_07-30-01-38.json635
-rw-r--r--benchmark/reports/smol-developer/file5_07-19-08-18.json266
-rw-r--r--benchmark/reports/smol-developer/file60_07-30-03-05.json635
-rw-r--r--benchmark/reports/smol-developer/file61_07-30-04-24.json635
-rw-r--r--benchmark/reports/smol-developer/file62_07-30-08-11.json635
-rw-r--r--benchmark/reports/smol-developer/file6_07-19-20-39.json266
-rw-r--r--benchmark/reports/smol-developer/file7_07-19-21-55.json266
-rw-r--r--benchmark/reports/smol-developer/file8_07-20-08-18.json266
-rw-r--r--benchmark/reports/smol-developer/file9_07-20-20-10.json266
-rw-r--r--benchmark/reports/smol-developer/folder10_08-01-12-46/radar_chart.pngbin0 -> 24826 bytes
-rw-r--r--benchmark/reports/smol-developer/folder10_08-01-12-46/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder11_08-01-02-42/report.json14
-rw-r--r--benchmark/reports/smol-developer/folder11_08-01-13-38/radar_chart.pngbin0 -> 24826 bytes
-rw-r--r--benchmark/reports/smol-developer/folder11_08-01-13-38/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder12_08-01-03-21/radar_chart.pngbin0 -> 124547 bytes
-rw-r--r--benchmark/reports/smol-developer/folder12_08-01-03-21/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder12_08-01-16-17/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder12_08-01-16-17/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder13_08-01-16-57/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder13_08-01-16-57/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder14_08-01-17-31/radar_chart.pngbin0 -> 209974 bytes
-rw-r--r--benchmark/reports/smol-developer/folder14_08-01-17-31/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder15_08-01-19-51/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder15_08-01-19-51/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder19_08-02-03-12/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder19_08-02-03-12/report.json657
-rw-r--r--benchmark/reports/smol-developer/folder1_07-30-22-53/report.json644
-rw-r--r--benchmark/reports/smol-developer/folder20_08-02-01-34/radar_chart.pngbin0 -> 209700 bytes
-rw-r--r--benchmark/reports/smol-developer/folder20_08-02-01-34/report.json658
-rw-r--r--benchmark/reports/smol-developer/folder20_08-02-03-58/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder20_08-02-03-58/report.json657
-rw-r--r--benchmark/reports/smol-developer/folder21_08-02-02-36/radar_chart.pngbin0 -> 209700 bytes
-rw-r--r--benchmark/reports/smol-developer/folder21_08-02-02-36/report.json658
-rw-r--r--benchmark/reports/smol-developer/folder21_08-02-08-12/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder21_08-02-08-12/report.json657
-rw-r--r--benchmark/reports/smol-developer/folder22_08-02-15-17/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder22_08-02-15-17/report.json657
-rw-r--r--benchmark/reports/smol-developer/folder23_08-02-17-20/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder23_08-02-17-20/report.json657
-rw-r--r--benchmark/reports/smol-developer/folder24_08-02-17-38/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder24_08-02-17-38/report.json657
-rw-r--r--benchmark/reports/smol-developer/folder25_08-02-00-08/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder25_08-02-00-08/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder25_08-02-20-30/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder25_08-02-20-30/report.json692
-rw-r--r--benchmark/reports/smol-developer/folder26_08-02-21-52/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder26_08-02-21-52/report.json693
-rw-r--r--benchmark/reports/smol-developer/folder27_08-02-22-10/radar_chart.pngbin0 -> 211464 bytes
-rw-r--r--benchmark/reports/smol-developer/folder27_08-02-22-10/report.json693
-rw-r--r--benchmark/reports/smol-developer/folder28_08-03-08-12/radar_chart.pngbin0 -> 209974 bytes
-rw-r--r--benchmark/reports/smol-developer/folder28_08-03-08-12/report.json693
-rw-r--r--benchmark/reports/smol-developer/folder29_08-03-23-50/radar_chart.pngbin0 -> 208769 bytes
-rw-r--r--benchmark/reports/smol-developer/folder29_08-03-23-50/report.json694
-rw-r--r--benchmark/reports/smol-developer/folder2_07-31-02-07/report.json645
-rw-r--r--benchmark/reports/smol-developer/folder30_08-04-03-24/radar_chart.pngbin0 -> 209974 bytes
-rw-r--r--benchmark/reports/smol-developer/folder30_08-04-03-24/report.json693
-rw-r--r--benchmark/reports/smol-developer/folder31_08-04-08-12/radar_chart.pngbin0 -> 208769 bytes
-rw-r--r--benchmark/reports/smol-developer/folder31_08-04-08-12/report.json694
-rw-r--r--benchmark/reports/smol-developer/folder32_08-05-08-11/radar_chart.pngbin0 -> 209974 bytes
-rw-r--r--benchmark/reports/smol-developer/folder32_08-05-08-11/report.json692
-rw-r--r--benchmark/reports/smol-developer/folder33_08-06-08-12/radar_chart.pngbin0 -> 209227 bytes
-rw-r--r--benchmark/reports/smol-developer/folder33_08-06-08-12/report.json770
-rw-r--r--benchmark/reports/smol-developer/folder34_08-06-19-10/radar_chart.pngbin0 -> 209227 bytes
-rw-r--r--benchmark/reports/smol-developer/folder34_08-06-19-10/report.json846
-rw-r--r--benchmark/reports/smol-developer/folder35_08-07-01-04/radar_chart.pngbin0 -> 213132 bytes
-rw-r--r--benchmark/reports/smol-developer/folder35_08-07-01-04/report.json846
-rw-r--r--benchmark/reports/smol-developer/folder36_08-07-08-12/radar_chart.pngbin0 -> 221977 bytes
-rw-r--r--benchmark/reports/smol-developer/folder36_08-07-08-12/report.json877
-rw-r--r--benchmark/reports/smol-developer/folder37_08-08-08-13/radar_chart.pngbin0 -> 221977 bytes
-rw-r--r--benchmark/reports/smol-developer/folder37_08-08-08-13/report.json877
-rw-r--r--benchmark/reports/smol-developer/folder38_08-08-22-23/radar_chart.pngbin0 -> 221977 bytes
-rw-r--r--benchmark/reports/smol-developer/folder38_08-08-22-23/report.json912
-rw-r--r--benchmark/reports/smol-developer/folder39_08-08-22-30/radar_chart.pngbin0 -> 220706 bytes
-rw-r--r--benchmark/reports/smol-developer/folder39_08-08-22-30/report.json913
-rw-r--r--benchmark/reports/smol-developer/folder3_07-31-03-06/report.json627
-rw-r--r--benchmark/reports/smol-developer/folder40_08-09-03-06/radar_chart.pngbin0 -> 221977 bytes
-rw-r--r--benchmark/reports/smol-developer/folder40_08-09-03-06/report.json912
-rw-r--r--benchmark/reports/smol-developer/folder41_08-09-08-14/radar_chart.pngbin0 -> 221977 bytes
-rw-r--r--benchmark/reports/smol-developer/folder41_08-09-08-14/report.json912
-rw-r--r--benchmark/reports/smol-developer/folder42_08-09-17-08/radar_chart.pngbin0 -> 221977 bytes
-rw-r--r--benchmark/reports/smol-developer/folder42_08-09-17-08/report.json912
-rw-r--r--benchmark/reports/smol-developer/folder43_08-10-08-14/radar_chart.pngbin0 -> 221977 bytes
-rw-r--r--benchmark/reports/smol-developer/folder43_08-10-08-14/report.json912
-rw-r--r--benchmark/reports/smol-developer/folder44_08-10-19-25/radar_chart.pngbin0 -> 221168 bytes
-rw-r--r--benchmark/reports/smol-developer/folder44_08-10-19-25/report.json878
-rw-r--r--benchmark/reports/smol-developer/folder45_08-11-08-12/radar_chart.pngbin0 -> 219337 bytes
-rw-r--r--benchmark/reports/smol-developer/folder45_08-11-08-12/report.json915
-rw-r--r--benchmark/reports/smol-developer/folder46_08-11-16-47/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/smol-developer/folder46_08-11-16-47/report.json915
-rw-r--r--benchmark/reports/smol-developer/folder47_08-12-02-01/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/smol-developer/folder47_08-12-02-01/report.json914
-rw-r--r--benchmark/reports/smol-developer/folder48_08-12-02-50/radar_chart.pngbin0 -> 210048 bytes
-rw-r--r--benchmark/reports/smol-developer/folder48_08-12-02-50/report.json914
-rw-r--r--benchmark/reports/smol-developer/folder49_08-12-03-02/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder49_08-12-03-02/report.json913
-rw-r--r--benchmark/reports/smol-developer/folder50_08-12-03-35/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder50_08-12-03-35/report.json913
-rw-r--r--benchmark/reports/smol-developer/folder51_08-12-08-11/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder51_08-12-08-11/report.json913
-rw-r--r--benchmark/reports/smol-developer/folder52_08-12-17-23/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder52_08-12-17-23/report.json913
-rw-r--r--benchmark/reports/smol-developer/folder53_08-13-00-51/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder53_08-13-00-51/report.json893
-rw-r--r--benchmark/reports/smol-developer/folder54_08-13-01-11/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder54_08-13-01-11/report.json893
-rw-r--r--benchmark/reports/smol-developer/folder55_08-13-01-52/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder55_08-13-01-52/report.json893
-rw-r--r--benchmark/reports/smol-developer/folder56_08-13-02-16/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder56_08-13-02-16/report.json893
-rw-r--r--benchmark/reports/smol-developer/folder57_08-13-02-37/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder57_08-13-02-37/report.json893
-rw-r--r--benchmark/reports/smol-developer/folder58_08-13-08-11/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder58_08-13-08-11/report.json893
-rw-r--r--benchmark/reports/smol-developer/folder59_08-14-08-13/radar_chart.pngbin0 -> 212836 bytes
-rw-r--r--benchmark/reports/smol-developer/folder59_08-14-08-13/report.json915
-rw-r--r--benchmark/reports/smol-developer/folder5_07-31-13-05/radar_chart.pngbin0 -> 124547 bytes
-rw-r--r--benchmark/reports/smol-developer/folder5_07-31-13-05/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder60_08-14-17-47/radar_chart.pngbin0 -> 212836 bytes
-rw-r--r--benchmark/reports/smol-developer/folder60_08-14-17-47/report.json915
-rw-r--r--benchmark/reports/smol-developer/folder61_08-14-21-38/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder61_08-14-21-38/report.json923
-rw-r--r--benchmark/reports/smol-developer/folder62_08-15-08-13/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder62_08-15-08-13/report.json923
-rw-r--r--benchmark/reports/smol-developer/folder63_08-15-16-42/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder63_08-15-16-42/report.json923
-rw-r--r--benchmark/reports/smol-developer/folder64_08-16-08-13/radar_chart.pngbin0 -> 211810 bytes
-rw-r--r--benchmark/reports/smol-developer/folder64_08-16-08-13/report.json923
-rw-r--r--benchmark/reports/smol-developer/folder6_07-31-16-11/radar_chart.pngbin0 -> 124547 bytes
-rw-r--r--benchmark/reports/smol-developer/folder6_07-31-16-11/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder7_07-31-19-05/radar_chart.pngbin0 -> 124547 bytes
-rw-r--r--benchmark/reports/smol-developer/folder7_07-31-19-05/report.json656
-rw-r--r--benchmark/reports/smol-developer/folder8_07-31-19-38/radar_chart.pngbin0 -> 124547 bytes
-rw-r--r--benchmark/reports/smol-developer/folder8_07-31-19-38/report.json656
-rw-r--r--benchmark/reports/smol-developer/regression_tests.json10
-rw-r--r--benchmark/reports/smol-developer/success_rate.json3204
-rwxr-xr-xbenchmark/run.sh18
-rw-r--r--benchmark/tests/__init__.py0
-rw-r--r--benchmark/tests/test_benchmark_workflow.py83
-rw-r--r--benchmark/tests/test_extract_subgraph.py88
-rw-r--r--benchmark/tests/test_get_roots.py57
-rw-r--r--benchmark/tests/test_is_circular.py47
-rw-r--r--benchmarks.py52
-rw-r--r--cli.py893
-rw-r--r--data_ingestion.py98
-rw-r--r--docker-compose.yml16
-rw-r--r--docs/_javascript/mathjax.js16
-rw-r--r--docs/_javascript/tablesort.js6
-rw-r--r--docs/challenges/building_challenges.md130
-rw-r--r--docs/challenges/information_retrieval/introduction.md3
-rw-r--r--docs/challenges/introduction.md23
-rw-r--r--docs/challenges/list.md5
-rw-r--r--docs/challenges/memory/challenge_b.md43
-rw-r--r--docs/challenges/memory/challenge_c.md45
-rw-r--r--docs/challenges/memory/challenge_d.md75
-rw-r--r--docs/challenges/memory/introduction.md5
-rw-r--r--docs/challenges/submit.md14
l---------docs/code-of-conduct.md1
-rw-r--r--docs/configuration/imagegen.md59
-rw-r--r--docs/configuration/memory.md210
-rw-r--r--docs/configuration/options.md53
-rw-r--r--docs/configuration/voice.md37
-rw-r--r--docs/content/AutoGPT/configuration/imagegen.md63
-rw-r--r--docs/content/AutoGPT/configuration/options.md54
-rw-r--r--docs/content/AutoGPT/configuration/search.md (renamed from docs/configuration/search.md)0
-rw-r--r--docs/content/AutoGPT/configuration/voice.md37
-rw-r--r--docs/content/AutoGPT/index.md29
-rw-r--r--docs/content/AutoGPT/plugins.md20
-rw-r--r--docs/content/AutoGPT/setup/docker.md197
-rw-r--r--docs/content/AutoGPT/setup/for-developers.md9
-rw-r--r--docs/content/AutoGPT/setup/get-repo-dialog.pngbin0 -> 69845 bytes
-rw-r--r--docs/content/AutoGPT/setup/index.md127
-rw-r--r--docs/content/AutoGPT/share-your-logs.md52
-rw-r--r--docs/content/AutoGPT/testing.md51
-rw-r--r--docs/content/AutoGPT/usage.md229
-rw-r--r--docs/content/challenges/beat.md (renamed from docs/challenges/beat.md)0
-rw-r--r--docs/content/challenges/building_challenges.md125
-rw-r--r--docs/content/challenges/challenge_template.md (renamed from docs/challenges/challenge_template.md)0
-rw-r--r--docs/content/challenges/information_retrieval/challenge_a.md (renamed from docs/challenges/information_retrieval/challenge_a.md)0
-rw-r--r--docs/content/challenges/information_retrieval/challenge_b.md (renamed from docs/challenges/information_retrieval/challenge_b.md)0
-rw-r--r--docs/content/challenges/information_retrieval/introduction.md3
-rw-r--r--docs/content/challenges/introduction.md34
-rw-r--r--docs/content/challenges/list.md5
-rw-r--r--docs/content/challenges/memory/challenge_a.md (renamed from docs/challenges/memory/challenge_a.md)0
-rw-r--r--docs/content/challenges/memory/challenge_b.md44
-rw-r--r--docs/content/challenges/memory/challenge_c.md61
-rw-r--r--docs/content/challenges/memory/challenge_d.md80
-rw-r--r--docs/content/challenges/memory/introduction.md5
-rw-r--r--docs/content/challenges/submit.md14
l---------docs/content/code-of-conduct.md1
l---------docs/content/contributing.md1
-rw-r--r--docs/content/forge/get-started.md31
-rw-r--r--docs/content/imgs/e2b-dashboard.pngbin0 -> 515634 bytes
-rw-r--r--docs/content/imgs/e2b-log-url.pngbin0 -> 43687 bytes
-rw-r--r--docs/content/imgs/e2b-new-tag.pngbin0 -> 47736 bytes
-rw-r--r--docs/content/imgs/e2b-tag-button.pngbin0 -> 20635 bytes
-rw-r--r--docs/content/imgs/openai-api-key-billing-paid-account.png (renamed from docs/imgs/openai-api-key-billing-paid-account.png)bin316093 -> 316093 bytes
-rw-r--r--docs/content/imgs/quickstart/000_header_img.pngbin0 -> 1563716 bytes
-rw-r--r--docs/content/imgs/quickstart/001_repo.pngbin0 -> 111603 bytes
-rw-r--r--docs/content/imgs/quickstart/002_fork.pngbin0 -> 84111 bytes
-rw-r--r--docs/content/imgs/quickstart/003A_clone.pngbin0 -> 115255 bytes
-rw-r--r--docs/content/imgs/quickstart/003_clone.pngbin0 -> 31665 bytes
-rw-r--r--docs/content/imgs/quickstart/004_ide.pngbin0 -> 187952 bytes
-rw-r--r--docs/content/imgs/quickstart/005_setup.pngbin0 -> 61193 bytes
-rw-r--r--docs/content/imgs/quickstart/006_setup_complete.pngbin0 -> 35663 bytes
-rw-r--r--docs/content/imgs/quickstart/007_create_agent.pngbin0 -> 19785 bytes
-rw-r--r--docs/content/imgs/quickstart/008_enter_arena.pngbin0 -> 112678 bytes
-rw-r--r--docs/content/imgs/quickstart/009_start_agent.pngbin0 -> 43422 bytes
-rw-r--r--docs/content/imgs/quickstart/010_login.pngbin0 -> 12419 bytes
-rw-r--r--docs/content/imgs/quickstart/011_home.pngbin0 -> 68948 bytes
-rw-r--r--docs/content/imgs/quickstart/012_tests.pngbin0 -> 60841 bytes
-rw-r--r--docs/content/imgs/quickstart/t2_01.pngbin0 -> 1778579 bytes
-rw-r--r--docs/content/imgs/quickstart/t2_02.pngbin0 -> 1662037 bytes
-rw-r--r--docs/content/imgs/quickstart/t2_03.pngbin0 -> 245424 bytes
-rw-r--r--docs/content/imgs/quickstart/t2_04.pngbin0 -> 572912 bytes
-rw-r--r--docs/content/imgs/quickstart/t2_agent_flow.pngbin0 -> 188727 bytes
-rw-r--r--docs/content/imgs/quickstart/t2_diagram.pngbin0 -> 1221018 bytes
-rw-r--r--docs/content/imgs/quickstart/t3_01.pngbin0 -> 1375176 bytes
-rw-r--r--docs/content/imgs/quickstart/t3_02.pngbin0 -> 446868 bytes
-rw-r--r--docs/content/imgs/quickstart/t3_03.pngbin0 -> 1514346 bytes
-rw-r--r--docs/content/imgs/quickstart/t3_04.pngbin0 -> 207148 bytes
-rw-r--r--docs/content/index.md115
l---------docs/contributing.md1
-rw-r--r--docs/favicon.pngbin0 -> 26841 bytes
-rw-r--r--docs/index.md7
-rw-r--r--docs/mkdocs.yml131
-rw-r--r--docs/netlify.toml6
-rw-r--r--docs/overrides/main.html61
-rw-r--r--docs/plugins.md20
-rw-r--r--docs/requirements.txt4
-rw-r--r--docs/setup.md244
-rw-r--r--docs/testing.md46
-rw-r--r--docs/usage.md108
-rw-r--r--frontend/.gitignore45
-rw-r--r--frontend/.metadata45
-rw-r--r--frontend/README.md64
-rw-r--r--frontend/analysis_options.yaml29
-rw-r--r--frontend/android/.gitignore13
-rw-r--r--frontend/android/app/build.gradle72
-rw-r--r--frontend/android/app/google-services.json39
-rw-r--r--frontend/android/app/src/debug/AndroidManifest.xml7
-rw-r--r--frontend/android/app/src/main/AndroidManifest.xml33
-rw-r--r--frontend/android/app/src/main/kotlin/com/example/auto_gpt_flutter_client/MainActivity.kt6
-rw-r--r--frontend/android/app/src/main/res/drawable-v21/launch_background.xml12
-rw-r--r--frontend/android/app/src/main/res/drawable/launch_background.xml12
-rw-r--r--frontend/android/app/src/main/res/mipmap-hdpi/ic_launcher.pngbin0 -> 544 bytes
-rw-r--r--frontend/android/app/src/main/res/mipmap-mdpi/ic_launcher.pngbin0 -> 442 bytes
-rw-r--r--frontend/android/app/src/main/res/mipmap-xhdpi/ic_launcher.pngbin0 -> 721 bytes
-rw-r--r--frontend/android/app/src/main/res/mipmap-xxhdpi/ic_launcher.pngbin0 -> 1031 bytes
-rw-r--r--frontend/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.pngbin0 -> 1443 bytes
-rw-r--r--frontend/android/app/src/main/res/values-night/styles.xml18
-rw-r--r--frontend/android/app/src/main/res/values/styles.xml18
-rw-r--r--frontend/android/app/src/profile/AndroidManifest.xml7
-rw-r--r--frontend/android/build.gradle31
-rw-r--r--frontend/android/gradle.properties3
-rw-r--r--frontend/android/gradle/wrapper/gradle-wrapper.properties5
-rw-r--r--frontend/android/settings.gradle11
-rw-r--r--frontend/assets/coding_tree_structure.json344
-rw-r--r--frontend/assets/data_tree_structure.json360
-rw-r--r--frontend/assets/general_tree_structure.json897
-rw-r--r--frontend/assets/images/autogpt_logo.pngbin0 -> 17283 bytes
-rw-r--r--frontend/assets/images/discord_logo.pngbin0 -> 12608 bytes
-rw-r--r--frontend/assets/images/github_logo.svg.pngbin0 -> 32775 bytes
-rw-r--r--frontend/assets/images/google_logo.svg.pngbin0 -> 38249 bytes
-rw-r--r--frontend/assets/images/twitter_logo.pngbin0 -> 16242 bytes
-rw-r--r--frontend/assets/scrape_synthesize_tree_structure.json375
-rw-r--r--frontend/assets/tree_structure.json897
-rwxr-xr-xfrontend/build.sh3
-rw-r--r--frontend/build/web/.last_build_id1
-rw-r--r--frontend/build/web/assets/AssetManifest.bin1
-rw-r--r--frontend/build/web/assets/AssetManifest.json1
-rw-r--r--frontend/build/web/assets/FontManifest.json1
-rw-r--r--frontend/build/web/assets/NOTICES33956
-rw-r--r--frontend/build/web/assets/assets/coding_tree_structure.json344
-rw-r--r--frontend/build/web/assets/assets/data_tree_structure.json360
-rw-r--r--frontend/build/web/assets/assets/general_tree_structure.json897
-rw-r--r--frontend/build/web/assets/assets/github_logo.svg.pngbin0 -> 32775 bytes
-rw-r--r--frontend/build/web/assets/assets/google_logo.svg.pngbin0 -> 38249 bytes
-rw-r--r--frontend/build/web/assets/assets/images/autogpt_logo.pngbin0 -> 17283 bytes
-rw-r--r--frontend/build/web/assets/assets/images/discord_logo.pngbin0 -> 12608 bytes
-rw-r--r--frontend/build/web/assets/assets/images/github_logo.svg.pngbin0 -> 32775 bytes
-rw-r--r--frontend/build/web/assets/assets/images/google_logo.svg.pngbin0 -> 38249 bytes
-rw-r--r--frontend/build/web/assets/assets/images/twitter_logo.pngbin0 -> 16242 bytes
-rw-r--r--frontend/build/web/assets/assets/scrape_synthesize_tree_structure.json375
-rw-r--r--frontend/build/web/assets/assets/tree_structure.json897
-rw-r--r--frontend/build/web/assets/fonts/MaterialIcons-Regular.otfbin0 -> 9412 bytes
-rw-r--r--frontend/build/web/assets/packages/cupertino_icons/assets/CupertinoIcons.ttfbin0 -> 1544 bytes
-rw-r--r--frontend/build/web/assets/packages/fluttertoast/assets/toastify.css14
-rw-r--r--frontend/build/web/assets/packages/fluttertoast/assets/toastify.js14
-rw-r--r--frontend/build/web/assets/shaders/ink_sparkle.frag160
-rw-r--r--frontend/build/web/canvaskit/canvaskit.js222
-rwxr-xr-xfrontend/build/web/canvaskit/canvaskit.wasmbin0 -> 6557869 bytes
-rw-r--r--frontend/build/web/canvaskit/chromium/canvaskit.js222
-rwxr-xr-xfrontend/build/web/canvaskit/chromium/canvaskit.wasmbin0 -> 5159777 bytes
-rw-r--r--frontend/build/web/canvaskit/skwasm.js21
-rwxr-xr-xfrontend/build/web/canvaskit/skwasm.wasmbin0 -> 3252607 bytes
-rw-r--r--frontend/build/web/canvaskit/skwasm.worker.js1
-rw-r--r--frontend/build/web/favicon.pngbin0 -> 917 bytes
-rw-r--r--frontend/build/web/flutter.js383
-rw-r--r--frontend/build/web/flutter_service_worker.js213
-rw-r--r--frontend/build/web/icons/Icon-192.pngbin0 -> 5292 bytes
-rw-r--r--frontend/build/web/icons/Icon-512.pngbin0 -> 8252 bytes
-rw-r--r--frontend/build/web/icons/Icon-maskable-192.pngbin0 -> 5594 bytes
-rw-r--r--frontend/build/web/icons/Icon-maskable-512.pngbin0 -> 20998 bytes
-rw-r--r--frontend/build/web/index.html84
-rw-r--r--frontend/build/web/main.dart.js105518
-rw-r--r--frontend/build/web/manifest.json35
-rw-r--r--frontend/build/web/version.json1
-rw-r--r--frontend/ios/.gitignore34
-rw-r--r--frontend/ios/Flutter/AppFrameworkInfo.plist26
-rw-r--r--frontend/ios/Flutter/Debug.xcconfig2
-rw-r--r--frontend/ios/Flutter/Release.xcconfig2
-rw-r--r--frontend/ios/Podfile44
-rw-r--r--frontend/ios/Runner.xcodeproj/project.pbxproj613
-rw-r--r--frontend/ios/Runner.xcodeproj/project.xcworkspace/contents.xcworkspacedata7
-rw-r--r--frontend/ios/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist8
-rw-r--r--frontend/ios/Runner.xcodeproj/project.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings8
-rw-r--r--frontend/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme98
-rw-r--r--frontend/ios/Runner.xcworkspace/contents.xcworkspacedata7
-rw-r--r--frontend/ios/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist8
-rw-r--r--frontend/ios/Runner.xcworkspace/xcshareddata/WorkspaceSettings.xcsettings8
-rw-r--r--frontend/ios/Runner/AppDelegate.swift15
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json122
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-1024x1024@1x.pngbin0 -> 10932 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@1x.pngbin0 -> 295 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@2x.pngbin0 -> 406 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-20x20@3x.pngbin0 -> 450 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@1x.pngbin0 -> 282 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@2x.pngbin0 -> 462 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-29x29@3x.pngbin0 -> 704 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@1x.pngbin0 -> 406 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@2x.pngbin0 -> 586 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-40x40@3x.pngbin0 -> 862 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-60x60@2x.pngbin0 -> 862 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-60x60@3x.pngbin0 -> 1674 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-76x76@1x.pngbin0 -> 762 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-76x76@2x.pngbin0 -> 1226 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/AppIcon.appiconset/Icon-App-83.5x83.5@2x.pngbin0 -> 1418 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/LaunchImage.imageset/Contents.json23
-rw-r--r--frontend/ios/Runner/Assets.xcassets/LaunchImage.imageset/LaunchImage.pngbin0 -> 68 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/LaunchImage.imageset/LaunchImage@2x.pngbin0 -> 68 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/LaunchImage.imageset/LaunchImage@3x.pngbin0 -> 68 bytes
-rw-r--r--frontend/ios/Runner/Assets.xcassets/LaunchImage.imageset/README.md5
-rw-r--r--frontend/ios/Runner/Base.lproj/LaunchScreen.storyboard37
-rw-r--r--frontend/ios/Runner/Base.lproj/Main.storyboard26
-rw-r--r--frontend/ios/Runner/GoogleService-Info.plist34
-rw-r--r--frontend/ios/Runner/Info.plist51
-rw-r--r--frontend/ios/Runner/Runner-Bridging-Header.h1
-rw-r--r--frontend/ios/RunnerTests/RunnerTests.swift12
-rw-r--r--frontend/lib/constants/app_colors.dart60
-rw-r--r--frontend/lib/main.dart134
-rw-r--r--frontend/lib/models/artifact.dart62
-rw-r--r--frontend/lib/models/benchmark/api_type.dart5
-rw-r--r--frontend/lib/models/benchmark/benchmark_run.dart73
-rw-r--r--frontend/lib/models/benchmark/benchmark_step_request_body.dart12
-rw-r--r--frontend/lib/models/benchmark/benchmark_task_request_body.dart13
-rw-r--r--frontend/lib/models/benchmark/benchmark_task_status.dart7
-rw-r--r--frontend/lib/models/benchmark/config.dart43
-rw-r--r--frontend/lib/models/benchmark/metrics.dart70
-rw-r--r--frontend/lib/models/benchmark/repository_info.dart54
-rw-r--r--frontend/lib/models/benchmark/run_details.dart64
-rw-r--r--frontend/lib/models/benchmark/task_info.dart71
-rw-r--r--frontend/lib/models/chat.dart64
-rw-r--r--frontend/lib/models/message_type.dart5
-rw-r--r--frontend/lib/models/pagination.dart22
-rw-r--r--frontend/lib/models/skill_tree/ground.dart25
-rw-r--r--frontend/lib/models/skill_tree/info.dart19
-rw-r--r--frontend/lib/models/skill_tree/skill_node_data.dart37
-rw-r--r--frontend/lib/models/skill_tree/skill_tree_category.dart38
-rw-r--r--frontend/lib/models/skill_tree/skill_tree_edge.dart23
-rw-r--r--frontend/lib/models/skill_tree/skill_tree_node.dart27
-rw-r--r--frontend/lib/models/step.dart53
-rw-r--r--frontend/lib/models/step_request_body.dart13
-rw-r--r--frontend/lib/models/task.dart67
-rw-r--r--frontend/lib/models/task_request_body.dart10
-rw-r--r--frontend/lib/models/task_response.dart19
-rw-r--r--frontend/lib/models/test_option.dart65
-rw-r--r--frontend/lib/models/test_suite.dart25
-rw-r--r--frontend/lib/services/auth_service.dart51
-rw-r--r--frontend/lib/services/benchmark_service.dart51
-rw-r--r--frontend/lib/services/chat_service.dart91
-rw-r--r--frontend/lib/services/leaderboard_service.dart24
-rw-r--r--frontend/lib/services/shared_preferences_service.dart106
-rw-r--r--frontend/lib/services/task_service.dart101
-rw-r--r--frontend/lib/utils/feature_flags.dart3
-rw-r--r--frontend/lib/utils/rest_api_utility.dart91
-rw-r--r--frontend/lib/utils/stack.dart20
-rw-r--r--frontend/lib/utils/uri_utility.dart75
-rw-r--r--frontend/lib/viewmodels/chat_viewmodel.dart223
-rw-r--r--frontend/lib/viewmodels/settings_viewmodel.dart87
-rw-r--r--frontend/lib/viewmodels/skill_tree_viewmodel.dart87
-rw-r--r--frontend/lib/viewmodels/task_queue_viewmodel.dart276
-rw-r--r--frontend/lib/viewmodels/task_viewmodel.dart210
-rw-r--r--frontend/lib/views/auth/firebase_auth_view.dart71
-rw-r--r--frontend/lib/views/chat/agent_message_tile.dart175
-rw-r--r--frontend/lib/views/chat/chat_input_field.dart167
-rw-r--r--frontend/lib/views/chat/chat_view.dart178
-rw-r--r--frontend/lib/views/chat/continuous_mode_dialog.dart168
-rw-r--r--frontend/lib/views/chat/json_code_snippet_view.dart65
-rw-r--r--frontend/lib/views/chat/loading_indicator.dart81
-rw-r--r--frontend/lib/views/chat/user_message_tile.dart70
-rw-r--r--frontend/lib/views/main_layout.dart167
-rw-r--r--frontend/lib/views/settings/api_base_url_field.dart75
-rw-r--r--frontend/lib/views/settings/settings_view.dart92
-rw-r--r--frontend/lib/views/side_bar/side_bar_view.dart117
-rw-r--r--frontend/lib/views/skill_tree/skill_tree_view.dart129
-rw-r--r--frontend/lib/views/skill_tree/tree_node_view.dart74
-rw-r--r--frontend/lib/views/task/new_task_button.dart47
-rw-r--r--frontend/lib/views/task/task_list_tile.dart75
-rw-r--r--frontend/lib/views/task/task_view.dart134
-rw-r--r--frontend/lib/views/task/test_suite_detail_view.dart79
-rw-r--r--frontend/lib/views/task/test_suite_list_tile.dart66
-rw-r--r--frontend/lib/views/task_queue/leaderboard_submission_button.dart56
-rw-r--r--frontend/lib/views/task_queue/leaderboard_submission_dialog.dart241
-rw-r--r--frontend/lib/views/task_queue/task_queue_view.dart153
-rw-r--r--frontend/lib/views/task_queue/test_suite_button.dart119
-rw-r--r--frontend/linux/.gitignore1
-rw-r--r--frontend/linux/CMakeLists.txt139
-rw-r--r--frontend/linux/flutter/CMakeLists.txt88
-rw-r--r--frontend/linux/flutter/generated_plugin_registrant.cc15
-rw-r--r--frontend/linux/flutter/generated_plugin_registrant.h15
-rw-r--r--frontend/linux/flutter/generated_plugins.cmake24
-rw-r--r--frontend/linux/main.cc6
-rw-r--r--frontend/linux/my_application.cc104
-rw-r--r--frontend/linux/my_application.h18
-rw-r--r--frontend/macos/.gitignore7
-rw-r--r--frontend/macos/Flutter/Flutter-Debug.xcconfig2
-rw-r--r--frontend/macos/Flutter/Flutter-Release.xcconfig2
-rw-r--r--frontend/macos/Flutter/GeneratedPluginRegistrant.swift20
-rw-r--r--frontend/macos/Podfile43
-rw-r--r--frontend/macos/Runner.xcodeproj/project.pbxproj695
-rw-r--r--frontend/macos/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist8
-rw-r--r--frontend/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme98
-rw-r--r--frontend/macos/Runner.xcworkspace/contents.xcworkspacedata7
-rw-r--r--frontend/macos/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist8
-rw-r--r--frontend/macos/Runner/AppDelegate.swift9
-rw-r--r--frontend/macos/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json68
-rw-r--r--frontend/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_1024.pngbin0 -> 102994 bytes
-rw-r--r--frontend/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_128.pngbin0 -> 5680 bytes
-rw-r--r--frontend/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_16.pngbin0 -> 520 bytes
-rw-r--r--frontend/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_256.pngbin0 -> 14142 bytes
-rw-r--r--frontend/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_32.pngbin0 -> 1066 bytes
-rw-r--r--frontend/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_512.pngbin0 -> 36406 bytes
-rw-r--r--frontend/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_64.pngbin0 -> 2218 bytes
-rw-r--r--frontend/macos/Runner/Base.lproj/MainMenu.xib343
-rw-r--r--frontend/macos/Runner/Configs/AppInfo.xcconfig14
-rw-r--r--frontend/macos/Runner/Configs/Debug.xcconfig2
-rw-r--r--frontend/macos/Runner/Configs/Release.xcconfig2
-rw-r--r--frontend/macos/Runner/Configs/Warnings.xcconfig13
-rw-r--r--frontend/macos/Runner/DebugProfile.entitlements12
-rw-r--r--frontend/macos/Runner/Info.plist32
-rw-r--r--frontend/macos/Runner/MainFlutterWindow.swift15
-rw-r--r--frontend/macos/Runner/Release.entitlements8
-rw-r--r--frontend/macos/RunnerTests/RunnerTests.swift12
-rw-r--r--frontend/pubspec.lock650
-rw-r--r--frontend/pubspec.yaml111
-rwxr-xr-xfrontend/run5
-rw-r--r--frontend/test/agent_message_tile_test.dart46
-rw-r--r--frontend/test/chat_input_field_test.dart71
-rw-r--r--frontend/test/chat_test.dart69
-rw-r--r--frontend/test/chat_viewmodel_test.dart47
-rw-r--r--frontend/test/json_code_snippet_view_test.dart14
-rw-r--r--frontend/test/new_task_button_test.dart24
-rw-r--r--frontend/test/step_request_body_test.dart26
-rw-r--r--frontend/test/task_list_tile_test.dart71
-rw-r--r--frontend/test/task_request_body_test.dart26
-rw-r--r--frontend/test/task_test.dart57
-rw-r--r--frontend/test/task_viewmodel_test.dart76
-rw-r--r--frontend/test/user_message_tile_test.dart42
-rw-r--r--frontend/web/favicon.pngbin0 -> 917 bytes
-rw-r--r--frontend/web/icons/Icon-192.pngbin0 -> 5292 bytes
-rw-r--r--frontend/web/icons/Icon-512.pngbin0 -> 8252 bytes
-rw-r--r--frontend/web/icons/Icon-maskable-192.pngbin0 -> 5594 bytes
-rw-r--r--frontend/web/icons/Icon-maskable-512.pngbin0 -> 20998 bytes
-rw-r--r--frontend/web/index.html84
-rw-r--r--frontend/web/manifest.json35
-rw-r--r--frontend/windows/.gitignore17
-rw-r--r--frontend/windows/CMakeLists.txt102
-rw-r--r--frontend/windows/flutter/CMakeLists.txt104
-rw-r--r--frontend/windows/flutter/generated_plugin_registrant.cc17
-rw-r--r--frontend/windows/flutter/generated_plugin_registrant.h15
-rw-r--r--frontend/windows/flutter/generated_plugins.cmake25
-rw-r--r--frontend/windows/runner/CMakeLists.txt40
-rw-r--r--frontend/windows/runner/Runner.rc121
-rw-r--r--frontend/windows/runner/flutter_window.cpp66
-rw-r--r--frontend/windows/runner/flutter_window.h33
-rw-r--r--frontend/windows/runner/main.cpp43
-rw-r--r--frontend/windows/runner/resource.h16
-rw-r--r--frontend/windows/runner/resources/app_icon.icobin0 -> 33772 bytes
-rw-r--r--frontend/windows/runner/runner.exe.manifest20
-rw-r--r--frontend/windows/runner/utils.cpp65
-rw-r--r--frontend/windows/runner/utils.h19
-rw-r--r--frontend/windows/runner/win32_window.cpp288
-rw-r--r--frontend/windows/runner/win32_window.h102
-rw-r--r--main.py1
-rw-r--r--mkdocs.yml43
-rw-r--r--mypy.ini10
-rw-r--r--netlify.toml6
-rw-r--r--prompt_settings.yaml17
-rw-r--r--pyproject.toml63
-rw-r--r--requirements.txt70
-rwxr-xr-xrun3
-rw-r--r--run.bat24
-rwxr-xr-xrun.sh29
-rw-r--r--run_continuous.bat3
-rwxr-xr-xrun_continuous.sh3
-rw-r--r--scripts/check_requirements.py37
-rw-r--r--scripts/install_plugin_deps.py65
-rwxr-xr-xsetup.sh30
-rw-r--r--tests.py21
m---------tests/Auto-GPT-test-cassettes0
-rw-r--r--tests/challenges/basic_abilities/goal_oriented_tasks.md10
-rw-r--r--tests/challenges/basic_abilities/test_browse_website.py34
-rw-r--r--tests/challenges/basic_abilities/test_write_file.py47
-rw-r--r--tests/challenges/challenge_decorator/challenge.py24
-rw-r--r--tests/challenges/challenge_decorator/challenge_decorator.py84
-rw-r--r--tests/challenges/challenge_decorator/challenge_utils.py85
-rw-r--r--tests/challenges/challenge_decorator/score_utils.py59
-rw-r--r--tests/challenges/conftest.py77
-rw-r--r--tests/challenges/current_score.json56
-rw-r--r--tests/challenges/debug_code/data/test.py31
-rw-r--r--tests/challenges/debug_code/test_debug_code_challenge_a.py68
-rw-r--r--tests/challenges/information_retrieval/test_information_retrieval_challenge_a.py50
-rw-r--r--tests/challenges/information_retrieval/test_information_retrieval_challenge_b.py52
-rw-r--r--tests/challenges/information_retrieval/test_information_retrieval_challenge_c.py47
-rw-r--r--tests/challenges/kubernetes/test_kubernetes_template_challenge_a.py53
-rw-r--r--tests/challenges/memory/test_memory_challenge_a.py86
-rw-r--r--tests/challenges/memory/test_memory_challenge_b.py86
-rw-r--r--tests/challenges/memory/test_memory_challenge_c.py105
-rw-r--r--tests/challenges/memory/test_memory_challenge_d.py239
-rw-r--r--tests/challenges/schema.py7
-rw-r--r--tests/challenges/test_challenge_should_be_formatted_properly.py59
-rw-r--r--tests/challenges/utils.py78
-rw-r--r--tests/challenges/utils/build_current_score.py44
-rw-r--r--tests/conftest.py113
-rw-r--r--tests/integration/agent_factory.py47
-rw-r--r--tests/integration/memory/test_json_file_memory.py126
-rw-r--r--tests/integration/memory/utils.py44
-rw-r--r--tests/integration/test_execute_code.py136
-rw-r--r--tests/integration/test_image_gen.py252
-rw-r--r--tests/integration/test_provider_openai.py54
-rw-r--r--tests/integration/test_setup.py73
-rw-r--r--tests/integration/test_web_selenium.py17
-rw-r--r--tests/mocks/mock_commands.py14
-rw-r--r--tests/unit/_test_json_parser.py113
-rw-r--r--tests/unit/data/test_plugins/auto_gpt_guanaco/__init__.py274
-rw-r--r--tests/unit/models/test_base_open_api_plugin.py79
-rw-r--r--tests/unit/test_agent.py47
-rw-r--r--tests/unit/test_ai_config.py74
-rw-r--r--tests/unit/test_api_manager.py86
-rw-r--r--tests/unit/test_commands.py231
-rw-r--r--tests/unit/test_config.py237
-rw-r--r--tests/unit/test_execute_command.py23
-rw-r--r--tests/unit/test_file_operations.py335
-rw-r--r--tests/unit/test_git_commands.py43
-rw-r--r--tests/unit/test_logs.py28
-rw-r--r--tests/unit/test_message_history.py148
-rw-r--r--tests/unit/test_plugins.py120
-rw-r--r--tests/unit/test_prompt_config.py48
-rw-r--r--tests/unit/test_prompt_generator.py101
-rw-r--r--tests/unit/test_retry_provider_openai.py130
-rw-r--r--tests/unit/test_spinner.py70
-rw-r--r--tests/unit/test_text_file_parsers.py150
-rw-r--r--tests/unit/test_token_counter.py55
-rw-r--r--tests/unit/test_url_validation.py166
-rw-r--r--tests/unit/test_utils.py208
-rw-r--r--tests/unit/test_web_search.py136
-rw-r--r--tests/unit/test_workspace.py99
-rw-r--r--tests/vcr/__init__.py92
-rw-r--r--tests/vcr/vcr_filter.py123
3302 files changed, 788195 insertions, 20668 deletions
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
deleted file mode 100644
index f31cdebad..000000000
--- a/.devcontainer/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-# Use an official Python base image from the Docker Hub
-FROM python:3.10
-
-# Install browsers
-RUN apt-get update && apt-get install -y \
- chromium-driver firefox-esr \
- ca-certificates
-
-# Install utilities
-RUN apt-get install -y curl jq wget git
-
-# Declare working directory
-WORKDIR /workspace/Auto-GPT
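Review note: the deleted Dockerfile runs its second `apt-get install` in a separate RUN layer without a preceding `apt-get update`, which can fail against a stale package index once the first layer is cached. For reference only (not part of the diff), the usual single-layer form would be roughly:

    apt-get update && apt-get install -y --no-install-recommends \
        chromium-driver firefox-esr ca-certificates \
        curl jq wget git \
     && rm -rf /var/lib/apt/lists/*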
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
deleted file mode 100644
index 08e74b403..000000000
--- a/.devcontainer/devcontainer.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "dockerComposeFile": "./docker-compose.yml",
- "service": "auto-gpt",
- "workspaceFolder": "/workspace/Auto-GPT",
- "shutdownAction": "stopCompose",
- "features": {
- "ghcr.io/devcontainers/features/common-utils:2": {
- "installZsh": "true",
- "username": "vscode",
- "userUid": "1000",
- "userGid": "1000",
- "upgradePackages": "true"
- },
- "ghcr.io/devcontainers/features/desktop-lite:1": {},
- "ghcr.io/devcontainers/features/github-cli:1": {},
- "ghcr.io/devcontainers/features/python:1": "none",
- "ghcr.io/devcontainers/features/node:1": "none",
- "ghcr.io/devcontainers/features/git:1": {
- "version": "latest",
- "ppa": "false"
- }
- },
- // Configure tool-specific properties.
- "customizations": {
- // Configure properties specific to VS Code.
- "vscode": {
- // Set *default* container specific settings.json values on container create.
- "settings": {
- "python.defaultInterpreterPath": "/usr/local/bin/python",
- "python.testing.pytestEnabled": true,
- "python.testing.unittestEnabled": false
- },
- "extensions": [
- "ms-python.python",
- "VisualStudioExptTeam.vscodeintellicode",
- "ms-python.vscode-pylance",
- "ms-python.black-formatter",
- "ms-python.isort",
- "GitHub.vscode-pull-request-github",
- "GitHub.copilot",
- "github.vscode-github-actions"
- ]
- }
- },
- // Use 'forwardPorts' to make a list of ports inside the container available locally.
- // "forwardPorts": [],
-
- // Use 'postCreateCommand' to run commands after the container is created.
- // "postCreateCommand": "pip3 install --user -r requirements.txt",
-
- // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
- "remoteUser": "vscode",
-
- // Add the freshly containerized repo to the list of safe repositories
- "postCreateCommand": "git config --global --add safe.directory /workspace/Auto-GPT && pip3 install --user -r requirements.txt"
-} \ No newline at end of file
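Review note: the `postCreateCommand` above marks the bind-mounted repo as a safe directory before installing dependencies; without it, git inside the container refuses to operate on a checkout owned by a different UID than the `vscode` user. Split into plain shell, the command runs:

    git config --global --add safe.directory /workspace/Auto-GPT
    pip3 install --user -r requirements.txt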
diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml
deleted file mode 100644
index c05a326e6..000000000
--- a/.devcontainer/docker-compose.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-# To boot the app run the following:
-# docker-compose run auto-gpt
-version: '3.9'
-
-services:
- auto-gpt:
- build:
- dockerfile: .devcontainer/Dockerfile
- context: ../
- tty: true
- volumes:
- - ../:/workspace/Auto-GPT
diff --git a/.dockerignore b/.dockerignore
deleted file mode 100644
index fef46549a..000000000
--- a/.dockerignore
+++ /dev/null
@@ -1,9 +0,0 @@
-.*
-*.template
-*.yaml
-*.yml
-!prompt_settings.yaml
-
-*.md
-*.png
-!BULLETIN.md
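Review note: .dockerignore patterns apply in order, so the later `!prompt_settings.yaml` and `!BULLETIN.md` negations re-include files excluded by the broader `*.yaml` and `*.md` rules. A sketch for sanity-checking the resulting build context (assumes BuildKit; the throwaway Dockerfile is read from stdin):

    docker build --progress=plain -f- . <<'EOF'
    FROM busybox
    COPY . /ctx
    RUN find /ctx -maxdepth 2
    EOF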
diff --git a/.env.template b/.env.template
deleted file mode 100644
index 1c1649119..000000000
--- a/.env.template
+++ /dev/null
@@ -1,212 +0,0 @@
-# For further descriptions of these settings see docs/configuration/options.md or go to docs.agpt.co
-
-################################################################################
-### AUTO-GPT - GENERAL SETTINGS
-################################################################################
-
-## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
-OPENAI_API_KEY=your-openai-api-key
-
-## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
-# EXECUTE_LOCAL_COMMANDS=False
-
-## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
-# RESTRICT_TO_WORKSPACE=True
-
-## USER_AGENT - Define the user-agent used by the requests library to browse websites (string)
-# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
-
-## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
-# AI_SETTINGS_FILE=ai_settings.yaml
-
-## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file (Default plugins_config.yaml)
-# PLUGINS_CONFIG_FILE=plugins_config.yaml
-
-## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
-# PROMPT_SETTINGS_FILE=prompt_settings.yaml
-
-## OPENAI_API_BASE_URL - Custom URL for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true; leave blank to keep the default URL
-# the following is an example:
-# OPENAI_API_BASE_URL=http://localhost:443/v1
-
-## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
-## WARNING: this feature is only supported by OpenAI's newest models. Until these models become the default on 27 June, add a '-0613' suffix to the model of your choosing.
-# OPENAI_FUNCTIONS=False
-
-## AUTHORISE COMMAND KEY - Key to authorise commands
-# AUTHORISE_COMMAND_KEY=y
-
-## EXIT_KEY - Key to exit AUTO-GPT
-# EXIT_KEY=n
-
-## PLAIN_OUTPUT - Plain output, which disables the spinner (Default: False)
-# PLAIN_OUTPUT=False
-
-## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
-# DISABLED_COMMAND_CATEGORIES=
-
-################################################################################
-### LLM PROVIDER
-################################################################################
-
-## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
-# TEMPERATURE=0
-
-## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
-# OPENAI_ORGANIZATION=
-
-## USE_AZURE - Use Azure OpenAI or not (Default: False)
-# USE_AZURE=False
-
-## AZURE_CONFIG_FILE - The path to the azure.yaml file (Default: azure.yaml)
-# AZURE_CONFIG_FILE=azure.yaml
-
-
-################################################################################
-### LLM MODELS
-################################################################################
-
-## SMART_LLM - Smart language model (Default: gpt-4)
-# SMART_LLM=gpt-4
-
-## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
-# FAST_LLM=gpt-3.5-turbo
-
-## EMBEDDING_MODEL - Model to use for creating embeddings
-# EMBEDDING_MODEL=text-embedding-ada-002
-
-################################################################################
-### SHELL EXECUTION
-################################################################################
-
-## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
-# SHELL_COMMAND_CONTROL=denylist
-
-## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
-## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by Auto-GPT (Default: sudo,su)
-# SHELL_DENYLIST=sudo,su
-
-## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
-## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by Auto-GPT (Default: None)
-# SHELL_ALLOWLIST=
-
-################################################################################
-### MEMORY
-################################################################################
-
-### General
-
-## MEMORY_BACKEND - Memory backend type
-# MEMORY_BACKEND=json_file
-
-## MEMORY_INDEX - Value used in the Memory backend for scoping, naming, or indexing (Default: auto-gpt)
-# MEMORY_INDEX=auto-gpt
-
-### Redis
-
-## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
-# REDIS_HOST=localhost
-
-## REDIS_PORT - Redis port (Default: 6379)
-# REDIS_PORT=6379
-
-## REDIS_PASSWORD - Redis password (Default: "")
-# REDIS_PASSWORD=
-
-## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
-# WIPE_REDIS_ON_START=True
-
-################################################################################
-### IMAGE GENERATION PROVIDER
-################################################################################
-
-### Common
-
-## IMAGE_PROVIDER - Image provider (Default: dalle)
-# IMAGE_PROVIDER=dalle
-
-## IMAGE_SIZE - Image size (Default: 256)
-# IMAGE_SIZE=256
-
-### Huggingface (IMAGE_PROVIDER=huggingface)
-
-## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
-# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
-
-## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
-# HUGGINGFACE_API_TOKEN=
-
-### Stable Diffusion (IMAGE_PROVIDER=sdwebui)
-
-## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
-# SD_WEBUI_AUTH=
-
-## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
-# SD_WEBUI_URL=http://localhost:7860
-
-################################################################################
-### AUDIO TO TEXT PROVIDER
-################################################################################
-
-## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
-# AUDIO_TO_TEXT_PROVIDER=huggingface
-
-## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
-# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4
-
-################################################################################
-### GITHUB
-################################################################################
-
-## GITHUB_API_KEY - Github API key / PAT (Default: None)
-# GITHUB_API_KEY=
-
-## GITHUB_USERNAME - Github username (Default: None)
-# GITHUB_USERNAME=
-
-################################################################################
-### WEB BROWSING
-################################################################################
-
-## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
-# HEADLESS_BROWSER=True
-
-## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
-# USE_WEB_BROWSER=chrome
-
-## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000)
-# BROWSE_CHUNK_MAX_LENGTH=3000
-
-## BROWSE_SPACY_LANGUAGE_MODEL - [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm)
-# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm
-
-## GOOGLE_API_KEY - Google API key (Default: None)
-# GOOGLE_API_KEY=
-
-## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
-# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=
-
-################################################################################
-### TEXT TO SPEECH PROVIDER
-################################################################################
-
-## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
-# TEXT_TO_SPEECH_PROVIDER=gtts
-
-### Only if TEXT_TO_SPEECH_PROVIDER=streamelements
-## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
-# STREAMELEMENTS_VOICE=Brian
-
-### Only if TEXT_TO_SPEECH_PROVIDER=elevenlabs
-## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
-# ELEVENLABS_API_KEY=
-
-## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
-# ELEVENLABS_VOICE_ID=
-
-################################################################################
-### CHAT MESSAGES
-################################################################################
-
-## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
-# CHAT_MESSAGES_ENABLED=False
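Review note: per the deleted template above, `SHELL_ALLOWLIST` only takes effect when `SHELL_COMMAND_CONTROL=allowlist`. A minimal sketch of switching from the default denylist to an allowlist in `.env` (the command names are purely illustrative):

    SHELL_COMMAND_CONTROL=allowlist
    SHELL_ALLOWLIST=ls,cat,grep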
diff --git a/.envrc b/.envrc
deleted file mode 100644
index a7ad72637..000000000
--- a/.envrc
+++ /dev/null
@@ -1,4 +0,0 @@
-# Upon entering the directory, direnv asks the user for permission once, then loads the project dependencies automatically from then on.
-# This removes the need for Nix users to run "nix develop github:superherointj/nix-auto-gpt" to develop or use Auto-GPT.
-
-[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt
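Review note: direnv never executes an .envrc until the user approves it, so the one-time permission grant mentioned above corresponds to:

    cd Auto-GPT/
    direnv allow .   # approve this .envrc once; it then loads on every cd into the directory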
diff --git a/.flake8 b/.flake8
index 779762248..5b9641724 100644
--- a/.flake8
+++ b/.flake8
@@ -1,6 +1,5 @@
[flake8]
max-line-length = 88
-select = "E303, W293, W291, W292, E305, E231, E302"
exclude =
.tox,
__pycache__,
@@ -10,3 +9,4 @@ exclude =
.venv/*,
reports/*,
dist/*,
+ data/*,
diff --git a/.gitattributes b/.gitattributes
index 88a33bf75..d3adc9db4 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,5 +1,3 @@
-# Exclude VCR cassettes from stats
-tests/Auto-GPT-test-cassettes/**/**.y*ml linguist-generated
+frontend/build/* linguist-generated
-# Mark documentation as such
-docs/**.md linguist-documentation
+**/poetry.lock linguist-generated
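Review note: paths marked `linguist-generated` are collapsed in GitHub diffs and excluded from language statistics. Whether a given path picks up the attribute can be checked locally, e.g. (assuming the lockfile path exists in the tree):

    git check-attr linguist-generated -- autogpts/autogpt/poetry.lock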
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index d4c5c1919..110493c3f 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,2 +1,5 @@
-.github/workflows/ @Significant-Gravitas/maintainers
-autogpt/core @collijk
+.github/workflows/ @Significant-Gravitas/maintainers
+autogpts/autogpt/ @Pwuts
+benchmark/ @Significant-Gravitas/benchmarkers
+forge/ @Swiftyos
+frontend/ @hunteraraujo
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
deleted file mode 100644
index 54ef10844..000000000
--- a/.github/FUNDING.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-# These are supported funding model platforms
-
-github: Torantulino
diff --git a/.github/ISSUE_TEMPLATE/1.bug.yml b/.github/ISSUE_TEMPLATE/1.bug.yml
index 53c7f9da0..3b9b6d265 100644
--- a/.github/ISSUE_TEMPLATE/1.bug.yml
+++ b/.github/ISSUE_TEMPLATE/1.bug.yml
@@ -1,5 +1,5 @@
name: Bug report 🐛
-description: Create a bug report for Auto-GPT.
+description: Create a bug report for AutoGPT.
labels: ['status: needs triage']
body:
- type: markdown
@@ -13,16 +13,16 @@ body:
[backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
[roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
[discord]: https://discord.gg/autogpt
- [discussions]: https://github.com/Significant-Gravitas/Auto-GPT/discussions
+ [discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions
[#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
- [existing issues]: https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue
+ [existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue
[wiki page on Contributing]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
- type: checkboxes
attributes:
label: ⚠️ Search for existing issues first ⚠️
description: >
- Please [search the history](https://github.com/Torantulino/Auto-GPT/issues)
+ Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues)
to see if an issue already exists for the same problem.
options:
- label: I have searched the existing issues, and there is no existing issue for my problem
@@ -35,8 +35,8 @@ body:
A good rule of thumb: What would you type if you were searching for the issue?
For example:
- BAD - my auto-gpt keeps looping
- GOOD - After performing execute_python_file, auto-gpt goes into a loop where it keeps trying to execute the file.
+ BAD - my AutoGPT keeps looping
+ GOOD - After performing execute_python_file, AutoGPT goes into a loop where it keeps trying to execute the file.
⚠️ SUPER-busy repo, please help the volunteer maintainers.
The less time we spend here, the more time we can spend building AutoGPT.
@@ -54,7 +54,7 @@ body:
attributes:
label: Which Operating System are you using?
description: >
- Please select the operating system you were using to run Auto-GPT when this problem occurred.
+ Please select the operating system you were using to run AutoGPT when this problem occurred.
options:
- Windows
- Linux
@@ -73,12 +73,12 @@ body:
- type: dropdown
attributes:
- label: Which version of Auto-GPT are you using?
+ label: Which version of AutoGPT are you using?
description: |
- Please select which version of Auto-GPT you were using when this issue occurred.
- If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/Auto-GPT/releases/) make sure you were using the latest code.
- **If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/Auto-GPT/releases/)**.
- If installed with git you can run `git branch` to see which version of Auto-GPT you are running.
+ Please select which version of AutoGPT you were using when this issue occurred.
+ If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/AutoGPT/releases/) make sure you were using the latest code.
+ **If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/AutoGPT/releases/)**.
+ If installed with git you can run `git branch` to see which version of AutoGPT you are running.
options:
- Latest Release
- Stable (branch)
@@ -90,8 +90,8 @@ body:
attributes:
label: Do you use OpenAI GPT-3 or GPT-4?
description: >
- If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
- the [limitations](https://github.com/Significant-Gravitas/Auto-GPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
+ If you are using AutoGPT with `--gpt3only`, your problems may be caused by
+ the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
options:
- GPT-3.5
- GPT-4
@@ -129,7 +129,7 @@ body:
- type: textarea
attributes:
label: Describe your issue.
- description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
+ description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
validations:
required: true
@@ -139,16 +139,16 @@ body:
value: |
⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
- "The log files are located in the folder 'logs' inside the main auto-gpt folder."
+ "The log files are located in the folder 'logs' inside the main AutoGPT folder."
- type: textarea
attributes:
label: Upload Activity Log Content
description: |
Upload the activity log content, this can help us understand the issue better.
- To do this, go to the folder logs in your main auto-gpt folder, open activity.log and copy/paste the contents to this field.
- ⚠️ The activity log may contain personal data given to auto-gpt by you in prompt or input as well as
- any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
+ To do this, go to the folder logs in your main AutoGPT folder, open activity.log and copy/paste the contents to this field.
+ ⚠️ The activity log may contain personal data given to AutoGPT by you in prompt or input as well as
+ any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
validations:
required: false
@@ -157,8 +157,8 @@ body:
label: Upload Error Log Content
description: |
Upload the error log content, this will help us understand the issue better.
- To do this, go to the folder logs in your main auto-gpt folder, open error.log and copy/paste the contents to this field.
- ⚠️ The error log may contain personal data given to auto-gpt by you in prompt or input as well as
- any personal information that auto-gpt collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
+ To do this, go to the folder logs in your main AutoGPT folder, open error.log and copy/paste the contents to this field.
+ ⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input as well as
+ any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
validations:
required: false
diff --git a/.github/ISSUE_TEMPLATE/2.feature.yml b/.github/ISSUE_TEMPLATE/2.feature.yml
index ea6733a19..9d505cf7a 100644
--- a/.github/ISSUE_TEMPLATE/2.feature.yml
+++ b/.github/ISSUE_TEMPLATE/2.feature.yml
@@ -1,5 +1,5 @@
name: Feature request 🚀
-description: Suggest a new idea for Auto-GPT!
+description: Suggest a new idea for AutoGPT!
labels: ['status: needs triage']
body:
- type: markdown
@@ -10,7 +10,7 @@ body:
- type: checkboxes
attributes:
label: Duplicates
- description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
+ description: Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues) to see if an issue already exists for the same problem.
options:
- label: I have searched the existing issues
required: true
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index efb678688..19eb09eee 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,49 +1,31 @@
-<!-- ⚠️ At the moment any non-essential commands are not being merged.
-If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
-We are expecting to ship plugin support within the week (PR #757).
-Resources:
-* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
--->
-
-<!-- 📢 Announcement
-We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:
-
-Focus on a single, specific change.
-Do not include any unrelated or "extra" modifications.
-Provide clear documentation and explanations of the changes made.
-Ensure diffs are limited to the intended lines — no applying preferred formatting styles or line endings (unless that's what the PR is about).
-For guidance on committing only the specific lines you have changed, refer to this helpful video: https://youtu.be/8-hSNHHbiZg
-
-Check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing)
-
-By following these guidelines, your PRs are more likely to be merged quickly after testing, as long as they align with the project's overall direction. -->
-
### Background
-<!-- Provide a concise overview of the rationale behind this change. Include relevant context, prior discussions, or links to related issues. Ensure that the change aligns with the project's overall direction. -->
-### Changes
-<!-- Describe the specific, focused change made in this pull request. Detail the modifications clearly and avoid any unrelated or "extra" changes. -->
+<!-- Clearly explain the need for these changes: -->
-### Documentation
-<!-- Explain how your changes are documented, such as in-code comments or external documentation. Ensure that the documentation is clear, concise, and easy to understand. -->
+### Changes 🏗️
-### Test Plan
-<!-- Describe how you tested this functionality. Include steps to reproduce, relevant test cases, and any other pertinent information. -->
+<!-- Concisely describe all of the changes made in this pull request: -->
-### PR Quality Checklist
-- [ ] My pull request is atomic and focuses on a single change.
-- [ ] I have thoroughly tested my changes with multiple different prompts.
-- [ ] I have considered potential risks and mitigations for my changes.
-- [ ] I have documented my changes clearly and comprehensively.
-- [ ] I have not snuck in any "extra" small tweaks changes. <!-- Submit these as separate Pull Requests, they are the easiest to merge! -->
-- [ ] I have run the following commands against my code to ensure it passes our linters:
- ```shell
- black .
- isort .
- mypy
- autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests --in-place
- ```
+### PR Quality Scorecard ✨
-<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
+<!--
+Check out our contribution guide:
+https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
+
+1. Avoid duplicate work, issues, PRs etc.
+2. Also consider contributing something other than code; see the [contribution guide]
+ for options.
+3. Clearly explain your changes.
+4. Avoid making unnecessary changes, especially if they're purely based on personal
+ preferences. Doing so is the maintainers' job. ;-)
+-->
-<!-- By submitting this, I agree that my pull request should be closed if I do not fill this out or follow the guidelines. -->
+- [x] Have you used the PR description template? &ensp; `+2 pts`
+- [ ] Is your pull request atomic, focusing on a single change? &ensp; `+5 pts`
+- [ ] Have you linked the GitHub issue(s) that this PR addresses? &ensp; `+5 pts`
+- [ ] Have you documented your changes clearly and comprehensively? &ensp; `+5 pts`
+- [ ] Have you changed or added a feature? &ensp; `-4 pts`
+ - [ ] Have you added/updated corresponding documentation? &ensp; `+4 pts`
+ - [ ] Have you added/updated corresponding integration tests? &ensp; `+5 pts`
+- [ ] Have you changed the behavior of AutoGPT? &ensp; `-5 pts`
+ - [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance? &ensp; `+10 pts`
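Review note: the scorecard's last item assumes contributors can reproduce the benchmark check locally. A minimal sketch, using the same flags that appear in the CI workflows later in this diff:

    cd autogpts/autogpt
    poetry run agbenchmark --mock    # fast, mocked sanity pass
    poetry run agbenchmark           # full run to check for behaviour regressions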
diff --git a/.github/workflows/autogpt-ci.yml b/.github/workflows/autogpt-ci.yml
new file mode 100644
index 000000000..2ce756a7a
--- /dev/null
+++ b/.github/workflows/autogpt-ci.yml
@@ -0,0 +1,250 @@
+name: AutoGPT Python CI
+
+on:
+ push:
+ branches: [ master, development, ci-test* ]
+ paths:
+ - '.github/workflows/autogpt-ci.yml'
+ - 'autogpts/autogpt/**'
+ - '!autogpts/autogpt/tests/vcr_cassettes'
+ pull_request:
+ branches: [ master, development, release-* ]
+ paths:
+ - '.github/workflows/autogpt-ci.yml'
+ - 'autogpts/autogpt/**'
+ - '!autogpts/autogpt/tests/vcr_cassettes'
+
+concurrency:
+ group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
+
+defaults:
+ run:
+ working-directory: autogpts/autogpt
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+ env:
+ min-python-version: "3.10"
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python ${{ env.min-python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ env.min-python-version }}
+
+ - id: get_date
+ name: Get date
+ run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+ - name: Set up Python dependency cache
+ uses: actions/cache@v3
+ with:
+ path: ~/.cache/pypoetry
+ key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }}
+
+ - name: Install Python dependencies
+ run: |
+ curl -sSL https://install.python-poetry.org | python3 -
+ poetry install
+
+ - name: Lint with flake8
+ run: poetry run flake8
+
+ - name: Check black formatting
+ run: poetry run black . --check
+ if: success() || failure()
+
+ - name: Check isort formatting
+ run: poetry run isort . --check
+ if: success() || failure()
+
+ # - name: Check mypy formatting
+ # run: poetry run mypy
+ # if: success() || failure()
+
+ # - name: Check for unused imports and pass statements
+ # run: |
+ # cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
+ # poetry run $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
+
+ test:
+ permissions:
+ contents: read
+ runs-on: ubuntu-latest
+ timeout-minutes: 30
+ strategy:
+ matrix:
+ python-version: ["3.10"]
+
+ services:
+ minio:
+ image: minio/minio:edge-cicd
+ ports:
+ - 9000:9000
+ options: >
+ --health-interval=10s --health-timeout=5s --health-retries=3
+ --health-cmd="curl -f http://localhost:9000/minio/health/live"
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ submodules: true
+
+ - name: Configure git user Auto-GPT-Bot
+ run: |
+ git config --global user.name "Auto-GPT-Bot"
+ git config --global user.email "github-bot@agpt.co"
+
+ - name: Checkout cassettes
+ if: ${{ startsWith(github.event_name, 'pull_request') }}
+ env:
+ PR_BASE: ${{ github.event.pull_request.base.ref }}
+ PR_BRANCH: ${{ github.event.pull_request.head.ref }}
+ PR_AUTHOR: ${{ github.event.pull_request.user.login }}
+ run: |
+ cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
+ cassette_base_branch="${PR_BASE}"
+ cd tests/vcr_cassettes
+
+ if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
+ cassette_base_branch="master"
+ fi
+
+ if git ls-remote --exit-code --heads origin $cassette_branch ; then
+ git fetch origin $cassette_branch
+ git fetch origin $cassette_base_branch
+
+ git checkout $cassette_branch
+
+ # Pick non-conflicting cassette updates from the base branch
+ git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
+ echo "Using cassettes from mirror branch '$cassette_branch'," \
+ "synced to upstream branch '$cassette_base_branch'."
+ else
+ git checkout -b $cassette_branch
+ echo "Branch '$cassette_branch' does not exist in cassette submodule." \
+ "Using cassettes from '$cassette_base_branch'."
+ fi
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - id: get_date
+ name: Get date
+ run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+ - name: Set up Python dependency cache
+ uses: actions/cache@v3
+ with:
+ path: ~/.cache/pypoetry
+ key: ${{ runner.os }}-poetry-${{ hashFiles('autogpts/autogpt/pyproject.toml') }}-${{ steps.get_date.outputs.date }}
+
+ - name: Install Python dependencies
+ run: |
+ curl -sSL https://install.python-poetry.org | python3 -
+ poetry install
+
+ - name: Run pytest with coverage
+ run: |
+ poetry run pytest -vv \
+ --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
+ --numprocesses=logical --durations=10 \
+ tests/unit tests/integration
+ env:
+ CI: true
+ PLAIN_OUTPUT: True
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ S3_ENDPOINT_URL: http://localhost:9000
+ AWS_ACCESS_KEY_ID: minioadmin
+ AWS_SECRET_ACCESS_KEY: minioadmin
+
+ - name: Upload coverage reports to Codecov
+ uses: codecov/codecov-action@v3
+
+ - id: setup_git_auth
+ name: Set up git token authentication
+ # Cassettes may be pushed even when tests fail
+ if: success() || failure()
+ run: |
+ config_key="http.${{ github.server_url }}/.extraheader"
+ base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
+
+ git config "$config_key" \
+ "Authorization: Basic $base64_pat"
+
+ cd tests/vcr_cassettes
+ git config "$config_key" \
+ "Authorization: Basic $base64_pat"
+
+ echo "config_key=$config_key" >> $GITHUB_OUTPUT
+
+ - id: push_cassettes
+ name: Push updated cassettes
+ # For pull requests, push updated cassettes even when tests fail
+ if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
+ env:
+ PR_BRANCH: ${{ github.event.pull_request.head.ref }}
+ PR_AUTHOR: ${{ github.event.pull_request.user.login }}
+ run: |
+ if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
+ is_pull_request=true
+ cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
+ else
+ cassette_branch="${{ github.ref_name }}"
+ fi
+
+ cd tests/vcr_cassettes
+ # Commit & push changes to cassettes if any
+ if ! git diff --quiet; then
+ git add .
+ git commit -m "Auto-update cassettes"
+ git push origin HEAD:$cassette_branch
+ if [ ! $is_pull_request ]; then
+ cd ../..
+ git add tests/vcr_cassettes
+ git commit -m "Update cassette submodule"
+ git push origin HEAD:$cassette_branch
+ fi
+ echo "updated=true" >> $GITHUB_OUTPUT
+ else
+ echo "updated=false" >> $GITHUB_OUTPUT
+ echo "No cassette changes to commit"
+ fi
+
+ - name: Post Set up git token auth
+ if: steps.setup_git_auth.outcome == 'success'
+ run: |
+ git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
+ git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
+
+ - name: Apply "behaviour change" label and comment on PR
+ if: ${{ startsWith(github.event_name, 'pull_request') }}
+ run: |
+ PR_NUMBER="${{ github.event.pull_request.number }}"
+ TOKEN="${{ secrets.PAT_REVIEW }}"
+ REPO="${{ github.repository }}"
+
+ if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
+ echo "Adding label and comment..."
+ echo $TOKEN | gh auth login --with-token
+ gh issue edit $PR_NUMBER --add-label "behaviour change"
+ gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
+ fi
+
+ - name: Upload logs to artifact
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: test-logs
+ path: autogpts/autogpt/logs/
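Review note: the "Checkout cassettes" step keeps one cassette mirror branch per contributor/branch pair and falls back to the base branch when no mirror exists. Stripped of workflow plumbing, the branching logic is roughly (a sketch; `$PR_AUTHOR` and `$PR_BRANCH` come from the pull-request event payload):

    cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
    if git ls-remote --exit-code --heads origin "$cassette_branch"; then
        git fetch origin "$cassette_branch" && git checkout "$cassette_branch"
    else
        git checkout -b "$cassette_branch"   # start a fresh mirror from the base cassettes
    fi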
diff --git a/.github/workflows/autogpt-docker-cache-clean.yml b/.github/workflows/autogpt-docker-cache-clean.yml
new file mode 100644
index 000000000..780caf44f
--- /dev/null
+++ b/.github/workflows/autogpt-docker-cache-clean.yml
@@ -0,0 +1,59 @@
+name: Purge Auto-GPT Docker CI cache
+
+on:
+ schedule:
+ - cron: 20 4 * * 1,4
+
+env:
+ BASE_BRANCH: development
+ IMAGE_NAME: auto-gpt
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ build-type: [release, dev]
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+
+ - id: build
+ name: Build image
+ uses: docker/build-push-action@v3
+ with:
+ context: autogpts/autogpt
+ build-args: BUILD_TYPE=${{ matrix.build-type }}
+ load: true # save to docker images
+ # use GHA cache as read-only
+ cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
+
+ - name: Generate build report
+ env:
+ event_name: ${{ github.event_name }}
+ event_ref: ${{ github.event.schedule }}
+
+ build_type: ${{ matrix.build-type }}
+
+ prod_branch: master
+ dev_branch: development
+ repository: ${{ github.repository }}
+ base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+
+ current_ref: ${{ github.ref_name }}
+ commit_hash: ${{ github.sha }}
+ source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
+ push_forced_label:
+
+ new_commits_json: ${{ null }}
+ compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
+
+ github_context_json: ${{ toJSON(github) }}
+ job_env_json: ${{ toJSON(env) }}
+ vars_json: ${{ toJSON(vars) }}
+
+ run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
+ continue-on-error: true
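Review note: the build step above deliberately sets only `cache-to` with no `cache-from`, so the image is rebuilt without reading the old cache and the `mode=max` write replaces the GHA cache scope instead of letting it grow. Inside a runner (where the GHA cache endpoint is available), the equivalent direct invocation would look roughly like:

    docker buildx build autogpts/autogpt \
      --build-arg BUILD_TYPE=release \
      --load \
      --cache-to type=gha,scope=autogpt-docker-release,mode=max
    # no --cache-from: start clean so the refreshed cache replaces the old one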
diff --git a/.github/workflows/autogpt-docker-ci.yml b/.github/workflows/autogpt-docker-ci.yml
new file mode 100644
index 000000000..dc555c381
--- /dev/null
+++ b/.github/workflows/autogpt-docker-ci.yml
@@ -0,0 +1,162 @@
+name: AutoGPT Docker CI
+
+on:
+ push:
+ branches: [ master, development ]
+ paths:
+ - '.github/workflows/autogpt-docker-ci.yml'
+ - 'autogpts/autogpt/**'
+ - '!autogpts/autogpt/tests/vcr_cassettes'
+ pull_request:
+ branches: [ master, development, release-* ]
+ paths:
+ - '.github/workflows/autogpt-docker-ci.yml'
+ - 'autogpts/autogpt/**'
+ - '!autogpts/autogpt/tests/vcr_cassettes'
+
+concurrency:
+ group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
+defaults:
+ run:
+ working-directory: autogpts/autogpt
+
+env:
+ IMAGE_NAME: auto-gpt
+ DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
+ DEV_IMAGE_TAG: latest-dev
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ build-type: [release, dev]
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+
+ - if: runner.debug
+ run: |
+ ls -al
+ du -hs *
+
+ - id: build
+ name: Build image
+ uses: docker/build-push-action@v3
+ with:
+ context: autogpts/autogpt
+ build-args: BUILD_TYPE=${{ matrix.build-type }}
+ tags: ${{ env.IMAGE_NAME }}
+ load: true # save to docker images
+ # cache layers in GitHub Actions cache to speed up builds
+ cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
+ cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
+
+ - name: Generate build report
+ env:
+ event_name: ${{ github.event_name }}
+ event_ref: ${{ github.event.ref }}
+ event_ref_type: ${{ github.event.ref }}
+
+ build_type: ${{ matrix.build-type }}
+
+ prod_branch: master
+ dev_branch: development
+ repository: ${{ github.repository }}
+ base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+
+ current_ref: ${{ github.ref_name }}
+ commit_hash: ${{ github.event.after }}
+ source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
+ push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
+
+ new_commits_json: ${{ toJSON(github.event.commits) }}
+ compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
+
+ github_context_json: ${{ toJSON(github) }}
+ job_env_json: ${{ toJSON(env) }}
+ vars_json: ${{ toJSON(vars) }}
+
+ run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
+ working-directory: ./
+ continue-on-error: true
+
+ test:
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+
+ services:
+ minio:
+ image: minio/minio:edge-cicd
+ options: >
+ --name=minio
+ --health-interval=10s --health-timeout=5s --health-retries=3
+ --health-cmd="curl -f http://localhost:9000/minio/health/live"
+
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v3
+ with:
+ submodules: true
+
+ - name: Log in to Docker hub
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKER_USER }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+
+ - id: build
+ name: Build image
+ uses: docker/build-push-action@v3
+ with:
+ context: autogpts/autogpt
+ build-args: BUILD_TYPE=dev # include pytest
+ tags: >
+ ${{ env.IMAGE_NAME }},
+ ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
+ load: true # save to docker images
+ # cache layers in GitHub Actions cache to speed up builds
+ cache-from: type=gha,scope=autogpt-docker-dev
+ cache-to: type=gha,scope=autogpt-docker-dev,mode=max
+
+ - id: test
+ name: Run tests
+ env:
+ CI: true
+ PLAIN_OUTPUT: True
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ S3_ENDPOINT_URL: http://minio:9000
+ AWS_ACCESS_KEY_ID: minioadmin
+ AWS_SECRET_ACCESS_KEY: minioadmin
+ run: |
+ set +e
+ docker run --env CI --env OPENAI_API_KEY \
+ --network container:minio \
+ --env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
+ --entrypoint poetry ${{ env.IMAGE_NAME }} run \
+ pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
+ --numprocesses=4 --durations=10 \
+ tests/unit tests/integration 2>&1 | tee test_output.txt
+
+ test_failure=${PIPESTATUS[0]}
+
+ cat << $EOF >> $GITHUB_STEP_SUMMARY
+ # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
+ \`\`\`
+ $(cat test_output.txt)
+ \`\`\`
+ $EOF
+
+ exit $test_failure
+
+ - if: github.event_name == 'push' && github.ref_name == 'master'
+ name: Push image to Docker Hub
+ run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
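Review note: the test step pipes pytest through `tee` so the output can be replayed into the step summary, then recovers pytest's own exit status from bash's `PIPESTATUS` array (a pipeline's exit code is otherwise that of its last command, i.e. `tee`). The pattern in isolation (`run_tests` is a stand-in for the pytest invocation):

    set +e                          # don't abort on failure; we want the exit code
    run_tests 2>&1 | tee test_output.txt
    status=${PIPESTATUS[0]}         # exit code of run_tests, not of tee
    set -e
    exit $status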
diff --git a/.github/workflows/autogpt-docker-release.yml b/.github/workflows/autogpt-docker-release.yml
new file mode 100644
index 000000000..4213c78b5
--- /dev/null
+++ b/.github/workflows/autogpt-docker-release.yml
@@ -0,0 +1,90 @@
+name: AutoGPT Docker Release
+
+on:
+ release:
+ types: [ published, edited ]
+
+ workflow_dispatch:
+ inputs:
+ no_cache:
+ type: boolean
+ description: 'Build from scratch, without using cached layers'
+
+defaults:
+ run:
+ working-directory: autogpts/autogpt
+
+env:
+ IMAGE_NAME: auto-gpt
+ DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
+
+jobs:
+ build:
+ if: startsWith(github.ref, 'refs/tags/autogpt-')
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Log in to Docker hub
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKER_USER }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+
+ # slashes are not allowed in image tags, but can appear in git branch or tag names
+ - id: sanitize_tag
+ name: Sanitize image tag
+ run: |
+ tag=${raw_tag//\//-}
+ echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
+ env:
+ raw_tag: ${{ github.ref_name }}
+
+ - id: build
+ name: Build image
+ uses: docker/build-push-action@v3
+ with:
+ context: autogpts/autogpt
+ build-args: BUILD_TYPE=release
+ load: true # save to docker images
+ # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
+ tags: >
+ ${{ env.IMAGE_NAME }},
+ ${{ env.DEPLOY_IMAGE_NAME }}:latest,
+ ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
+
+ # cache layers in GitHub Actions cache to speed up builds
+ cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
+ cache-to: type=gha,scope=autogpt-docker-release,mode=max
+
+ - name: Push image to Docker Hub
+ run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
+
+ - name: Generate build report
+ env:
+ event_name: ${{ github.event_name }}
+ event_ref: ${{ github.event.ref }}
+ event_ref_type: ${{ github.event.ref }}
+ inputs_no_cache: ${{ inputs.no_cache }}
+
+ prod_branch: master
+ dev_branch: development
+ repository: ${{ github.repository }}
+ base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+
+ ref_type: ${{ github.ref_type }}
+ current_ref: ${{ github.ref_name }}
+ commit_hash: ${{ github.sha }}
+ source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
+
+ github_context_json: ${{ toJSON(github) }}
+ job_env_json: ${{ toJSON(env) }}
+ vars_json: ${{ toJSON(vars) }}
+
+ run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
+ working-directory: ./
+ continue-on-error: true
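Review note: the `sanitize_tag` step relies on two bash parameter expansions: `${raw_tag//\//-}` replaces every `/` with `-` (slashes are invalid in image tags), and `${tag#autogpt-}` strips the leading `autogpt-` prefix. For example:

    raw_tag="autogpt-v0.5.0"
    tag=${raw_tag//\//-}      # no slashes here, so still autogpt-v0.5.0
    echo "${tag#autogpt-}"    # -> v0.5.0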
diff --git a/.github/workflows/autogpts-ci.yml b/.github/workflows/autogpts-ci.yml
new file mode 100644
index 000000000..0385470ac
--- /dev/null
+++ b/.github/workflows/autogpts-ci.yml
@@ -0,0 +1,68 @@
+name: AutoGPTs smoke test CI
+
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: '0 8 * * *'
+ push:
+ branches: [ master, development, ci-test* ]
+ paths:
+ - '.github/workflows/autogpts-ci.yml'
+ - 'autogpts/**'
+ - 'benchmark/**'
+ - 'run'
+ - 'cli.py'
+ - 'setup.py'
+ - '!**/*.md'
+ pull_request:
+ branches: [ master, development, release-* ]
+ paths:
+ - '.github/workflows/autogpts-ci.yml'
+ - 'autogpts/**'
+ - 'benchmark/**'
+ - 'run'
+ - 'cli.py'
+ - 'setup.py'
+ - '!**/*.md'
+
+jobs:
+ run-tests:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ agent-name: [ autogpt, forge ]
+ fail-fast: false
+ timeout-minutes: 20
+ env:
+ min-python-version: '3.10'
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ submodules: true
+
+ - name: Set up Python ${{ env.min-python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ env.min-python-version }}
+
+ - name: Install Poetry
+ working-directory: ./autogpts/${{ matrix.agent-name }}/
+ run: |
+ curl -sSL https://install.python-poetry.org | python -
+
+ - name: Run regression tests
+ run: |
+ ./run agent start ${{ matrix.agent-name }}
+ cd autogpts/${{ matrix.agent-name }}
+ poetry run agbenchmark --mock
+ poetry run agbenchmark --test=WriteFile
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ AGENT_NAME: ${{ matrix.agent-name }}
+ HELICONE_API_KEY: ${{ secrets.HELICONE_API_KEY }}
+ REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
+ HELICONE_CACHE_ENABLED: false
+ HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
+ REPORT_LOCATION: ${{ format('../../reports/{0}', matrix.agent-name) }}
diff --git a/.github/workflows/benchmark-ci.yml b/.github/workflows/benchmark-ci.yml
new file mode 100644
index 000000000..5f95dd025
--- /dev/null
+++ b/.github/workflows/benchmark-ci.yml
@@ -0,0 +1,140 @@
+name: Benchmark CI
+
+on:
+ push:
+ branches: [ master, development, ci-test* ]
+ paths:
+ - 'benchmark/**'
+ - .github/workflows/benchmark-ci.yml
+ - '!benchmark/reports/**'
+ pull_request:
+ branches: [ master, development, release-* ]
+ paths:
+ - 'benchmark/**'
+ - '!benchmark/reports/**'
+ - .github/workflows/benchmark-ci.yml
+
+env:
+ min-python-version: '3.10'
+
+jobs:
+ lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python ${{ env.min-python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ env.min-python-version }}
+
+ - id: get_date
+ name: Get date
+ working-directory: ./benchmark/
+ run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+ - name: Install Poetry
+ working-directory: ./benchmark/
+ run: |
+ curl -sSL https://install.python-poetry.org | python -
+
+ - name: Install dependencies
+ working-directory: ./benchmark/
+ run: |
+ export POETRY_VIRTUALENVS_IN_PROJECT=true
+ poetry install -vvv
+
+ - name: Lint with flake8
+ working-directory: ./benchmark/
+ run: poetry run flake8
+
+ - name: Check black formatting
+ working-directory: ./benchmark/
+ run: poetry run black . --exclude test.py --check
+ if: success() || failure()
+
+ - name: Check isort formatting
+ working-directory: ./benchmark/
+ run: poetry run isort . --check
+ if: success() || failure()
+
+ - name: Check for unused imports and pass statements
+ working-directory: ./benchmark/
+ run: |
+ cmd="poetry run autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring agbenchmark"
+ $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
+ if: success() || failure()
+
+ tests-agbenchmark:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ agent-name: [ forge ]
+ fail-fast: false
+ timeout-minutes: 20
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ submodules: true
+
+ - name: Set up Python ${{ env.min-python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ env.min-python-version }}
+
+ - name: Install Poetry
+ working-directory: ./autogpts/${{ matrix.agent-name }}/
+ run: |
+ curl -sSL https://install.python-poetry.org | python -
+
+ - name: Run regression tests
+ run: |
+ ./run agent start ${{ matrix.agent-name }}
+ sleep 10
+ cd autogpts/${{ matrix.agent-name }}
+ set +e # Ignore non-zero exit codes and continue execution
+ echo "Running the following command: poetry run agbenchmark --maintain --mock"
+
+ poetry run agbenchmark --maintain --mock
+ EXIT_CODE=$?
+ set -e # Stop ignoring non-zero exit codes
+ # Check if the exit code was 5, and if so, exit with 0 instead
+ if [ $EXIT_CODE -eq 5 ]; then
+ echo "regression_tests.json is empty."
+ fi
+
+ echo "Running the following command: poetry run agbenchmark --mock"
+ poetry run agbenchmark --mock
+
+ echo "Running the following command: poetry run agbenchmark --mock --category=data"
+ poetry run agbenchmark --mock --category=data
+
+ echo "Running the following command: poetry run agbenchmark --mock --category=coding"
+ poetry run agbenchmark --mock --category=coding
+
+ echo "Running the following command: poetry run agbenchmark --test=WriteFile"
+ poetry run agbenchmark --test=WriteFile
+ cd ../../benchmark
+ poetry install
+ echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
+ export BUILD_SKILL_TREE=true
+
+ poetry run agbenchmark --mock
+ poetry run pytest -vv -s tests
+
+ CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
+ if [ ! -z "$CHANGED" ]; then
+ echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
+ echo "$CHANGED"
+ exit 1
+ else
+ echo "No unstaged changes."
+ fi
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
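Review note: pytest (and therefore agbenchmark's pytest run) exits with code 5 when no tests are collected, which is why the `--maintain` run above treats 5 as the empty-regression-list case rather than a failure. A tightened sketch that also propagates genuine failures (the original swallows any non-zero code in this block):

    set +e
    poetry run agbenchmark --maintain --mock
    code=$?
    set -e
    if [ "$code" -eq 5 ]; then
        echo "regression_tests.json is empty; nothing to maintain"
    elif [ "$code" -ne 0 ]; then
        exit "$code"   # re-raise real failures instead of swallowing them
    fi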
diff --git a/.github/workflows/benchmark_publish_package.yml b/.github/workflows/benchmark_publish_package.yml
new file mode 100644
index 000000000..de0ca66fe
--- /dev/null
+++ b/.github/workflows/benchmark_publish_package.yml
@@ -0,0 +1,55 @@
+name: Publish to PyPI
+
+on:
+ workflow_dispatch:
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+ with:
+ submodules: true
+ fetch-depth: 0
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+
+ - name: Install Poetry
+ working-directory: ./benchmark/
+ run: |
+ curl -sSL https://install.python-poetry.org | python3 -
+ echo "$HOME/.poetry/bin" >> $GITHUB_PATH
+
+ - name: Build project for distribution
+ working-directory: ./benchmark/
+ run: poetry build
+
+ - name: Install dependencies
+ working-directory: ./benchmark/
+ run: poetry install
+
+ - name: Check Version
+ working-directory: ./benchmark/
+ id: check-version
+ run: |
+ echo version=$(poetry version --short) >> $GITHUB_OUTPUT
+
+ - name: Create Release
+ uses: ncipollo/release-action@v1
+ with:
+ artifacts: "benchmark/dist/*"
+ token: ${{ secrets.GITHUB_TOKEN }}
+ draft: false
+ generateReleaseNotes: false
+ tag: agbenchmark-v${{ steps.check-version.outputs.version }}
+ commit: master
+
+ - name: Build and publish
+ working-directory: ./benchmark/
+ run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}
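Review note: the release tag is derived from the package version, so publishing is driven entirely by `pyproject.toml`. Locally, the same sequence reduces to roughly (a sketch; assumes a configured PyPI token in `$PYPI_API_TOKEN`):

    cd benchmark/
    poetry version --short        # e.g. 0.0.10 -> tag agbenchmark-v0.0.10
    poetry build
    poetry publish -u __token__ -p "$PYPI_API_TOKEN"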
diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml
deleted file mode 100644
index e40abf2f6..000000000
--- a/.github/workflows/benchmarks.yml
+++ /dev/null
@@ -1,73 +0,0 @@
-name: Benchmarks
-
-on:
- schedule:
- - cron: '0 8 * * *'
- workflow_dispatch:
-
-jobs:
- Benchmark:
- name: ${{ matrix.config.task-name }}
- runs-on: ubuntu-latest
- timeout-minutes: 30
- strategy:
- fail-fast: false
- matrix:
- config:
- - python-version: "3.10"
- task: "tests/challenges"
- task-name: "Mandatory Tasks"
- - python-version: "3.10"
- task: "--beat-challenges -ra tests/challenges"
- task-name: "Challenging Tasks"
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
- with:
- ref: master
-
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.config.python-version }}
-
- - id: get_date
- name: Get date
- run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
-
- - name: Set up Python dependency cache
- uses: actions/cache@v3
- with:
- path: ~/.cache/pip
- key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
-
- - name: Install Python dependencies
- run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
-
- - name: Run pytest with coverage
- run: |
- rm -rf tests/Auto-GPT-test-cassettes
- pytest -n auto --record-mode=all ${{ matrix.config.task }}
- env:
- CI: true
- PROXY: ${{ secrets.PROXY }}
- AGENT_MODE: ${{ secrets.AGENT_MODE }}
- AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
- PLAIN_OUTPUT: True
-
- - name: Upload logs as artifact
- if: always()
- uses: actions/upload-artifact@v3
- with:
- name: test-logs-${{ matrix.config.task-name }}
- path: logs/
-
- - name: Upload cassettes as artifact
- if: always()
- uses: actions/upload-artifact@v3
- with:
- name: cassettes-${{ matrix.config.task-name }}
- path: tests/Auto-GPT-test-cassettes/
diff --git a/.github/workflows/build-frontend.yml b/.github/workflows/build-frontend.yml
new file mode 100644
index 000000000..dedf67f3c
--- /dev/null
+++ b/.github/workflows/build-frontend.yml
@@ -0,0 +1,46 @@
+name: Build and Commit Frontend
+
+on:
+ push:
+ branches:
+ - master
+ - development
+ - 'ci-test*' # This will match any branch that starts with "ci-test"
+ paths:
+ - 'frontend/**'
+
+jobs:
+ build:
+ permissions:
+ contents: write
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout Repo
+ uses: actions/checkout@v2
+ - name: Setup Flutter
+ uses: subosito/flutter-action@v1
+ with:
+ flutter-version: '3.13.2'
+ - name: Build Flutter Web
+ run: |
+ cd frontend
+ flutter build web --base-href /app/
+ - name: Set branch name
+ id: vars
+ run: echo "::set-output name=branch::frontend_build_${GITHUB_SHA}"
+ - name: Commit and Push
+ run: |
+ git config --local user.email "action@github.com"
+ git config --local user.name "GitHub Action"
+ git add frontend/build/web
+ git commit -m "Update frontend build" -a
+ git checkout -b ${{ steps.vars.outputs.branch }}
+ echo "Commit hash: ${GITHUB_SHA}"
+ git push origin ${{ steps.vars.outputs.branch }}
+ # - name: Create Pull Request
+ # uses: peter-evans/create-pull-request@v3
+ # with:
+ # title: "Update frontend build"
+ # body: "This PR updates the frontend build."
+ # branch: ${{ steps.vars.outputs.branch }}
+ # base: "master"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
deleted file mode 100644
index 710359d0e..000000000
--- a/.github/workflows/ci.yml
+++ /dev/null
@@ -1,255 +0,0 @@
-name: Python CI
-
-on:
- push:
- branches: [ master, ci-test* ]
- paths-ignore:
- - 'tests/Auto-GPT-test-cassettes'
- - 'tests/challenges/current_score.json'
- pull_request:
- branches: [ stable, master, release-* ]
- pull_request_target:
- branches: [ master, release-*, ci-test* ]
-
-concurrency:
- group: ${{ format('ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
- cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
-
-jobs:
- lint:
- # eliminate duplicate runs
- if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
-
- runs-on: ubuntu-latest
- env:
- min-python-version: "3.10"
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
- ref: ${{ github.event.pull_request.head.ref }}
- repository: ${{ github.event.pull_request.head.repo.full_name }}
-
- - name: Set up Python ${{ env.min-python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ env.min-python-version }}
-
- - id: get_date
- name: Get date
- run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
-
- - name: Set up Python dependency cache
- uses: actions/cache@v3
- with:
- path: ~/.cache/pip
- key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
-
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
-
- - name: Lint with flake8
- run: flake8
-
- - name: Check black formatting
- run: black . --check
- if: success() || failure()
-
- - name: Check isort formatting
- run: isort . --check
- if: success() || failure()
-
- - name: Check mypy formatting
- run: mypy
- if: success() || failure()
-
- - name: Check for unused imports and pass statements
- run: |
- cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
- $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
-
- test:
- # eliminate duplicate runs
- if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
-
- permissions:
- # Gives the action the necessary permissions for publishing new
- # comments in pull requests.
- pull-requests: write
- # Gives the action the necessary permissions for pushing data to the
- # python-coverage-comment-action branch, and for editing existing
- # comments (to avoid publishing multiple comments in the same PR)
- contents: write
- runs-on: ubuntu-latest
- timeout-minutes: 30
- strategy:
- matrix:
- python-version: ["3.10"]
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
- ref: ${{ github.event.pull_request.head.ref }}
- repository: ${{ github.event.pull_request.head.repo.full_name }}
- submodules: true
-
- - name: Configure git user Auto-GPT-Bot
- run: |
- git config --global user.name "Auto-GPT-Bot"
- git config --global user.email "github-bot@agpt.co"
-
- - name: Checkout cassettes
- if: ${{ startsWith(github.event_name, 'pull_request') }}
- run: |
- cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
- cd tests/Auto-GPT-test-cassettes
-
- if git ls-remote --exit-code --heads origin $cassette_branch ; then
- git fetch origin $cassette_branch
- git fetch origin ${{ github.event.pull_request.base.ref }}
-
- git checkout $cassette_branch
-
- # Pick non-conflicting cassette updates from the base branch
- git merge --no-commit --strategy-option=ours origin/${{ github.event.pull_request.base.ref }}
- echo "Using cassettes from mirror branch '$cassette_branch'," \
- "synced to upstream branch '${{ github.event.pull_request.base.ref }}'."
- else
- git checkout -b $cassette_branch
- echo "Branch '$cassette_branch' does not exist in cassette submodule." \
- "Using cassettes from '${{ github.event.pull_request.base.ref }}'."
- fi
-
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
-
- - id: get_date
- name: Get date
- run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
-
- - name: Set up Python dependency cache
- uses: actions/cache@v3
- with:
- path: ~/.cache/pip
- key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
-
- - name: Install Python dependencies
- run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
-
- - name: Run pytest with coverage
- run: |
- pytest -vv --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
- --numprocesses=logical --durations=10 \
- tests/unit tests/integration tests/challenges
- python tests/challenges/utils/build_current_score.py
- env:
- CI: true
- PROXY: ${{ secrets.PROXY }}
- AGENT_MODE: ${{ secrets.AGENT_MODE }}
- AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
- PLAIN_OUTPUT: True
-
- - name: Upload coverage reports to Codecov
- uses: codecov/codecov-action@v3
-
- - id: setup_git_auth
- name: Set up git token authentication
- # Cassettes may be pushed even when tests fail
- if: success() || failure()
- run: |
- config_key="http.${{ github.server_url }}/.extraheader"
- base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
-
- git config "$config_key" \
- "Authorization: Basic $base64_pat"
-
- cd tests/Auto-GPT-test-cassettes
- git config "$config_key" \
- "Authorization: Basic $base64_pat"
-
- echo "config_key=$config_key" >> $GITHUB_OUTPUT
-
- - name: Push updated challenge scores
- if: github.event_name == 'push'
- run: |
- score_file="tests/challenges/current_score.json"
-
- if ! git diff --quiet $score_file; then
- git add $score_file
- git commit -m "Update challenge scores"
- git push origin HEAD:${{ github.ref_name }}
- else
- echo "The challenge scores didn't change."
- fi
-
- - id: push_cassettes
- name: Push updated cassettes
- # For pull requests, push updated cassettes even when tests fail
- if: github.event_name == 'push' || success() || failure()
- run: |
- if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
- is_pull_request=true
- cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
- else
- cassette_branch="${{ github.ref_name }}"
- fi
-
- cd tests/Auto-GPT-test-cassettes
- # Commit & push changes to cassettes if any
- if ! git diff --quiet; then
- git add .
- git commit -m "Auto-update cassettes"
- git push origin HEAD:$cassette_branch
- if [ ! $is_pull_request ]; then
- cd ../..
- git add tests/Auto-GPT-test-cassettes
- git commit -m "Update cassette submodule"
- git push origin HEAD:$cassette_branch
- fi
- echo "updated=true" >> $GITHUB_OUTPUT
- else
- echo "updated=false" >> $GITHUB_OUTPUT
- echo "No cassette changes to commit"
- fi
-
- - name: Post Set up git token auth
- if: steps.setup_git_auth.outcome == 'success'
- run: |
- git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
- git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
-
- - name: Apply "behaviour change" label and comment on PR
- if: ${{ startsWith(github.event_name, 'pull_request') }}
- run: |
- PR_NUMBER=${{ github.event.pull_request.number }}
- TOKEN=${{ secrets.PAT_REVIEW }}
- REPO=${{ github.repository }}
-
- if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
- echo "Adding label and comment..."
- curl -X POST \
- -H "Authorization: Bearer $TOKEN" \
- -H "Accept: application/vnd.github.v3+json" \
- https://api.github.com/repos/$REPO/issues/$PR_NUMBER/labels \
- -d '{"labels":["behaviour change"]}'
-
- echo $TOKEN | gh auth login --with-token
- gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
- fi
-
- - name: Upload logs to artifact
- if: always()
- uses: actions/upload-artifact@v3
- with:
- name: test-logs
- path: logs/
diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml
new file mode 100644
index 000000000..f8d66dcea
--- /dev/null
+++ b/.github/workflows/close-stale-issues.yml
@@ -0,0 +1,34 @@
+name: 'Close stale issues'
+on:
+ schedule:
+ - cron: '30 1 * * *'
+ workflow_dispatch:
+
+permissions:
+ issues: write
+
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v8
+ with:
+ # operations-per-run: 5000
+ stale-issue-message: >
+ This issue has automatically been marked as _stale_ because it has not had
+ any activity in the last 50 days. You can _unstale_ it by commenting or
+ removing the label. Otherwise, this issue will be closed in 10 days.
+ stale-pr-message: >
+ This pull request has automatically been marked as _stale_ because it has
+ not had any activity in the last 50 days. You can _unstale_ it by commenting
+ or removing the label.
+ close-issue-message: >
+ This issue was closed automatically because it has been stale for 10 days
+ with no activity.
+ days-before-stale: 50
+ days-before-close: 10
+ # Do not touch meta issues:
+ exempt-issue-labels: meta,fridge,project management
+ # Do not affect pull requests:
+ days-before-pr-stale: -1
+ days-before-pr-close: -1
diff --git a/.github/workflows/docker-cache-clean.yml b/.github/workflows/docker-cache-clean.yml
deleted file mode 100644
index f3fc8c10b..000000000
--- a/.github/workflows/docker-cache-clean.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-name: Purge Docker CI cache
-
-on:
- schedule:
- - cron: 20 4 * * 1,4
-
-env:
- BASE_BRANCH: master
- IMAGE_NAME: auto-gpt
-
-jobs:
- build:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- build-type: [release, dev]
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v2
-
- - id: build
- name: Build image
- uses: docker/build-push-action@v3
- with:
- build-args: BUILD_TYPE=${{ matrix.build-type }}
- load: true # save to docker images
- # use GHA cache as read-only
- cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max
-
- - name: Generate build report
- env:
- event_name: ${{ github.event_name }}
- event_ref: ${{ github.event.schedule }}
-
- build_type: ${{ matrix.build-type }}
-
- prod_branch: stable
- dev_branch: master
- repository: ${{ github.repository }}
- base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
-
- current_ref: ${{ github.ref_name }}
- commit_hash: ${{ github.sha }}
- source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
- push_forced_label:
-
- new_commits_json: ${{ null }}
- compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
-
- github_context_json: ${{ toJSON(github) }}
- job_env_json: ${{ toJSON(env) }}
- vars_json: ${{ toJSON(vars) }}
-
- run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
- continue-on-error: true
diff --git a/.github/workflows/docker-ci.yml b/.github/workflows/docker-ci.yml
deleted file mode 100644
index cbdd2f130..000000000
--- a/.github/workflows/docker-ci.yml
+++ /dev/null
@@ -1,124 +0,0 @@
-name: Docker CI
-
-on:
- push:
- branches: [ master ]
- paths-ignore:
- - 'tests/Auto-GPT-test-cassettes'
- - 'tests/challenges/current_score.json'
- pull_request:
- branches: [ master, release-*, stable ]
-
-concurrency:
- group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
- cancel-in-progress: ${{ github.event_name == 'pull_request' }}
-
-env:
- IMAGE_NAME: auto-gpt
-
-jobs:
- build:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- build-type: [release, dev]
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v2
-
- - if: runner.debug
- run: |
- ls -al
- du -hs *
-
- - id: build
- name: Build image
- uses: docker/build-push-action@v3
- with:
- build-args: BUILD_TYPE=${{ matrix.build-type }}
- tags: ${{ env.IMAGE_NAME }}
- load: true # save to docker images
- # cache layers in GitHub Actions cache to speed up builds
- cache-from: type=gha,scope=docker-${{ matrix.build-type }}
- cache-to: type=gha,scope=docker-${{ matrix.build-type }},mode=max
-
- - name: Generate build report
- env:
- event_name: ${{ github.event_name }}
- event_ref: ${{ github.event.ref }}
- event_ref_type: ${{ github.event.ref}}
-
- build_type: ${{ matrix.build-type }}
-
- prod_branch: stable
- dev_branch: master
- repository: ${{ github.repository }}
- base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
-
- current_ref: ${{ github.ref_name }}
- commit_hash: ${{ github.event.after }}
- source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
- push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
-
- new_commits_json: ${{ toJSON(github.event.commits) }}
- compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
-
- github_context_json: ${{ toJSON(github) }}
- job_env_json: ${{ toJSON(env) }}
- vars_json: ${{ toJSON(vars) }}
-
- run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
- continue-on-error: true
-
- test:
- runs-on: ubuntu-latest
- timeout-minutes: 10
- steps:
- - name: Check out repository
- uses: actions/checkout@v3
- with:
- submodules: true
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v2
-
- - id: build
- name: Build image
- uses: docker/build-push-action@v3
- with:
- build-args: BUILD_TYPE=dev # include pytest
- tags: ${{ env.IMAGE_NAME }}
- load: true # save to docker images
- # cache layers in GitHub Actions cache to speed up builds
- cache-from: type=gha,scope=docker-dev
- cache-to: type=gha,scope=docker-dev,mode=max
-
- - id: test
- name: Run tests
- env:
- CI: true
- PLAIN_OUTPUT: True
- OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- run: |
- set +e
- test_output=$(
- docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
- pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
- --numprocesses=4 --durations=10 \
- tests/unit tests/integration 2>&1
- )
- test_failure=$?
-
- echo "$test_output"
-
- cat << $EOF >> $GITHUB_STEP_SUMMARY
- # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
- \`\`\`
- $test_output
- \`\`\`
- $EOF
-
- exit $test_failure
diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml
deleted file mode 100644
index e066641f0..000000000
--- a/.github/workflows/docker-release.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-name: Docker Release
-
-on:
- release:
- types: [ published, edited ]
-
- workflow_dispatch:
- inputs:
- no_cache:
- type: boolean
- description: 'Build from scratch, without using cached layers'
-
-env:
- IMAGE_NAME: auto-gpt
- DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
-
- - name: Log in to Docker hub
- uses: docker/login-action@v2
- with:
- username: ${{ secrets.DOCKER_USER }}
- password: ${{ secrets.DOCKER_PASSWORD }}
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v2
-
- # slashes are not allowed in image tags, but can appear in git branch or tag names
- - id: sanitize_tag
- name: Sanitize image tag
- run: echo tag=${raw_tag//\//-} >> $GITHUB_OUTPUT
- env:
- raw_tag: ${{ github.ref_name }}
-
- - id: build
- name: Build image
- uses: docker/build-push-action@v3
- with:
- build-args: BUILD_TYPE=release
- load: true # save to docker images
- # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
- tags: >
- ${{ env.IMAGE_NAME }},
- ${{ env.DEPLOY_IMAGE_NAME }}:latest,
- ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
-
- # cache layers in GitHub Actions cache to speed up builds
- cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=docker-release
- cache-to: type=gha,scope=docker-release,mode=max
-
- - name: Push image to Docker Hub
- run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
-
- - name: Generate build report
- env:
- event_name: ${{ github.event_name }}
- event_ref: ${{ github.event.ref }}
- event_ref_type: ${{ github.event.ref}}
- inputs_no_cache: ${{ inputs.no_cache }}
-
- prod_branch: stable
- dev_branch: master
- repository: ${{ github.repository }}
- base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'stable' && 'master' || 'stable' }}
-
- ref_type: ${{ github.ref_type }}
- current_ref: ${{ github.ref_name }}
- commit_hash: ${{ github.sha }}
- source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
-
- github_context_json: ${{ toJSON(github) }}
- job_env_json: ${{ toJSON(env) }}
- vars_json: ${{ toJSON(vars) }}
-
- run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
- continue-on-error: true
diff --git a/.github/workflows/documentation-release.yml b/.github/workflows/documentation-release.yml
deleted file mode 100644
index ef97e1e43..000000000
--- a/.github/workflows/documentation-release.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-name: Docs
-
-on:
- push:
- branches: [ stable ]
- paths:
- - 'docs/**'
- - 'mkdocs.yml'
- - '.github/workflows/documentation.yml'
-
- # Allows you to run this workflow manually from the Actions tab
- workflow_dispatch:
-
-permissions:
- contents: write
-
-jobs:
- deploy:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
-
- - name: Set up Python 3
- uses: actions/setup-python@v4
- with:
- python-version: 3.x
-
- - name: Set up workflow cache
- uses: actions/cache@v3
- with:
- key: ${{ github.ref }}
- path: .cache
-
- - run: pip install mkdocs-material
-
- - run: mkdocs gh-deploy --force
diff --git a/.github/workflows/hackathon.yml b/.github/workflows/hackathon.yml
new file mode 100644
index 000000000..3becee799
--- /dev/null
+++ b/.github/workflows/hackathon.yml
@@ -0,0 +1,133 @@
+name: Hackathon
+
+on:
+ workflow_dispatch:
+ inputs:
+ agents:
+ description: "Agents to run (comma-separated)"
+ required: false
+ default: "autogpt" # Default agents if none are specified
+
+jobs:
+ matrix-setup:
+ runs-on: ubuntu-latest
+ # Service containers to run with `matrix-setup`
+ services:
+ # Label used to access the service container
+ postgres:
+ # Docker Hub image
+ image: postgres
+ # Provide the password for postgres
+ env:
+ POSTGRES_PASSWORD: postgres
+ # Set health checks to wait until postgres has started
+ options: >-
+ --health-cmd pg_isready
+ --health-interval 10s
+ --health-timeout 5s
+ --health-retries 5
+ ports:
+ # Maps tcp port 5432 on service container to the host
+ - 5432:5432
+ outputs:
+ matrix: ${{ steps.set-matrix.outputs.matrix }}
+ env-name: ${{ steps.set-matrix.outputs.env-name }}
+ steps:
+ - id: set-matrix
+ # `::set-output` is deprecated; write step outputs to $GITHUB_OUTPUT instead.
+ # Matrix values use escaped double quotes so that fromJson() receives valid JSON.
+ run: |
+ if [ "${{ github.event_name }}" == "schedule" ]; then
+ echo "env-name=production" >> $GITHUB_OUTPUT
+ echo "matrix=[ \"irrelevant\" ]" >> $GITHUB_OUTPUT
+ elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
+ IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
+ matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
+ echo "env-name=production" >> $GITHUB_OUTPUT
+ echo "matrix=$matrix_string" >> $GITHUB_OUTPUT
+ else
+ echo "env-name=testing" >> $GITHUB_OUTPUT
+ echo "matrix=[ \"irrelevant\" ]" >> $GITHUB_OUTPUT
+ fi
+
+ tests:
+ environment:
+ name: "${{ needs.matrix-setup.outputs.env-name }}"
+ needs: matrix-setup
+ env:
+ min-python-version: "3.10"
+ name: "${{ matrix.agent-name }}"
+ runs-on: ubuntu-latest
+ services:
+ # Label used to access the service container
+ postgres:
+ # Docker Hub image
+ image: postgres
+ # Provide the password for postgres
+ env:
+ POSTGRES_PASSWORD: postgres
+ # Set health checks to wait until postgres has started
+ options: >-
+ --health-cmd pg_isready
+ --health-interval 10s
+ --health-timeout 5s
+ --health-retries 5
+ ports:
+ # Maps tcp port 5432 on service container to the host
+ - 5432:5432
+ timeout-minutes: 50
+ strategy:
+ fail-fast: false
+ matrix:
+ agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
+ steps:
+ - name: Print Environment Name
+ run: |
+ echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"
+
+ - name: Check Docker Container
+ id: check
+ run: docker ps
+
+ - name: Checkout repository
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ submodules: true
+
+ - name: Set up Python ${{ env.min-python-version }}
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ env.min-python-version }}
+
+ - id: get_date
+ name: Get date
+ run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+ - name: Install Poetry
+ run: |
+ curl -sSL https://install.python-poetry.org | python -
+
+ - name: Install Node.js
+ uses: actions/setup-node@v1
+ with:
+ node-version: v18.15
+
+ - name: Run benchmark
+ run: |
+ link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
+ branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
+ git clone "$link" -b "$branch" "$AGENT_NAME"
+ cd $AGENT_NAME
+ cp ./autogpts/$AGENT_NAME/.env.example ./autogpts/$AGENT_NAME/.env || echo "file not found"
+ ./run agent start $AGENT_NAME
+ cd ../benchmark
+ poetry install
+ poetry run agbenchmark --no-dep
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
+ SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
+ WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
+ WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
+ GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
+ GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
+ AGENT_NAME: ${{ matrix.agent-name }}
diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml
index ebeb7305e..512daf116 100644
--- a/.github/workflows/pr-label.yml
+++ b/.github/workflows/pr-label.yml
@@ -3,10 +3,10 @@ name: "Pull Request auto-label"
on:
# So that PRs touching the same files as the push are updated
push:
- branches: [ master, release-* ]
+ branches: [ master, development, release-* ]
paths-ignore:
- - 'tests/Auto-GPT-test-cassettes'
- - 'tests/challenges/current_score.json'
+ - 'autogpts/autogpt/tests/vcr_cassettes'
+ - 'benchmark/reports/**'
# So that the `dirtyLabel` is removed if conflicts are resolved
# We recommend `pull_request_target` so that github secrets are available.
# In `pull_request` we wouldn't be able to change labels of fork PRs
diff --git a/.github/workflows/repo-stats.yml b/.github/workflows/repo-stats.yml
new file mode 100644
index 000000000..e41885f5d
--- /dev/null
+++ b/.github/workflows/repo-stats.yml
@@ -0,0 +1,20 @@
+name: github-repo-stats
+
+on:
+ schedule:
+ # Run this once per day, towards the end of the day for keeping the most
+ # recent data point most meaningful (hours are interpreted in UTC).
+ - cron: "0 23 * * *"
+ workflow_dispatch: # Allow for running this manually.
+
+jobs:
+ j1:
+ name: github-repo-stats
+ runs-on: ubuntu-latest
+ steps:
+ - name: run-ghrs
+ # Track the action's latest revision.
+ uses: jgehrcke/github-repo-stats@HEAD
+ with:
+ ghtoken: ${{ secrets.ghrs_github_api_token }}
+
diff --git a/.gitignore b/.gitignore
index 9695cf4a1..c4f86e958 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,8 @@
## Original ignores
+.github_access_token
autogpt/keys.py
autogpt/*.json
-**/auto_gpt_workspace/*
+auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
@@ -28,11 +29,8 @@ __pycache__/
# Distribution / packaging
.Python
-build/
develop-eggs/
dist/
-/plugins/
-plugins_config.yaml
downloads/
eggs/
.eggs/
@@ -160,3 +158,22 @@ openai/
# news
CURRENT_BULLETIN.md
+
+# AgBenchmark
+agbenchmark/reports/
+
+# Nodejs
+package-lock.json
+package.json
+
+# Allow for locally private items
+# private
+pri*
+# ignore
+ig*
+.github_access_token
+arena/TestAgent.json
+
+# evo.ninja
+autogpts/evo.ninja/*
+!autogpts/evo.ninja/setup
diff --git a/.gitmodules b/.gitmodules
index 55d09f842..c7d571220 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,4 +1,3 @@
-[submodule "tests/Auto-GPT-test-cassettes"]
- path = tests/Auto-GPT-test-cassettes
- url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
- branch = master
\ No newline at end of file
+[submodule "autogpts/autogpt/tests/vcr_cassettes"]
+ path = autogpts/autogpt/tests/vcr_cassettes
+ url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
diff --git a/.isort.cfg b/.isort.cfg
deleted file mode 100644
index 17eab482f..000000000
--- a/.isort.cfg
+++ /dev/null
@@ -1,10 +0,0 @@
-[settings]
-profile = black
-multi_line_output = 3
-include_trailing_comma = true
-force_grid_wrap = 0
-use_parentheses = true
-ensure_newline_before_comments = true
-line_length = 88
-sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
-skip = .tox,__pycache__,*.pyc,venv*/*,reports,venv,env,node_modules,.env,.venv,dist
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
deleted file mode 100644
index cb7180d4c..000000000
--- a/.pre-commit-config.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-repos:
- - repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.4.0
- hooks:
- - id: check-added-large-files
- args: ['--maxkb=500']
- - id: check-byte-order-marker
- - id: check-case-conflict
- - id: check-merge-conflict
- - id: check-symlinks
- - id: debug-statements
-
- - repo: https://github.com/pycqa/isort
- rev: 5.12.0
- hooks:
- - id: isort
- language_version: python3.10
-
- - repo: https://github.com/psf/black
- rev: 23.3.0
- hooks:
- - id: black
- language_version: python3.10
-
- - repo: https://github.com/pre-commit/mirrors-mypy
- rev: 'v1.3.0'
- hooks:
- - id: mypy
-
- - repo: local
- hooks:
- - id: autoflake
- name: autoflake
- entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
- language: python
- types: [ python ]
- - id: pytest-check
- name: pytest-check
- entry: pytest --cov=autogpt tests/unit
- language: system
- pass_filenames: false
- always_run: true
diff --git a/BULLETIN.md b/BULLETIN.md
deleted file mode 100644
index 0b8afeba4..000000000
--- a/BULLETIN.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Website and Documentation Site 📰📖
-Check out *https://agpt.co*, the official news & updates site for Auto-GPT!
-The documentation also has a place here, at *https://docs.agpt.co*
-
-# For contributors 👷🏼
-Since releasing v0.3.0, we have been working on re-architecting the Auto-GPT core to make it more extensible and make room for structural performance-oriented R&D.
-
-Check out the contribution guide on our wiki:
-https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
-
-# 🚀 v0.4.3 Release 🚀
-We're happy to announce the 0.4.3 maintenance release, which primarily focuses on refining the LLM command execution,
-extending support for OpenAI's latest models (including the powerful GPT-3 16k model), and laying the groundwork
-for future compatibility with OpenAI's function calling feature.
-
-Key Highlights:
-- OpenAI API Key Prompt: Auto-GPT will now courteously prompt users for their OpenAI API key, if it's not already provided.
-- Summarization Enhancements: We've optimized Auto-GPT's use of the LLM context window even further.
-- JSON Memory Reading: Support for reading memories from JSON files has been improved, resulting in enhanced task execution.
-- Deprecated commands, removed for a leaner, more performant LLM: analyze_code, write_tests, improve_code, audio_text, web_playwright, web_requests
-## Take a look at the Release Notes on Github for the full changelog!
-https://github.com/Significant-Gravitas/Auto-GPT/releases
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 000000000..3f6fd8561
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,21 @@
+# This CITATION.cff file was generated with cffinit.
+# Visit https://bit.ly/cffinit to generate yours today!
+
+cff-version: 1.2.0
+title: AutoGPT
+message: >-
+ If you use this software, please cite it using the
+ metadata from this file.
+type: software
+authors:
+ - name: Significant Gravitas
+ website: 'https://agpt.co'
+repository-code: 'https://github.com/Significant-Gravitas/AutoGPT'
+url: 'https://agpt.co'
+abstract: >-
+ A collection of tools and experimental open-source attempts to make GPT-4 fully
+ autonomous.
+keywords:
+ - AI
+ - Agent
+license: MIT
diff --git a/CLI-USAGE.md b/CLI-USAGE.md
new file mode 100755
index 000000000..82c3ecc8d
--- /dev/null
+++ b/CLI-USAGE.md
@@ -0,0 +1,182 @@
+## CLI Documentation
+
+This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agent stop` command will terminate any process running on port 8000.
+
+### 1. Entry Point for the CLI
+
+Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.
+
+```sh
+./run
+```
+
+**Output**:
+
+```
+Usage: cli.py [OPTIONS] COMMAND [ARGS]...
+
+Options:
+ --help Show this message and exit.
+
+Commands:
+ agent Commands to create, start and stop agents
+ benchmark Commands to start the benchmark and list tests and categories
+ setup Installs dependencies needed for your system.
+```
+
+If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:
+
+```sh
+./run COMMAND --help
+```
+
+This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.
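+
+For instance, to see the help for the `agent` command group (a representative example; the exact output may vary):
+
+```sh
+./run agent --help
+```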
+
+### 2. Setup Command
+
+```sh
+./run setup
+```
+
+**Output**:
+
+```
+Setup initiated
+Installation has been completed.
+```
+
+This command initializes the setup of the project.
+
+### 3. Agents Commands
+
+**a. List All Agents**
+
+```sh
+./run agent list
+```
+
+**Output**:
+
+```
+Available agents: 🤖
+ 🐙 forge
+ 🐙 autogpt
+```
+
+Lists all the available agents.
+
+**b. Create a New Agent**
+
+```sh
+./run agent create my_agent
+```
+
+**Output**:
+
+```
+🎉 New agent 'my_agent' created and switched to the new directory in autogpts folder.
+```
+
+Creates a new agent named 'my_agent'.
+
+**c. Start an Agent**
+
+```sh
+./run agent start my_agent
+```
+
+**Output**:
+
+```
+... (ASCII Art representing the agent startup)
+[Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
+[Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
+```
+
+Starts the 'my_agent' and displays startup ASCII art and logs.
+
+**d. Stop an Agent**
+
+```sh
+./run agent stop
+```
+
+**Output**:
+
+```
+Agent stopped
+```
+
+Stops the running agent.
+
+### 4. Benchmark Commands
+
+**a. List Benchmark Categories**
+
+```sh
+./run benchmark categories list
+```
+
+**Output**:
+
+```
+Available categories: 📚
+ 📖 code
+ 📖 safety
+ 📖 memory
+ ... (and so on)
+```
+
+Lists all available benchmark categories.
+
+**b. List Benchmark Tests**
+
+```sh
+./run benchmark tests list
+```
+
+**Output**:
+
+```
+Available tests: 📚
+ 📖 interface
+ 🔬 Search - TestSearch
+ 🔬 Write File - TestWriteFile
+ ... (and so on)
+```
+
+Lists all available benchmark tests.
+
+**c. Show Details of a Benchmark Test**
+
+```sh
+./run benchmark tests details TestWriteFile
+```
+
+**Output**:
+
+```
+TestWriteFile
+-------------
+
+ Category: interface
+ Task: Write the word 'Washington' to a .txt file
+ ... (and other details)
+```
+
+Displays the details of the 'TestWriteFile' benchmark test.
+
+**d. Start Benchmark for the Agent**
+
+```sh
+./run benchmark start my_agent
+```
+
+**Output**:
+
+```
+(more details about the testing process are shown whilst the tests are running)
+============= 13 failed, 1 passed in 0.97s ============...
+```
+
+Displays the results of the benchmark tests on 'my_agent'.
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index b1f7caf39..a92ea4bd2 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,12 +1,12 @@
-# Code of Conduct for Auto-GPT
+# Code of Conduct for AutoGPT
## 1. Purpose
-The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
+The purpose of this Code of Conduct is to provide guidelines for contributors to the AutoGPT projects on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
## 2. Scope
-This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
+This Code of Conduct applies to all contributors, maintainers, and users of the AutoGPT project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
## 3. Our Standards
@@ -36,4 +36,5 @@ This Code of Conduct is adapted from the [Contributor Covenant](https://www.cont
## 6. Contact
-If you have any questions or concerns, please contact the project maintainers.
+If you have any questions or concerns, please contact the project maintainers on Discord:
+https://discord.gg/autogpt
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 13a805ff0..acea33066 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,14 +1,34 @@
-We maintain a knowledgebase at this [wiki](https://github.com/Significant-Gravitas/Nexus/wiki)
+# AutoGPT Contribution Guide
+If you are reading this, you are probably looking for our **[contribution guide]**,
+which is part of our [knowledge base].
-We would like to say "We value all contributions". After all, we are an open-source project, so we should say something fluffy like this, right?
+You can find our immediate priorities and their progress on our public [kanban board].
-However the reality is that some contributions are SUPER-valuable, while others create more trouble than they are worth and actually _create_ work for the core team.
+[contribution guide]: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
+[knowledge base]: https://github.com/Significant-Gravitas/Nexus/wiki
+[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
-If you wish to contribute, please look through the wiki [contributing](https://github.com/Significant-Gravitas/Nexus/wiki/Contributing) page.
+## In short
+1. Avoid duplicate work, issues, PRs, etc.
+2. We encourage you to collaborate with fellow community members on some of our bigger
+ [todo's][kanban board]!
+ * We highly recommend posting your idea and discussing it in the [dev channel].
+3. Create a draft PR when starting work on bigger changes.
+4. Please also consider contributing something other than code; see the
+ [contribution guide] for options.
+5. Clearly explain your changes when submitting a PR.
+6. Don't submit stuff that's broken.
+7. Avoid making unnecessary changes, especially if they're purely based on your personal
+ preferences. Doing so is the maintainers' job. ;-)
-If you wish to involve with the project (beyond just contributing PRs), please read the wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.
+[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
-In fact, why not just look through the whole wiki (it's only a few pages) and hop on our discord (you'll find it in the wiki).
+If you wish to get involved with the project (beyond just contributing PRs), please read the
+wiki [catalyzing](https://github.com/Significant-Gravitas/Nexus/wiki/Catalyzing) page.
-❤️ & 🔆
-The team @ Auto-GPT
+In fact, why not just look through the whole wiki (it's only a few pages) and
+hop on our Discord. See you there! :-)
+
+❤️ & 🔆
+The team @ AutoGPT
+https://discord.gg/autogpt
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index a31c78abf..000000000
--- a/Dockerfile
+++ /dev/null
@@ -1,46 +0,0 @@
-# 'dev' or 'release' container build
-ARG BUILD_TYPE=dev
-
-# Use an official Python base image from the Docker Hub
-FROM python:3.10-slim AS autogpt-base
-
-# Install browsers
-RUN apt-get update && apt-get install -y \
- chromium-driver firefox-esr ca-certificates \
- && apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Install utilities
-RUN apt-get update && apt-get install -y \
- curl jq wget git \
- && apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Set environment variables
-ENV PIP_NO_CACHE_DIR=yes \
- PYTHONUNBUFFERED=1 \
- PYTHONDONTWRITEBYTECODE=1
-
-# Install the required python packages globally
-ENV PATH="$PATH:/root/.local/bin"
-COPY requirements.txt .
-
-# Set the entrypoint
-ENTRYPOINT ["python", "-m", "autogpt", "--install-plugin-deps"]
-
-# dev build -> include everything
-FROM autogpt-base as autogpt-dev
-RUN pip install --no-cache-dir -r requirements.txt
-WORKDIR /app
-ONBUILD COPY . ./
-
-# release build -> include bare minimum
-FROM autogpt-base as autogpt-release
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
- pip install --no-cache-dir -r requirements.txt
-WORKDIR /app
-ONBUILD COPY autogpt/ ./autogpt
-ONBUILD COPY scripts/ ./scripts
-ONBUILD COPY plugins/ ./plugins
-ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
-ONBUILD RUN mkdir ./data
-
-FROM autogpt-${BUILD_TYPE} AS auto-gpt
diff --git a/QUICKSTART.md b/QUICKSTART.md
new file mode 100644
index 000000000..5eab892e3
--- /dev/null
+++ b/QUICKSTART.md
@@ -0,0 +1,190 @@
+# Quickstart Guide
+
+> For the complete getting-started walkthrough, see the [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec).
+
+Welcome to the Quickstart Guide! This guide will walk you through the process of setting up and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the necessary steps to jumpstart your journey in the world of AI development with AutoGPT.
+
+## System Requirements
+
+This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you are using a Windows system, you will need to install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
+
+
+## Getting Setup
+1. **Fork the Repository**
+ To fork the repository, follow these steps:
+ - Navigate to the main page of the repository.
+
+ ![Repository](docs/content/imgs/quickstart/001_repo.png)
+ - In the top-right corner of the page, click Fork.
+
+ ![Create Fork UI](docs/content/imgs/quickstart/002_fork.png)
+ - On the next page, select your GitHub account to create the fork under.
+ - Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
+
+2. **Clone the Repository**
+ To clone the repository, you need to have Git installed on your system. If you don't have Git installed, you can download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
+ - Open your terminal.
+ - Navigate to the directory where you want to clone the repository.
+ - Run the `git clone` command for the fork you just created.
+
+ ![Clone the Repository](docs/content/imgs/quickstart/003_clone.png)
+
+ - Then open the project in your IDE.
+
+ ![Open the Project in your IDE](docs/content/imgs/quickstart/004_ide.png)
+
+3. **Set Up the Project**
+ Next, we need to set up the required dependencies. The repository ships with a helper tool for this and most other tasks;
+ you can access it by typing `./run` in the terminal.
+
+ The first command you need is `./run setup`. This will guide you through the process of setting up your system.
+ Initially, you will get instructions for installing Flutter and Chrome, and for setting up your GitHub access token, like in the following image:
+
+ > Note for advanced users: the GitHub access token is only needed for the `./run arena enter` command, so the system can automatically create a PR.
+
+
+ ![Setup the Project](docs/content/imgs/quickstart/005_setup.png)
+
+### For Windows Users
+
+If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
+
+#### Update WSL
+Run the following command in PowerShell or Command Prompt to:
+1. Enable the optional WSL and Virtual Machine Platform components.
+2. Download and install the latest Linux kernel.
+3. Set WSL 2 as the default.
+4. Download and install the Ubuntu Linux distribution (a reboot may be required).
+
+```shell
+wsl --install
+```
+
+For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).
+
+#### Resolve FileNotFoundError or "No such file or directory" Errors
+When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF, Carriage Return Line Feed) are not compatible with Unix/Linux-style line endings (LF, Line Feed).
+
+To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here’s how to install and run `dos2unix` on the script:
+
+```shell
+sudo apt update
+sudo apt install dos2unix
+dos2unix ./run
+```
+
+After executing the above commands, running `./run setup` should work successfully.
+
+#### Store Project Files within the WSL File System
+If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids issues related to path translations and permissions and provides a more consistent development environment.
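+
+For example, assuming your clone currently lives on the Windows drive (the Windows path below is illustrative):
+
+```shell
+# copy the project into the Linux file system and work from there
+mkdir -p ~/projects
+cp -r /mnt/c/Users/YOUR_USER/AutoGPT ~/projects/
+cd ~/projects/AutoGPT
+```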
+
+ You can keep re-running `./run setup` to check how far along you are.
+ When setup has been completed, the command will return output like this:
+
+ ![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png)
+
+## Creating Your Agent
+
+ Now that setup is complete, it's time to create your agent template.
+ Do so by running `./run agent create YOUR_AGENT_NAME`, replacing YOUR_AGENT_NAME with a name of your choice. Examples of valid names: swiftyosgpt, SwiftyosAgent, or swiftyos_agent.
+
+ ![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png)
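+
+ For example, using one of the valid names above:
+
+ ```shell
+ ./run agent create swiftyos_agent
+ ```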
+
+ Once your agent is created, it's time to officially enter the Arena!
+ Do so by running `./run arena enter YOUR_AGENT_NAME`.
+
+ ![Enter the Arena](docs/content/imgs/quickstart/008_enter_arena.png)
+
+ > Note for advanced users: create a new branch and a file called YOUR_AGENT_NAME.json in the arena directory, then commit it and create a PR to merge into the main repo. Only single-file entries will be permitted. The JSON file needs the following format:
+ ```json
+ {
+ "github_repo_url": "https://github.com/Swiftyos/YourAgentName",
+ "timestamp": "2023-09-18T10:03:38.051498",
+ "commit_hash_to_benchmark": "ac36f7bfc7f23ad8800339fa55943c1405d80d5e",
+ "branch_to_benchmark": "master"
+ }
+ ```
+ - github_repo_url: the URL of your fork
+ - timestamp: timestamp of the last update of this file
+ - commit_hash_to_benchmark: the commit hash of your entry. Update it each time you have something ready to be officially entered into the hackathon
+ - branch_to_benchmark: the branch you are developing your agent on; the default is master.
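+
+ If you prefer to generate this file rather than write it by hand, here is a minimal sketch. It assumes GNU `date` and that you run it inside a git checkout of your fork; the output path is illustrative:
+
+ ```shell
+ cat > /path/to/AutoGPT/arena/YOUR_AGENT_NAME.json << EOF
+ {
+ "github_repo_url": "$(git remote get-url origin)",
+ "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%S.%6N)",
+ "commit_hash_to_benchmark": "$(git rev-parse HEAD)",
+ "branch_to_benchmark": "$(git branch --show-current)"
+ }
+ EOF
+ ```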
+
+
+## Running your Agent
+
+Your agent can be started using `./run agent start YOUR_AGENT_NAME`.
+
+This starts the agent on `http://localhost:8000/`.
+
+![Start the Agent](docs/content/imgs/quickstart/009_start_agent.png)
+
+The frontend can be accessed at `http://localhost:8000/`; you will first need to log in using either a Google account or your GitHub account.
+
+![Login](docs/content/imgs/quickstart/010_login.png)
+
+Upon logging in, you will see a page that looks something like this, with your task history down the left-hand side of the page and the 'chat' window to send tasks to your agent.
+
+![Login](docs/content/imgs/quickstart/011_home.png)
+
+When you have finished with your agent, or if you just need to restart it, use Ctrl+C to end the session. Then you can re-run the start command.
+
+If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command that kills the process using port 8000, which should be the agent.
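+
+Under the hood, stopping the agent amounts to freeing port 8000. A rough manual equivalent, assuming `lsof` and GNU `xargs` are installed (a sketch, not part of the project's tooling):
+
+```shell
+# find the PID listening on port 8000 and terminate it
+# (-r skips the kill when nothing is listening)
+lsof -ti :8000 | xargs -r kill
+```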
+
+## Benchmarking your Agent
+
+The benchmarking system can also be accessed using the CLI:
+
+```bash
+agpt % ./run benchmark
+Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...
+
+ Commands to start the benchmark and list tests and categories
+
+Options:
+ --help Show this message and exit.
+
+Commands:
+ categories Benchmark categories group command
+ start Starts the benchmark command
+ tests Benchmark tests group command
+agpt % ./run benchmark categories
+Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...
+
+ Benchmark categories group command
+
+Options:
+ --help Show this message and exit.
+
+Commands:
+ list List benchmark categories command
+agpt % ./run benchmark tests
+Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...
+
+ Benchmark tests group command
+
+Options:
+ --help Show this message and exit.
+
+Commands:
+ details Benchmark test details command
+ list List benchmark tests command
+```
+
+The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
+```bash
+./run benchmark categories list
+# And what tests are available with
+./run benchmark tests list
+```
+
+![Login](docs/content/imgs/quickstart/012_tests.png)
+
+
+Finally, you can run the benchmark with
+
+```bash
+./run benchmark start YOUR_AGENT_NAME
+```
diff --git a/README.md b/README.md
index 223e189e1..a139cd2e8 100644
--- a/README.md
+++ b/README.md
@@ -1,148 +1,108 @@
-# Auto-GPT: An Autonomous GPT-4 Experiment
-[![Official Website](https://img.shields.io/badge/Official%20Website-agpt.co-blue?style=flat&logo=world&logoColor=white)](https://agpt.co)
-[![Unit Tests](https://img.shields.io/github/actions/workflow/status/Significant-Gravitas/Auto-GPT/ci.yml?label=unit%20tests)](https://github.com/Significant-Gravitas/Auto-GPT/actions/workflows/ci.yml)
-[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt)
-[![GitHub Repo stars](https://img.shields.io/github/stars/Significant-Gravitas/auto-gpt?style=social)](https://github.com/Significant-Gravitas/Auto-GPT/stargazers)
-[![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social)](https://twitter.com/SigGravitas)
+# AutoGPT: build & use AI agents
-## 💡 Get help - [Q&A](https://github.com/Significant-Gravitas/Auto-GPT/discussions/categories/q-a) or [Discord 💬](https://discord.gg/autogpt)
+[![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) &ensp;
+[![Twitter Follow](https://img.shields.io/twitter/follow/Auto_GPT?style=social)](https://twitter.com/Auto_GPT) &ensp;
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
-<hr/>
+**AutoGPT** is the vision of making the power of AI accessible to everyone, to use and to build on. Our mission is to provide the tools so that you can focus on what matters:
-### 🔴 USE `stable` not `master` 🔴
+- 🏗️ **Building** - Lay the foundation for something amazing.
+- 🧪 **Testing** - Fine-tune your agent to perfection.
+- 🤝 **Delegating** - Let AI work for you, and have your ideas come to life.
-**Download the latest `stable` release from here: https://github.com/Significant-Gravitas/Auto-GPT/releases/latest.**
-The `master` branch is under heavy development and may often be in a **broken** state.
+Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI innovation.
-<hr/>
+**📖 [Documentation](https://docs.agpt.co)**
+&ensp;|&ensp;
+**🚀 [Contributing](CONTRIBUTING.md)**
+&ensp;|&ensp;
+**🛠️ [Build your own Agent - Quickstart](QUICKSTART.md)**
+## 🥇 Current Best Agent: evo.ninja
+[Current Best Agent]: #-current-best-agent-evoninja
-Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
+The AutoGPT Arena Hackathon saw [**evo.ninja**](https://github.com/polywrap/evo.ninja) earn the top spot on our Arena Leaderboard, proving itself as the best open-source generalist agent. Try it now at https://evo.ninja!
-<h2 align="center"> Demo April 16th 2023 </h2>
+📈 To challenge evo.ninja, AutoGPT, and others, submit your benchmark run to the [Leaderboard](#-leaderboard), and maybe your agent will be up here next!
-https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4
+## 🧱 Building blocks
-Demo made by <a href=https://twitter.com/BlakeWerlinger>Blake Werlinger</a>
+### 🏗️ Forge
-## 🚀 Features
+**Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/autogpts/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
-- 🌐 Internet access for searches and information gathering
-- 💾 Long-term and short-term memory management
-- 🧠 GPT-4 instances for text generation
-- 🔗 Access to popular websites and platforms
-- 🗃️ File storage and summarization with GPT-3.5
-- 🔌 Extensibility with Plugins
+🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/forge/tutorials/001_getting_started.md) &ndash;
+This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
-## Quickstart
+📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/forge) about Forge
-0. Check out the [wiki](https://github.com/Significant-Gravitas/Nexus/wiki)
-1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys)
-2. Download the [latest release](https://github.com/Significant-Gravitas/Auto-GPT/releases/latest)
-3. Follow the [installation instructions][docs/setup]
-4. Configure any additional features you want, or install some [plugins][docs/plugins]
-5. [Run][docs/usage] the app
+### 🎯 Benchmark
-Please see the [documentation][docs] for full setup instructions and configuration options.
+**Measure your agent's performance!** The `agbenchmark` can be used with any agent that supports the agent protocol, and the integration with the project's [CLI] makes it even easier to use with AutoGPT and forge-based agents. The benchmark offers a stringent testing environment. Our framework allows for autonomous, objective performance evaluations, ensuring your agents are primed for real-world action.
-[docs]: https://docs.agpt.co/
+<!-- TODO: insert visual demonstrating the benchmark -->
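+
+As a quick sketch of a standalone run (assuming an agent that implements the agent protocol is already serving locally; `--no-dep` is the same flag the project's hackathon workflow uses):
+
+```shell
+pip install agbenchmark
+agbenchmark --no-dep
+```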
-## 📖 Documentation
-* [⚙️ Setup][docs/setup]
-* [💻 Usage][docs/usage]
-* [🔌 Plugins][docs/plugins]
-* Configuration
- * [🔍 Web Search](https://docs.agpt.co/configuration/search/)
- * [🧠 Memory](https://docs.agpt.co/configuration/memory/)
- * [🗣️ Voice (TTS)](https://docs.agpt.co/configuration/voice/)
- * [🖼️ Image Generation](https://docs.agpt.co/configuration/imagegen/)
+📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
+&ensp;|&ensp;
+📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
-[docs/setup]: https://docs.agpt.co/setup/
-[docs/usage]: https://docs.agpt.co/usage/
-[docs/plugins]: https://docs.agpt.co/plugins/
+#### 🏆 [Leaderboard][leaderboard]
+[leaderboard]: https://leaderboard.agpt.co
-<h2 align="center"> 💖 Help Fund Auto-GPT's Development 💖</h2>
-<p align="center">
-If you can spare a coffee, you can help to cover the costs of developing Auto-GPT and help to push the boundaries of fully autonomous AI!
-Your support is greatly appreciated. Development of this free, open-source project is made possible by all the <a href="https://github.com/Significant-Gravitas/Auto-GPT/graphs/contributors">contributors</a> and <a href="https://github.com/sponsors/Torantulino">sponsors</a>. If you'd like to sponsor this project and have your avatar or company logo appear below <a href="https://github.com/sponsors/Torantulino">click here</a>.
-</p>
+Submit your benchmark run through the UI and claim your place on the AutoGPT Arena Leaderboard! The best scoring general agent earns the title of **[Current Best Agent]**, and will be adopted into our repo so people can easily run it through the [CLI].
+[![Screenshot of the AutoGPT Arena leaderboard](https://github.com/Significant-Gravitas/AutoGPT/assets/12185583/60813392-9ddb-4cca-bb44-b477dbae225d)][leaderboard]
-<p align="center">
-<div align="center" class="logo-container">
-<a href="https://www.zilliz.com/">
-<picture height="40px">
- <source media="(prefers-color-scheme: light)" srcset="https://user-images.githubusercontent.com/22963551/234158272-7917382e-ff80-469e-8d8c-94f4477b8b5a.png">
- <img src="https://user-images.githubusercontent.com/22963551/234158222-30e2d7a7-f0a9-433d-a305-e3aa0b194444.png" height="40px" alt="Zilliz" />
-</picture>
-</a>
-
-<a href="https://roost.ai">
-<img src="https://user-images.githubusercontent.com/22963551/234180283-b58cb03c-c95a-4196-93c1-28b52a388e9d.png" height="40px" alt="Roost.AI" />
-</a>
-
-<a href="https://nuclei.ai/">
-<picture height="40px">
- <source media="(prefers-color-scheme: light)" srcset="https://user-images.githubusercontent.com/22963551/234153428-24a6f31d-c0c6-4c9b-b3f4-9110148f67b4.png">
- <img src="https://user-images.githubusercontent.com/22963551/234181283-691c5d71-ca94-4646-a1cf-6e818bd86faa.png" height="40px" alt="NucleiAI" />
-</picture>
-</a>
+### 💻 UI
-<a href="https://www.algohash.org/">
-<picture>
- <source media="(prefers-color-scheme: light)" srcset="https://user-images.githubusercontent.com/22963551/234180375-1365891c-0ba6-4d49-94c3-847c85fe03b0.png" >
- <img src="https://user-images.githubusercontent.com/22963551/234180359-143e4a7a-4a71-4830-99c8-9b165cde995f.png" height="40px" alt="Algohash" />
-</picture>
-</a>
+**Makes agents easy to use!** The `frontend` gives you a user-friendly interface to control and monitor your agents. It connects to agents through the [agent protocol](#-agent-protocol), ensuring compatibility with many agents from both inside and outside of our ecosystem.
-<a href="https://github.com/weaviate/weaviate">
-<picture height="40px">
- <source media="(prefers-color-scheme: light)" srcset="https://user-images.githubusercontent.com/22963551/234181699-3d7f6ea8-5a7f-4e98-b812-37be1081be4b.png">
- <img src="https://user-images.githubusercontent.com/22963551/234181695-fc895159-b921-4895-9a13-65e6eff5b0e7.png" height="40px" alt="TypingMind" />
-</picture>
-</a>
+<!-- TODO: insert screenshot of frontend -->
-<a href="https://chatgpv.com/?ref=spni76459e4fa3f30a">
-<img src="https://github-production-user-asset-6210df.s3.amazonaws.com/22963551/239132565-623a2dd6-eaeb-4941-b40f-c5a29ca6bebc.png" height="40px" alt="ChatGPV" />
-</a>
-
-</div>
-</br>
+The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!
+📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
+### ⌨️ CLI
-<p align="center"><a href="https://github.com/robinicus"><img src="https://avatars.githubusercontent.com/robinicus?v=4" width="50px" alt="robinicus" /></a>&nbsp;&nbsp;<a href="https://github.com/0xmatchmaker"><img src="https://avatars.githubusercontent.com/0xmatchmaker?v=4" width="50px" alt="0xmatchmaker" /></a>&nbsp;&nbsp;<a href="https://github.com/jazgarewal"><img src="https://avatars.githubusercontent.com/jazgarewal?v=4" width="50px" alt="jazgarewal" /></a>&nbsp;&nbsp;<a href="https://github.com/MayurVirkar"><img src="https://avatars.githubusercontent.com/MayurVirkar?v=4" width="50px" alt="MayurVirkar" /></a>&nbsp;&nbsp;<a href="https://github.com/avy-ai"><img src="https://avatars.githubusercontent.com/avy-ai?v=4" width="50px" alt="avy-ai" /></a>&nbsp;&nbsp;<a href="https://github.com/TheStoneMX"><img src="https://avatars.githubusercontent.com/TheStoneMX?v=4" width="50px" alt="TheStoneMX" /></a>&nbsp;&nbsp;<a href="https://github.com/goldenrecursion"><img src="https://avatars.githubusercontent.com/goldenrecursion?v=4" width="50px" alt="goldenrecursion" /></a>&nbsp;&nbsp;<a href="https://github.com/MatthewAgs"><img src="https://avatars.githubusercontent.com/MatthewAgs?v=4" width="50px" alt="MatthewAgs" /></a>&nbsp;&nbsp;<a href="https://github.com/eelbaz"><img src="https://avatars.githubusercontent.com/eelbaz?v=4" width="50px" alt="eelbaz" /></a>&nbsp;&nbsp;<a href="https://github.com/rapidstartup"><img src="https://avatars.githubusercontent.com/rapidstartup?v=4" width="50px" alt="rapidstartup" /></a>&nbsp;&nbsp;<a href="https://github.com/gklab"><img src="https://avatars.githubusercontent.com/gklab?v=4" width="50px" alt="gklab" /></a>&nbsp;&nbsp;<a href="https://github.com/VoiceBeer"><img src="https://avatars.githubusercontent.com/VoiceBeer?v=4" width="50px" alt="VoiceBeer" /></a>&nbsp;&nbsp;<a href="https://github.com/DailyBotHQ"><img src="https://avatars.githubusercontent.com/DailyBotHQ?v=4" width="50px" alt="DailyBotHQ" /></a>&nbsp;&nbsp;<a href="https://github.com/lucas-chu"><img src="https://avatars.githubusercontent.com/lucas-chu?v=4" width="50px" alt="lucas-chu" /></a>&nbsp;&nbsp;<a href="https://github.com/knifour"><img src="https://avatars.githubusercontent.com/knifour?v=4" width="50px" alt="knifour" /></a>&nbsp;&nbsp;<a href="https://github.com/refinery1"><img src="https://avatars.githubusercontent.com/refinery1?v=4" width="50px" alt="refinery1" /></a>&nbsp;&nbsp;<a href="https://github.com/st617"><img src="https://avatars.githubusercontent.com/st617?v=4" width="50px" alt="st617" /></a>&nbsp;&nbsp;<a href="https://github.com/neodenit"><img src="https://avatars.githubusercontent.com/neodenit?v=4" width="50px" alt="neodenit" /></a>&nbsp;&nbsp;<a href="https://github.com/CrazySwami"><img src="https://avatars.githubusercontent.com/CrazySwami?v=4" width="50px" alt="CrazySwami" /></a>&nbsp;&nbsp;<a href="https://github.com/Heitechsoft"><img src="https://avatars.githubusercontent.com/Heitechsoft?v=4" width="50px" alt="Heitechsoft" /></a>&nbsp;&nbsp;<a href="https://github.com/RealChrisSean"><img src="https://avatars.githubusercontent.com/RealChrisSean?v=4" width="50px" alt="RealChrisSean" /></a>&nbsp;&nbsp;<a href="https://github.com/abhinav-pandey29"><img src="https://avatars.githubusercontent.com/abhinav-pandey29?v=4" width="50px" alt="abhinav-pandey29" /></a>&nbsp;&nbsp;<a href="https://github.com/Explorergt92"><img src="https://avatars.githubusercontent.com/Explorergt92?v=4" width="50px" alt="Explorergt92" /></a>&nbsp;&nbsp;<a href="https://github.com/SparkplanAI"><img 
src="https://avatars.githubusercontent.com/SparkplanAI?v=4" width="50px" alt="SparkplanAI" /></a>&nbsp;&nbsp;<a href="https://github.com/crizzler"><img src="https://avatars.githubusercontent.com/crizzler?v=4" width="50px" alt="crizzler" /></a>&nbsp;&nbsp;<a href="https://github.com/kreativai"><img src="https://avatars.githubusercontent.com/kreativai?v=4" width="50px" alt="kreativai" /></a>&nbsp;&nbsp;<a href="https://github.com/omphos"><img src="https://avatars.githubusercontent.com/omphos?v=4" width="50px" alt="omphos" /></a>&nbsp;&nbsp;<a href="https://github.com/Jahmazon"><img src="https://avatars.githubusercontent.com/Jahmazon?v=4" width="50px" alt="Jahmazon" /></a>&nbsp;&nbsp;<a href="https://github.com/tjarmain"><img src="https://avatars.githubusercontent.com/tjarmain?v=4" width="50px" alt="tjarmain" /></a>&nbsp;&nbsp;<a href="https://github.com/ddtarazona"><img src="https://avatars.githubusercontent.com/ddtarazona?v=4" width="50px" alt="ddtarazona" /></a>&nbsp;&nbsp;<a href="https://github.com/saten-private"><img src="https://avatars.githubusercontent.com/saten-private?v=4" width="50px" alt="saten-private" /></a>&nbsp;&nbsp;<a href="https://github.com/anvarazizov"><img src="https://avatars.githubusercontent.com/anvarazizov?v=4" width="50px" alt="anvarazizov" /></a>&nbsp;&nbsp;<a href="https://github.com/lazzacapital"><img src="https://avatars.githubusercontent.com/lazzacapital?v=4" width="50px" alt="lazzacapital" /></a>&nbsp;&nbsp;<a href="https://github.com/m"><img src="https://avatars.githubusercontent.com/m?v=4" width="50px" alt="m" /></a>&nbsp;&nbsp;<a href="https://github.com/Pythagora-io"><img src="https://avatars.githubusercontent.com/Pythagora-io?v=4" width="50px" alt="Pythagora-io" /></a>&nbsp;&nbsp;<a href="https://github.com/Web3Capital"><img src="https://avatars.githubusercontent.com/Web3Capital?v=4" width="50px" alt="Web3Capital" /></a>&nbsp;&nbsp;<a href="https://github.com/toverly1"><img src="https://avatars.githubusercontent.com/toverly1?v=4" width="50px" alt="toverly1" /></a>&nbsp;&nbsp;<a href="https://github.com/digisomni"><img src="https://avatars.githubusercontent.com/digisomni?v=4" width="50px" alt="digisomni" /></a>&nbsp;&nbsp;<a href="https://github.com/concreit"><img src="https://avatars.githubusercontent.com/concreit?v=4" width="50px" alt="concreit" /></a>&nbsp;&nbsp;<a href="https://github.com/LeeRobidas"><img src="https://avatars.githubusercontent.com/LeeRobidas?v=4" width="50px" alt="LeeRobidas" /></a>&nbsp;&nbsp;<a href="https://github.com/Josecodesalot"><img src="https://avatars.githubusercontent.com/Josecodesalot?v=4" width="50px" alt="Josecodesalot" /></a>&nbsp;&nbsp;<a href="https://github.com/dexterityx"><img src="https://avatars.githubusercontent.com/dexterityx?v=4" width="50px" alt="dexterityx" /></a>&nbsp;&nbsp;<a href="https://github.com/rickscode"><img src="https://avatars.githubusercontent.com/rickscode?v=4" width="50px" alt="rickscode" /></a>&nbsp;&nbsp;<a href="https://github.com/Brodie0"><img src="https://avatars.githubusercontent.com/Brodie0?v=4" width="50px" alt="Brodie0" /></a>&nbsp;&nbsp;<a href="https://github.com/FSTatSBS"><img src="https://avatars.githubusercontent.com/FSTatSBS?v=4" width="50px" alt="FSTatSBS" /></a>&nbsp;&nbsp;<a href="https://github.com/nocodeclarity"><img src="https://avatars.githubusercontent.com/nocodeclarity?v=4" width="50px" alt="nocodeclarity" /></a>&nbsp;&nbsp;<a href="https://github.com/jsolejr"><img src="https://avatars.githubusercontent.com/jsolejr?v=4" width="50px" alt="jsolejr" /></a>&nbsp;&nbsp;<a 
href="https://github.com/amr-elsehemy"><img src="https://avatars.githubusercontent.com/amr-elsehemy?v=4" width="50px" alt="amr-elsehemy" /></a>&nbsp;&nbsp;<a href="https://github.com/RawBanana"><img src="https://avatars.githubusercontent.com/RawBanana?v=4" width="50px" alt="RawBanana" /></a>&nbsp;&nbsp;<a href="https://github.com/horazius"><img src="https://avatars.githubusercontent.com/horazius?v=4" width="50px" alt="horazius" /></a>&nbsp;&nbsp;<a href="https://github.com/SwftCoins"><img src="https://avatars.githubusercontent.com/SwftCoins?v=4" width="50px" alt="SwftCoins" /></a>&nbsp;&nbsp;<a href="https://github.com/tob-le-rone"><img src="https://avatars.githubusercontent.com/tob-le-rone?v=4" width="50px" alt="tob-le-rone" /></a>&nbsp;&nbsp;<a href="https://github.com/RThaweewat"><img src="https://avatars.githubusercontent.com/RThaweewat?v=4" width="50px" alt="RThaweewat" /></a>&nbsp;&nbsp;<a href="https://github.com/jun784"><img src="https://avatars.githubusercontent.com/jun784?v=4" width="50px" alt="jun784" /></a>&nbsp;&nbsp;<a href="https://github.com/joaomdmoura"><img src="https://avatars.githubusercontent.com/joaomdmoura?v=4" width="50px" alt="joaomdmoura" /></a>&nbsp;&nbsp;<a href="https://github.com/rejunity"><img src="https://avatars.githubusercontent.com/rejunity?v=4" width="50px" alt="rejunity" /></a>&nbsp;&nbsp;<a href="https://github.com/mathewhawkins"><img src="https://avatars.githubusercontent.com/mathewhawkins?v=4" width="50px" alt="mathewhawkins" /></a>&nbsp;&nbsp;<a href="https://github.com/caitlynmeeks"><img src="https://avatars.githubusercontent.com/caitlynmeeks?v=4" width="50px" alt="caitlynmeeks" /></a>&nbsp;&nbsp;<a href="https://github.com/jd3655"><img src="https://avatars.githubusercontent.com/jd3655?v=4" width="50px" alt="jd3655" /></a>&nbsp;&nbsp;<a href="https://github.com/Odin519Tomas"><img src="https://avatars.githubusercontent.com/Odin519Tomas?v=4" width="50px" alt="Odin519Tomas" /></a>&nbsp;&nbsp;<a href="https://github.com/DataMetis"><img src="https://avatars.githubusercontent.com/DataMetis?v=4" width="50px" alt="DataMetis" /></a>&nbsp;&nbsp;<a href="https://github.com/webbcolton"><img src="https://avatars.githubusercontent.com/webbcolton?v=4" width="50px" alt="webbcolton" /></a>&nbsp;&nbsp;<a href="https://github.com/rocks6"><img src="https://avatars.githubusercontent.com/rocks6?v=4" width="50px" alt="rocks6" /></a>&nbsp;&nbsp;<a href="https://github.com/cxs"><img src="https://avatars.githubusercontent.com/cxs?v=4" width="50px" alt="cxs" /></a>&nbsp;&nbsp;<a href="https://github.com/fruition"><img src="https://avatars.githubusercontent.com/fruition?v=4" width="50px" alt="fruition" /></a>&nbsp;&nbsp;<a href="https://github.com/nnkostov"><img src="https://avatars.githubusercontent.com/nnkostov?v=4" width="50px" alt="nnkostov" /></a>&nbsp;&nbsp;<a href="https://github.com/morcos"><img src="https://avatars.githubusercontent.com/morcos?v=4" width="50px" alt="morcos" /></a>&nbsp;&nbsp;<a href="https://github.com/pingbotan"><img src="https://avatars.githubusercontent.com/pingbotan?v=4" width="50px" alt="pingbotan" /></a>&nbsp;&nbsp;<a href="https://github.com/maxxflyer"><img src="https://avatars.githubusercontent.com/maxxflyer?v=4" width="50px" alt="maxxflyer" /></a>&nbsp;&nbsp;<a href="https://github.com/tommi-joentakanen"><img src="https://avatars.githubusercontent.com/tommi-joentakanen?v=4" width="50px" alt="tommi-joentakanen" /></a>&nbsp;&nbsp;<a href="https://github.com/hunteraraujo"><img src="https://avatars.githubusercontent.com/hunteraraujo?v=4" 
width="50px" alt="hunteraraujo" /></a>&nbsp;&nbsp;<a href="https://github.com/projectonegames"><img src="https://avatars.githubusercontent.com/projectonegames?v=4" width="50px" alt="projectonegames" /></a>&nbsp;&nbsp;<a href="https://github.com/tullytim"><img src="https://avatars.githubusercontent.com/tullytim?v=4" width="50px" alt="tullytim" /></a>&nbsp;&nbsp;<a href="https://github.com/comet-ml"><img src="https://avatars.githubusercontent.com/comet-ml?v=4" width="50px" alt="comet-ml" /></a>&nbsp;&nbsp;<a href="https://github.com/thepok"><img src="https://avatars.githubusercontent.com/thepok?v=4" width="50px" alt="thepok" /></a>&nbsp;&nbsp;<a href="https://github.com/prompthero"><img src="https://avatars.githubusercontent.com/prompthero?v=4" width="50px" alt="prompthero" /></a>&nbsp;&nbsp;<a href="https://github.com/sunchongren"><img src="https://avatars.githubusercontent.com/sunchongren?v=4" width="50px" alt="sunchongren" /></a>&nbsp;&nbsp;<a href="https://github.com/neverinstall"><img src="https://avatars.githubusercontent.com/neverinstall?v=4" width="50px" alt="neverinstall" /></a>&nbsp;&nbsp;<a href="https://github.com/josephcmiller2"><img src="https://avatars.githubusercontent.com/josephcmiller2?v=4" width="50px" alt="josephcmiller2" /></a>&nbsp;&nbsp;<a href="https://github.com/yx3110"><img src="https://avatars.githubusercontent.com/yx3110?v=4" width="50px" alt="yx3110" /></a>&nbsp;&nbsp;<a href="https://github.com/MBassi91"><img src="https://avatars.githubusercontent.com/MBassi91?v=4" width="50px" alt="MBassi91" /></a>&nbsp;&nbsp;<a href="https://github.com/SpacingLily"><img src="https://avatars.githubusercontent.com/SpacingLily?v=4" width="50px" alt="SpacingLily" /></a>&nbsp;&nbsp;<a href="https://github.com/arthur-x88"><img src="https://avatars.githubusercontent.com/arthur-x88?v=4" width="50px" alt="arthur-x88" /></a>&nbsp;&nbsp;<a href="https://github.com/ciscodebs"><img src="https://avatars.githubusercontent.com/ciscodebs?v=4" width="50px" alt="ciscodebs" /></a>&nbsp;&nbsp;<a href="https://github.com/christian-gheorghe"><img src="https://avatars.githubusercontent.com/christian-gheorghe?v=4" width="50px" alt="christian-gheorghe" /></a>&nbsp;&nbsp;<a href="https://github.com/EngageStrategies"><img src="https://avatars.githubusercontent.com/EngageStrategies?v=4" width="50px" alt="EngageStrategies" /></a>&nbsp;&nbsp;<a href="https://github.com/jondwillis"><img src="https://avatars.githubusercontent.com/jondwillis?v=4" width="50px" alt="jondwillis" /></a>&nbsp;&nbsp;<a href="https://github.com/Cameron-Fulton"><img src="https://avatars.githubusercontent.com/Cameron-Fulton?v=4" width="50px" alt="Cameron-Fulton" /></a>&nbsp;&nbsp;<a href="https://github.com/AryaXAI"><img src="https://avatars.githubusercontent.com/AryaXAI?v=4" width="50px" alt="AryaXAI" /></a>&nbsp;&nbsp;<a href="https://github.com/AuroraHolding"><img src="https://avatars.githubusercontent.com/AuroraHolding?v=4" width="50px" alt="AuroraHolding" /></a>&nbsp;&nbsp;<a href="https://github.com/Mr-Bishop42"><img src="https://avatars.githubusercontent.com/Mr-Bishop42?v=4" width="50px" alt="Mr-Bishop42" /></a>&nbsp;&nbsp;<a href="https://github.com/doverhq"><img src="https://avatars.githubusercontent.com/doverhq?v=4" width="50px" alt="doverhq" /></a>&nbsp;&nbsp;<a href="https://github.com/johnculkin"><img src="https://avatars.githubusercontent.com/johnculkin?v=4" width="50px" alt="johnculkin" /></a>&nbsp;&nbsp;<a href="https://github.com/marv-technology"><img src="https://avatars.githubusercontent.com/marv-technology?v=4" 
width="50px" alt="marv-technology" /></a>&nbsp;&nbsp;<a href="https://github.com/ikarosai"><img src="https://avatars.githubusercontent.com/ikarosai?v=4" width="50px" alt="ikarosai" /></a>&nbsp;&nbsp;<a href="https://github.com/ColinConwell"><img src="https://avatars.githubusercontent.com/ColinConwell?v=4" width="50px" alt="ColinConwell" /></a>&nbsp;&nbsp;<a href="https://github.com/humungasaurus"><img src="https://avatars.githubusercontent.com/humungasaurus?v=4" width="50px" alt="humungasaurus" /></a>&nbsp;&nbsp;<a href="https://github.com/terpsfreak"><img src="https://avatars.githubusercontent.com/terpsfreak?v=4" width="50px" alt="terpsfreak" /></a>&nbsp;&nbsp;<a href="https://github.com/iddelacruz"><img src="https://avatars.githubusercontent.com/iddelacruz?v=4" width="50px" alt="iddelacruz" /></a>&nbsp;&nbsp;<a href="https://github.com/thisisjeffchen"><img src="https://avatars.githubusercontent.com/thisisjeffchen?v=4" width="50px" alt="thisisjeffchen" /></a>&nbsp;&nbsp;<a href="https://github.com/nicoguyon"><img src="https://avatars.githubusercontent.com/nicoguyon?v=4" width="50px" alt="nicoguyon" /></a>&nbsp;&nbsp;<a href="https://github.com/arjunb023"><img src="https://avatars.githubusercontent.com/arjunb023?v=4" width="50px" alt="arjunb023" /></a>&nbsp;&nbsp;<a href="https://github.com/Nalhos"><img src="https://avatars.githubusercontent.com/Nalhos?v=4" width="50px" alt="Nalhos" /></a>&nbsp;&nbsp;<a href="https://github.com/belharethsami"><img src="https://avatars.githubusercontent.com/belharethsami?v=4" width="50px" alt="belharethsami" /></a>&nbsp;&nbsp;<a href="https://github.com/Mobivs"><img src="https://avatars.githubusercontent.com/Mobivs?v=4" width="50px" alt="Mobivs" /></a>&nbsp;&nbsp;<a href="https://github.com/txtr99"><img src="https://avatars.githubusercontent.com/txtr99?v=4" width="50px" alt="txtr99" /></a>&nbsp;&nbsp;<a href="https://github.com/ntwrite"><img src="https://avatars.githubusercontent.com/ntwrite?v=4" width="50px" alt="ntwrite" /></a>&nbsp;&nbsp;<a href="https://github.com/founderblocks-sils"><img src="https://avatars.githubusercontent.com/founderblocks-sils?v=4" width="50px" alt="founderblocks-sils" /></a>&nbsp;&nbsp;<a href="https://github.com/kMag410"><img src="https://avatars.githubusercontent.com/kMag410?v=4" width="50px" alt="kMag410" /></a>&nbsp;&nbsp;<a href="https://github.com/angiaou"><img src="https://avatars.githubusercontent.com/angiaou?v=4" width="50px" alt="angiaou" /></a>&nbsp;&nbsp;<a href="https://github.com/garythebat"><img src="https://avatars.githubusercontent.com/garythebat?v=4" width="50px" alt="garythebat" /></a>&nbsp;&nbsp;<a href="https://github.com/lmaugustin"><img src="https://avatars.githubusercontent.com/lmaugustin?v=4" width="50px" alt="lmaugustin" /></a>&nbsp;&nbsp;<a href="https://github.com/shawnharmsen"><img src="https://avatars.githubusercontent.com/shawnharmsen?v=4" width="50px" alt="shawnharmsen" /></a>&nbsp;&nbsp;<a href="https://github.com/clortegah"><img src="https://avatars.githubusercontent.com/clortegah?v=4" width="50px" alt="clortegah" /></a>&nbsp;&nbsp;<a href="https://github.com/MetaPath01"><img src="https://avatars.githubusercontent.com/MetaPath01?v=4" width="50px" alt="MetaPath01" /></a>&nbsp;&nbsp;<a href="https://github.com/sekomike910"><img src="https://avatars.githubusercontent.com/sekomike910?v=4" width="50px" alt="sekomike910" /></a>&nbsp;&nbsp;<a href="https://github.com/MediConCenHK"><img src="https://avatars.githubusercontent.com/MediConCenHK?v=4" width="50px" alt="MediConCenHK" /></a>&nbsp;&nbsp;<a 
href="https://github.com/svpermari0"><img src="https://avatars.githubusercontent.com/svpermari0?v=4" width="50px" alt="svpermari0" /></a>&nbsp;&nbsp;<a href="https://github.com/jacobyoby"><img src="https://avatars.githubusercontent.com/jacobyoby?v=4" width="50px" alt="jacobyoby" /></a>&nbsp;&nbsp;<a href="https://github.com/turintech"><img src="https://avatars.githubusercontent.com/turintech?v=4" width="50px" alt="turintech" /></a>&nbsp;&nbsp;<a href="https://github.com/allenstecat"><img src="https://avatars.githubusercontent.com/allenstecat?v=4" width="50px" alt="allenstecat" /></a>&nbsp;&nbsp;<a href="https://github.com/CatsMeow492"><img src="https://avatars.githubusercontent.com/CatsMeow492?v=4" width="50px" alt="CatsMeow492" /></a>&nbsp;&nbsp;<a href="https://github.com/tommygeee"><img src="https://avatars.githubusercontent.com/tommygeee?v=4" width="50px" alt="tommygeee" /></a>&nbsp;&nbsp;<a href="https://github.com/judegomila"><img src="https://avatars.githubusercontent.com/judegomila?v=4" width="50px" alt="judegomila" /></a>&nbsp;&nbsp;<a href="https://github.com/cfarquhar"><img src="https://avatars.githubusercontent.com/cfarquhar?v=4" width="50px" alt="cfarquhar" /></a>&nbsp;&nbsp;<a href="https://github.com/ZoneSixGames"><img src="https://avatars.githubusercontent.com/ZoneSixGames?v=4" width="50px" alt="ZoneSixGames" /></a>&nbsp;&nbsp;<a href="https://github.com/kenndanielso"><img src="https://avatars.githubusercontent.com/kenndanielso?v=4" width="50px" alt="kenndanielso" /></a>&nbsp;&nbsp;<a href="https://github.com/CrypteorCapital"><img src="https://avatars.githubusercontent.com/CrypteorCapital?v=4" width="50px" alt="CrypteorCapital" /></a>&nbsp;&nbsp;<a href="https://github.com/sultanmeghji"><img src="https://avatars.githubusercontent.com/sultanmeghji?v=4" width="50px" alt="sultanmeghji" /></a>&nbsp;&nbsp;<a href="https://github.com/jenius-eagle"><img src="https://avatars.githubusercontent.com/jenius-eagle?v=4" width="50px" alt="jenius-eagle" /></a>&nbsp;&nbsp;<a href="https://github.com/josephjacks"><img src="https://avatars.githubusercontent.com/josephjacks?v=4" width="50px" alt="josephjacks" /></a>&nbsp;&nbsp;<a href="https://github.com/pingshian0131"><img src="https://avatars.githubusercontent.com/pingshian0131?v=4" width="50px" alt="pingshian0131" /></a>&nbsp;&nbsp;<a href="https://github.com/AIdevelopersAI"><img src="https://avatars.githubusercontent.com/AIdevelopersAI?v=4" width="50px" alt="AIdevelopersAI" /></a>&nbsp;&nbsp;<a href="https://github.com/ternary5"><img src="https://avatars.githubusercontent.com/ternary5?v=4" width="50px" alt="ternary5" /></a>&nbsp;&nbsp;<a href="https://github.com/ChrisDMT"><img src="https://avatars.githubusercontent.com/ChrisDMT?v=4" width="50px" alt="ChrisDMT" /></a>&nbsp;&nbsp;<a href="https://github.com/AcountoOU"><img src="https://avatars.githubusercontent.com/AcountoOU?v=4" width="50px" alt="AcountoOU" /></a>&nbsp;&nbsp;<a href="https://github.com/chatgpt-prompts"><img src="https://avatars.githubusercontent.com/chatgpt-prompts?v=4" width="50px" alt="chatgpt-prompts" /></a>&nbsp;&nbsp;<a href="https://github.com/Partender"><img src="https://avatars.githubusercontent.com/Partender?v=4" width="50px" alt="Partender" /></a>&nbsp;&nbsp;<a href="https://github.com/Daniel1357"><img src="https://avatars.githubusercontent.com/Daniel1357?v=4" width="50px" alt="Daniel1357" /></a>&nbsp;&nbsp;<a href="https://github.com/KiaArmani"><img src="https://avatars.githubusercontent.com/KiaArmani?v=4" width="50px" alt="KiaArmani" /></a>&nbsp;&nbsp;<a 
href="https://github.com/zkonduit"><img src="https://avatars.githubusercontent.com/zkonduit?v=4" width="50px" alt="zkonduit" /></a>&nbsp;&nbsp;<a href="https://github.com/fabrietech"><img src="https://avatars.githubusercontent.com/fabrietech?v=4" width="50px" alt="fabrietech" /></a>&nbsp;&nbsp;<a href="https://github.com/scryptedinc"><img src="https://avatars.githubusercontent.com/scryptedinc?v=4" width="50px" alt="scryptedinc" /></a>&nbsp;&nbsp;<a href="https://github.com/coreyspagnoli"><img src="https://avatars.githubusercontent.com/coreyspagnoli?v=4" width="50px" alt="coreyspagnoli" /></a>&nbsp;&nbsp;<a href="https://github.com/AntonioCiolino"><img src="https://avatars.githubusercontent.com/AntonioCiolino?v=4" width="50px" alt="AntonioCiolino" /></a>&nbsp;&nbsp;<a href="https://github.com/Dradstone"><img src="https://avatars.githubusercontent.com/Dradstone?v=4" width="50px" alt="Dradstone" /></a>&nbsp;&nbsp;<a href="https://github.com/CarmenCocoa"><img src="https://avatars.githubusercontent.com/CarmenCocoa?v=4" width="50px" alt="CarmenCocoa" /></a>&nbsp;&nbsp;<a href="https://github.com/bentoml"><img src="https://avatars.githubusercontent.com/bentoml?v=4" width="50px" alt="bentoml" /></a>&nbsp;&nbsp;<a href="https://github.com/merwanehamadi"><img src="https://avatars.githubusercontent.com/merwanehamadi?v=4" width="50px" alt="merwanehamadi" /></a>&nbsp;&nbsp;<a href="https://github.com/vkozacek"><img src="https://avatars.githubusercontent.com/vkozacek?v=4" width="50px" alt="vkozacek" /></a>&nbsp;&nbsp;<a href="https://github.com/ASmithOWL"><img src="https://avatars.githubusercontent.com/ASmithOWL?v=4" width="50px" alt="ASmithOWL" /></a>&nbsp;&nbsp;<a href="https://github.com/tekelsey"><img src="https://avatars.githubusercontent.com/tekelsey?v=4" width="50px" alt="tekelsey" /></a>&nbsp;&nbsp;<a href="https://github.com/GalaxyVideoAgency"><img src="https://avatars.githubusercontent.com/GalaxyVideoAgency?v=4" width="50px" alt="GalaxyVideoAgency" /></a>&nbsp;&nbsp;<a href="https://github.com/wenfengwang"><img src="https://avatars.githubusercontent.com/wenfengwang?v=4" width="50px" alt="wenfengwang" /></a>&nbsp;&nbsp;<a href="https://github.com/rviramontes"><img src="https://avatars.githubusercontent.com/rviramontes?v=4" width="50px" alt="rviramontes" /></a>&nbsp;&nbsp;<a href="https://github.com/indoor47"><img src="https://avatars.githubusercontent.com/indoor47?v=4" width="50px" alt="indoor47" /></a>&nbsp;&nbsp;<a href="https://github.com/ZERO-A-ONE"><img src="https://avatars.githubusercontent.com/ZERO-A-ONE?v=4" width="50px" alt="ZERO-A-ONE" /></a>&nbsp;&nbsp;</p>
+[CLI]: #-cli
-## ⚠️ Limitations
+To make it as easy as possible to use all of the tools offered by the repository, a CLI is included at the root of the repo:
-This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
+```shell
+$ ./run
+Usage: cli.py [OPTIONS] COMMAND [ARGS]...
-1. Not a polished application or product, just an experiment
-2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results!
-3. Quite expensive to run, so set and monitor your API key limits with OpenAI!
+Options:
+ --help Show this message and exit.
-## 🛡 Disclaimer
+Commands:
+ agent Commands to create, start and stop agents
+ arena Commands to enter the arena
+ benchmark Commands to start the benchmark and list tests and categories
+ setup Installs dependencies needed for your system.
+```
-This project, Auto-GPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise.
+Just clone the repo, install dependencies with `./run setup`, and you should be good to go!
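+
+As a concrete sketch of that flow (the repo URL is real; the exact `agent start` syntax and the agent name at the end are illustrative — substitute any agent from the repo):
+
+```shell
+# Grab the repo and step into it
+git clone https://github.com/Significant-Gravitas/AutoGPT.git
+cd AutoGPT
+
+# Install the dependencies the CLI needs
+./run setup
+
+# Run an agent of your choice (name is illustrative)
+./run agent start forge
+```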
-The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by Auto-GPT.
+## 🤔 Questions? Problems? Suggestions?
-**Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges.
+### Get help - [Discord 💬](https://discord.gg/autogpt)
-As an autonomous experiment, Auto-GPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software.
+[![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt)
-By using Auto-GPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms.
+To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please check first that an issue for the same topic doesn't already exist.
-## 🐦 Connect with Us on Twitter
+## 🤝 Sister projects
-Stay up-to-date with the latest news, updates, and insights about Auto-GPT by following our Twitter accounts. Engage with the developer and the AI's own account for interesting discussions, project updates, and more.
+### 🔄 Agent Protocol
-- **Developer**: Follow [@siggravitas](https://twitter.com/siggravitas) for insights into the development process, project updates, and related topics from the creator of Entrepreneur-GPT.
+To maintain a uniform standard and ensure seamless compatibility with many current and future applications, AutoGPT employs the [agent protocol](https://agentprotocol.ai/) standard by the AI Engineer Foundation. This standardizes the communication pathways from your agent to the frontend and benchmark.
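+
+For a feel of what that standard looks like on the wire, here is a minimal sketch using the task endpoints defined by the agent protocol spec (host, port, and payloads are assumptions for illustration):
+
+```shell
+# Create a task on an agent-protocol-compliant agent (port is illustrative)
+curl -X POST http://localhost:8000/ap/v1/agent/tasks \
+  -H "Content-Type: application/json" \
+  -d '{"input": "Write a haiku about autonomous agents"}'
+
+# Execute the next step of that task, using the task_id returned above
+curl -X POST http://localhost:8000/ap/v1/agent/tasks/<task_id>/steps \
+  -H "Content-Type: application/json" \
+  -d '{"input": ""}'
+```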
-We look forward to connecting with you and hearing your thoughts, ideas, and experiences with Auto-GPT. Join us on Twitter and let's explore the future of AI together!
+---
<p align="center">
- <a href="https://star-history.com/#Torantulino/auto-gpt&Date">
- <img src="https://api.star-history.com/svg?repos=Torantulino/auto-gpt&type=Date" alt="Star History Chart">
+ <a href="https://star-history.com/#Significant-Gravitas/AutoGPT&Date">
+ <img src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" alt="Star History Chart">
</a>
</p>
diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md
new file mode 100644
index 000000000..2b51851bb
--- /dev/null
+++ b/TROUBLESHOOTING.md
@@ -0,0 +1,23 @@
+This page lists issues you may encounter, along with their fixes.
+
+# Forge
+**Poetry configuration invalid**
+
+The Poetry configuration is invalid:
+- Additional properties are not allowed ('group' was unexpected)
+<img width="487" alt="Screenshot 2023-09-22 at 5 42 59 PM" src="https://github.com/Significant-Gravitas/AutoGPT/assets/9652976/dd451e6b-8114-44de-9928-075f5f06d661">
+
+*Solution*
+
+Update Poetry.
+
+**Pydantic Validation Error**
+
+*Solution*
+
+Remove your SQLite `agent.db` file. The error most likely means some of your existing data does not comply with the new spec (we will create migrations soon to avoid this problem).
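+
+A minimal command-line version of that fix, assuming the stale database sits in your agent's data directory (the path below is illustrative):
+
+```shell
+# Delete the stale agent database so it is recreated on the next run
+# (locate agent.db under your agent's data directory first)
+rm path/to/your/agent/agent.db
+```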
+
+# Benchmark
+TODO
+
+# Frontend
+TODO
diff --git a/arena/480bot.json b/arena/480bot.json
new file mode 100644
index 000000000..819ac9cc9
--- /dev/null
+++ b/arena/480bot.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/480/AutoGPT",
+ "timestamp": "2023-10-22T06:49:52.536177",
+ "commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AGENT_GORDON.json b/arena/AGENT_GORDON.json
new file mode 100644
index 000000000..98784273f
--- /dev/null
+++ b/arena/AGENT_GORDON.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/filipjakubowski/AutoGPT",
+ "timestamp": "2023-11-01T17:13:24.272333",
+ "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AGENT_JARVIS.json b/arena/AGENT_JARVIS.json
new file mode 100644
index 000000000..ac284f6aa
--- /dev/null
+++ b/arena/AGENT_JARVIS.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/filipjakubowski/AutoGPT",
+ "timestamp": "2023-11-04T10:13:11.039444",
+ "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AI.json b/arena/AI.json
new file mode 100644
index 000000000..a6b27fdb1
--- /dev/null
+++ b/arena/AI.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/QingquanBao/AutoGPT",
+ "timestamp": "2023-11-01T16:20:51.086235",
+ "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AKBAgent.json b/arena/AKBAgent.json
new file mode 100644
index 000000000..71e8cea91
--- /dev/null
+++ b/arena/AKBAgent.json
@@ -0,0 +1,7 @@
+{
+ "github_repo_url": "https://github.com/imakb/AKBAgent",
+ "timestamp": "2023-10-31T00:03:23.000000",
+ "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
+ "branch_to_benchmark": "AKBAgent"
+}
+
diff --git a/arena/ASSISTANT.json b/arena/ASSISTANT.json
new file mode 100644
index 000000000..bd0c0f055
--- /dev/null
+++ b/arena/ASSISTANT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/hongzzz/AutoGPT",
+ "timestamp": "2023-10-13T03:22:59.347424",
+ "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AUTO_ENGINEER.json b/arena/AUTO_ENGINEER.json
new file mode 100644
index 000000000..5f8e28c97
--- /dev/null
+++ b/arena/AUTO_ENGINEER.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kaiomagalhaes/AutoGPT",
+ "timestamp": "2023-10-04T15:25:30.458687",
+ "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AUTO_GPT_JON001.json b/arena/AUTO_GPT_JON001.json
new file mode 100644
index 000000000..f36fad390
--- /dev/null
+++ b/arena/AUTO_GPT_JON001.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Jonobinsoftware/AutoGPT-Tutorial",
+ "timestamp": "2023-10-10T06:01:23.439061",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Adtractive_Agent.json b/arena/Adtractive_Agent.json
new file mode 100644
index 000000000..ebec6e6ad
--- /dev/null
+++ b/arena/Adtractive_Agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aivaras-mazylis/AutoGPT",
+ "timestamp": "2023-10-17T13:16:16.327237",
+ "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AgGPT.json b/arena/AgGPT.json
new file mode 100644
index 000000000..07751b8ec
--- /dev/null
+++ b/arena/AgGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/althaf004/AutoGPT",
+ "timestamp": "2023-09-26T03:40:03.658369",
+ "commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AgentJPark.json b/arena/AgentJPark.json
new file mode 100644
index 000000000..636e4d1f7
--- /dev/null
+++ b/arena/AgentJPark.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/againeureka/AutoGPT",
+ "timestamp": "2023-10-12T02:20:01.005361",
+ "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AgentKD.json b/arena/AgentKD.json
new file mode 100644
index 000000000..1aa340eac
--- /dev/null
+++ b/arena/AgentKD.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kitdesai/AgentKD",
+ "timestamp": "2023-10-14T02:35:09.979434",
+ "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Ahmad.json b/arena/Ahmad.json
new file mode 100644
index 000000000..2b5b86f12
--- /dev/null
+++ b/arena/Ahmad.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/JawadAbu/AutoGPT.git",
+ "timestamp": "2023-11-05T12:35:35.352028",
+ "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Alfred.json b/arena/Alfred.json
new file mode 100644
index 000000000..be510f1fd
--- /dev/null
+++ b/arena/Alfred.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Shadowless422/Alfred",
+ "timestamp": "2023-10-03T10:42:45.473477",
+ "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AlphaCISO.json b/arena/AlphaCISO.json
new file mode 100644
index 000000000..06791274b
--- /dev/null
+++ b/arena/AlphaCISO.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/alphaciso/AutoGPT",
+ "timestamp": "2023-10-21T08:26:41.961187",
+ "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AndersLensway.json b/arena/AndersLensway.json
new file mode 100644
index 000000000..6bbf68fdf
--- /dev/null
+++ b/arena/AndersLensway.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/4nd3rs/AutoGPT",
+ "timestamp": "2023-10-11T11:00:08.150159",
+ "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AntlerTestGPT.json b/arena/AntlerTestGPT.json
new file mode 100644
index 000000000..9df76d4a8
--- /dev/null
+++ b/arena/AntlerTestGPT.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/pjw1/AntlerAI", "timestamp": "2023-10-07T11:46:39Z", "commit_hash_to_benchmark": "f81e086e5647370854ec639c531c900775a99207", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/AppleGPT.json b/arena/AppleGPT.json
new file mode 100644
index 000000000..7fe3a7bee
--- /dev/null
+++ b/arena/AppleGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Nimit3-droid/AutoGPT",
+ "timestamp": "2023-10-03T11:59:15.495902",
+ "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AquaAgent.json b/arena/AquaAgent.json
new file mode 100644
index 000000000..6deb549db
--- /dev/null
+++ b/arena/AquaAgent.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/somnistudio/SomniGPT", "timestamp": "2023-10-06T16:40:14Z", "commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/ArtistManagerGPT.json b/arena/ArtistManagerGPT.json
new file mode 100644
index 000000000..881ed049b
--- /dev/null
+++ b/arena/ArtistManagerGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/AmahAjavon/AutoGPT",
+ "timestamp": "2023-10-28T20:32:15.845741",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AskOpie.json b/arena/AskOpie.json
new file mode 100644
index 000000000..a2f6bd393
--- /dev/null
+++ b/arena/AskOpie.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/arunqa/AutoGPT",
+ "timestamp": "2023-09-26T05:13:24.466017",
+ "commit_hash_to_benchmark": "4a8da53d85d466f2eb325c745a2c03cf88792e7d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Auto.json b/arena/Auto.json
new file mode 100644
index 000000000..9bad9db50
--- /dev/null
+++ b/arena/Auto.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Nikhil8652/AutoGPT",
+ "timestamp": "2023-10-16T09:12:17.452121",
+ "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AutoGPT-ariel.json b/arena/AutoGPT-ariel.json
new file mode 100644
index 000000000..cefa43620
--- /dev/null
+++ b/arena/AutoGPT-ariel.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/RedTachyon/AutoGPT",
+ "timestamp": "2023-10-21T22:31:30.871023",
+ "commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AutoGPT2.json b/arena/AutoGPT2.json
new file mode 100644
index 000000000..11a71f66f
--- /dev/null
+++ b/arena/AutoGPT2.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/SarahGrevy/AutoGPT", "timestamp": "2023-10-20T17:21:22Z", "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/AutoGenius.json b/arena/AutoGenius.json
new file mode 100644
index 000000000..3974b9dcc
--- /dev/null
+++ b/arena/AutoGenius.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/JasonDRZ/AutoGPT",
+ "timestamp": "2023-10-26T13:27:58.805270",
+ "commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AutoTDD.json b/arena/AutoTDD.json
new file mode 100644
index 000000000..ea61ddd82
--- /dev/null
+++ b/arena/AutoTDD.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/vshneer/AutoTDD",
+ "timestamp": "2023-10-11T19:14:30.939747",
+ "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AutoTestGenerator.json b/arena/AutoTestGenerator.json
new file mode 100644
index 000000000..c28d6da87
--- /dev/null
+++ b/arena/AutoTestGenerator.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/cagdasbas/AutoGPT",
+ "timestamp": "2023-10-15T08:43:40.193080",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/AwareAgent.json b/arena/AwareAgent.json
new file mode 100644
index 000000000..d4155dd67
--- /dev/null
+++ b/arena/AwareAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/LuisLechugaRuiz/AwareAgent",
+ "timestamp": "2023-10-26T10:10:01.481205",
+ "commit_hash_to_benchmark": "c180063dde49af02ed95ec4c019611da0a5540d7",
+ "branch_to_benchmark": "master"
+}
diff --git a/arena/Bagi_agent.json b/arena/Bagi_agent.json
new file mode 100644
index 000000000..4251bb424
--- /dev/null
+++ b/arena/Bagi_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git",
+ "timestamp": "2023-10-20T09:21:48.837635",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/BanglaSgAgent.json b/arena/BanglaSgAgent.json
new file mode 100644
index 000000000..12014fe8d
--- /dev/null
+++ b/arena/BanglaSgAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aniruddha-adhikary/AutoGPT",
+ "timestamp": "2023-09-27T15:32:24.056105",
+ "commit_hash_to_benchmark": "6f289e6dfa8246f8993b76c933527f3707b8d7e5",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Baptiste.json b/arena/Baptiste.json
new file mode 100644
index 000000000..691f62952
--- /dev/null
+++ b/arena/Baptiste.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Baptistecaille/AutoGPT",
+ "timestamp": "2023-10-01T19:44:23.416591",
+ "commit_hash_to_benchmark": "3da29eae45683457131ee8736bedae7e2a74fbba",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Bravo06.json b/arena/Bravo06.json
new file mode 100644
index 000000000..21ceec258
--- /dev/null
+++ b/arena/Bravo06.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/jafar-albadarneh/Bravo06GPT", "timestamp": "2023-10-04T23:01:27Z", "commit_hash_to_benchmark": "f8c177b4b0e4ca45a3a104011b866c0415c648f1", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/Brillante-AI.json b/arena/Brillante-AI.json
new file mode 100644
index 000000000..3c81a02c0
--- /dev/null
+++ b/arena/Brillante-AI.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/dabeer021/Brillante-AI", "timestamp": "2023-10-02T19:05:04Z", "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/Bunny.json b/arena/Bunny.json
new file mode 100644
index 000000000..33c2b0d1a
--- /dev/null
+++ b/arena/Bunny.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/razorhasbeen/AutoGPT",
+ "timestamp": "2023-10-03T11:50:56.725628",
+ "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/CCAgent.json b/arena/CCAgent.json
new file mode 100644
index 000000000..899172e34
--- /dev/null
+++ b/arena/CCAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ccsnow127/AutoGPT",
+ "timestamp": "2023-10-21T13:57:15.131761",
+ "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/CES-GPT.json b/arena/CES-GPT.json
new file mode 100644
index 000000000..016804e65
--- /dev/null
+++ b/arena/CES-GPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ces-sonnguyen/CES-GPT",
+ "timestamp": "2023-10-30T07:45:07.337258",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/CISLERK.json b/arena/CISLERK.json
new file mode 100644
index 000000000..1370a0a2d
--- /dev/null
+++ b/arena/CISLERK.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/cislerk/AutoGPT",
+ "timestamp": "2023-10-10T18:40:50.718850",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/CONNECTBOT.json b/arena/CONNECTBOT.json
new file mode 100644
index 000000000..b43e147a9
--- /dev/null
+++ b/arena/CONNECTBOT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/myncow/DocumentAgent.git",
+ "timestamp": "2023-10-31T21:21:28.951345",
+ "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/CYNO_AGENT.json b/arena/CYNO_AGENT.json
new file mode 100644
index 000000000..288802d5d
--- /dev/null
+++ b/arena/CYNO_AGENT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/dr1yl/AutoGPT",
+ "timestamp": "2023-10-09T20:01:05.041446",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ChadGPT.json b/arena/ChadGPT.json
new file mode 100644
index 000000000..6a378b1d8
--- /dev/null
+++ b/arena/ChadGPT.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/Ahmad-Alaziz/ChadGPT", "timestamp": "2023-10-26T09:39:35Z", "commit_hash_to_benchmark": "84dd029c011379791a6fec8b148b2982a2ef159e", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/ChrisGPT.json b/arena/ChrisGPT.json
new file mode 100644
index 000000000..6ec46681e
--- /dev/null
+++ b/arena/ChrisGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/darkcyber-ninja/AutoGPT",
+ "timestamp": "2023-10-31T17:55:41.458834",
+ "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/CodeAutoGPT.json b/arena/CodeAutoGPT.json
new file mode 100644
index 000000000..1780a4966
--- /dev/null
+++ b/arena/CodeAutoGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/hugomastromauro/AutoGPT",
+ "timestamp": "2023-11-01T13:21:42.624202",
+ "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/CreaitorMarketing.json b/arena/CreaitorMarketing.json
new file mode 100644
index 000000000..38ffca0f8
--- /dev/null
+++ b/arena/CreaitorMarketing.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/simonfunk/Auto-GPT", "timestamp": "2023-10-08T02:10:18Z", "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/CurieAssistant.json b/arena/CurieAssistant.json
new file mode 100644
index 000000000..bdbd14c9c
--- /dev/null
+++ b/arena/CurieAssistant.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/HMDCrew/AutoGPT",
+ "timestamp": "2023-10-06T20:41:26.293944",
+ "commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/DE.json b/arena/DE.json
new file mode 100644
index 000000000..fcea35c9d
--- /dev/null
+++ b/arena/DE.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/wic0144/AutoGPT",
+ "timestamp": "2023-10-26T09:05:21.013962",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/DavidsAgent.json b/arena/DavidsAgent.json
new file mode 100644
index 000000000..f824fd14d
--- /dev/null
+++ b/arena/DavidsAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/beisdog/AutoGPT",
+ "timestamp": "2023-09-29T22:06:18.846082",
+ "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Derpmaster.json b/arena/Derpmaster.json
new file mode 100644
index 000000000..6a4e159e5
--- /dev/null
+++ b/arena/Derpmaster.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/schumacher-m/Derpmaster",
+ "timestamp": "2023-10-30T21:10:27.407732",
+ "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/DevOpsAgent.json b/arena/DevOpsAgent.json
new file mode 100644
index 000000000..6f3384cd6
--- /dev/null
+++ b/arena/DevOpsAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/rahuldotar/AutoGPT",
+ "timestamp": "2023-10-02T11:34:29.870077",
+ "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Drench.json b/arena/Drench.json
new file mode 100644
index 000000000..49417551e
--- /dev/null
+++ b/arena/Drench.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/MohamedBasueny/AutoGPT-Drench",
+ "timestamp": "2023-10-27T01:28:13.869318",
+ "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Eduardo.json b/arena/Eduardo.json
new file mode 100644
index 000000000..dfffd902d
--- /dev/null
+++ b/arena/Eduardo.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/MuriloEduardo/AutoGPT.git",
+ "timestamp": "2023-09-25T03:18:20.659056",
+ "commit_hash_to_benchmark": "ffa76c3a192c36827669335de4390262da5fd972",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/EmbeddedAg.json b/arena/EmbeddedAg.json
new file mode 100644
index 000000000..b26355e38
--- /dev/null
+++ b/arena/EmbeddedAg.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT", "timestamp": "2023-10-26T09:15:50Z", "commit_hash_to_benchmark": "6c9152a95c8994898c47c85ea90ba58e0cc02c28", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/EnglishTestpaperAgent.json b/arena/EnglishTestpaperAgent.json
new file mode 100644
index 000000000..7271eb0c9
--- /dev/null
+++ b/arena/EnglishTestpaperAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kyannai/AutoGPT",
+ "timestamp": "2023-09-29T03:05:45.504690",
+ "commit_hash_to_benchmark": "1f367618edf903f38dff4dd064f96e611ffc5242",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ExampleAgent.json b/arena/ExampleAgent.json
new file mode 100644
index 000000000..2fb8c44a3
--- /dev/null
+++ b/arena/ExampleAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/janekdijkstra/AutoGPT",
+ "timestamp": "2023-10-16T12:12:54.998033",
+ "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/FLASH.json b/arena/FLASH.json
new file mode 100644
index 000000000..7cce9c10e
--- /dev/null
+++ b/arena/FLASH.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/flashdumper/AutoGPT",
+ "timestamp": "2023-10-30T23:02:13.653861",
+ "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/FactoryGPT.json b/arena/FactoryGPT.json
new file mode 100644
index 000000000..e66434c39
--- /dev/null
+++ b/arena/FactoryGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/neilmartindev/FactoryGPT",
+ "timestamp": "2023-10-04T16:24:58.525870",
+ "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/FcsummerGPT.json b/arena/FcsummerGPT.json
new file mode 100644
index 000000000..2f2eb88fa
--- /dev/null
+++ b/arena/FcsummerGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/fbk111/FcsummerGPT",
+ "timestamp": "2023-10-25T09:58:39.801277",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/FynAgent.json b/arena/FynAgent.json
new file mode 100644
index 000000000..1f006e63e
--- /dev/null
+++ b/arena/FynAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/tomkat-cr/AutoGPT.git",
+ "timestamp": "2023-10-18T09:41:21.282992",
+ "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/GG.json b/arena/GG.json
new file mode 100644
index 000000000..78421b484
--- /dev/null
+++ b/arena/GG.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/IgorCIs/AutoGPT",
+ "timestamp": "2023-09-27T14:01:20.964953",
+ "commit_hash_to_benchmark": "a14aadd91493886663232bfd23c0412609f2a2fc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/GPTTest.json b/arena/GPTTest.json
new file mode 100644
index 000000000..e2c1c0af3
--- /dev/null
+++ b/arena/GPTTest.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/h3llix/GPTTest.git",
+ "timestamp": "2023-11-02T10:56:53.142288",
+ "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/GameSoundGPT.json b/arena/GameSoundGPT.json
new file mode 100644
index 000000000..66fe962ab
--- /dev/null
+++ b/arena/GameSoundGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mordvinov/AutoGPT",
+ "timestamp": "2023-10-13T14:48:02.852293",
+ "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/GeorgeGPT.json b/arena/GeorgeGPT.json
new file mode 100644
index 000000000..83ce96df7
--- /dev/null
+++ b/arena/GeorgeGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/norn93/GeorgeGPT",
+ "timestamp": "2023-10-17T14:38:41.051458",
+ "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Granger.json b/arena/Granger.json
new file mode 100644
index 000000000..203e99c34
--- /dev/null
+++ b/arena/Granger.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/balloch/AutoGPTProblemSolver",
+ "timestamp": "2023-09-29T15:11:44.876627",
+ "commit_hash_to_benchmark": "9fb6d5bbbd6928402a5718b8c249811c6f682a88",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/HACKATHON.json b/arena/HACKATHON.json
new file mode 100644
index 000000000..7f29e7582
--- /dev/null
+++ b/arena/HACKATHON.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/manuel-soria/AutoGPT",
+ "timestamp": "2023-10-07T16:55:38.741776",
+ "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/HMD2.json b/arena/HMD2.json
new file mode 100644
index 000000000..5ef36bd18
--- /dev/null
+++ b/arena/HMD2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/HMDCrew/AutoGPT",
+ "timestamp": "2023-10-09T08:46:37.457740",
+ "commit_hash_to_benchmark": "9e353e09b5df39d4d410bef57cf17387331e96f6",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Heisenberg.json b/arena/Heisenberg.json
new file mode 100644
index 000000000..a77ce87d7
--- /dev/null
+++ b/arena/Heisenberg.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/georgehaws/Heisenberg",
+ "timestamp": "2023-10-02T16:07:18-07:00",
+ "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
+ "branch_to_benchmark": "master"
+}
diff --git a/arena/HekolcuAutoGPT.json b/arena/HekolcuAutoGPT.json
new file mode 100644
index 000000000..e64dd9c63
--- /dev/null
+++ b/arena/HekolcuAutoGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/hekolcu/AutoGPT",
+ "timestamp": "2023-09-30T17:31:20.979122",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/HuitzilAiAgent.json b/arena/HuitzilAiAgent.json
new file mode 100644
index 000000000..6e832eafa
--- /dev/null
+++ b/arena/HuitzilAiAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/codetitlan/AutoGPT-CDTHB",
+ "timestamp": "2023-10-03T15:04:54.856291",
+ "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Hypeman.json b/arena/Hypeman.json
new file mode 100644
index 000000000..d32bcb9e4
--- /dev/null
+++ b/arena/Hypeman.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kennyu/KenGPT",
+ "timestamp": "2023-09-27T19:50:31.443494",
+ "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/IncredibubbleTea.json b/arena/IncredibubbleTea.json
new file mode 100644
index 000000000..6908e6be2
--- /dev/null
+++ b/arena/IncredibubbleTea.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mariepop13/AutoGPT",
+ "timestamp": "2023-10-25T18:38:32.012583",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/JackGPT.json b/arena/JackGPT.json
new file mode 100644
index 000000000..007286814
--- /dev/null
+++ b/arena/JackGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/JackDance/AutoGPT",
+ "timestamp": "2023-10-09T08:26:35.181112",
+ "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Jarvis.json b/arena/Jarvis.json
new file mode 100644
index 000000000..bb098270e
--- /dev/null
+++ b/arena/Jarvis.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/elynch303/AutoGPT",
+ "timestamp": "2023-10-12T14:15:17.014333",
+ "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/JarvisAgent.json b/arena/JarvisAgent.json
new file mode 100644
index 000000000..f8cc9810f
--- /dev/null
+++ b/arena/JarvisAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/JadeCong/AutoGPT",
+ "timestamp": "2023-10-17T18:49:16.489653",
+ "commit_hash_to_benchmark": "0bd5d4420ec168194d5a93f62d890d33ab7d9940",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Jean-Michel.json b/arena/Jean-Michel.json
new file mode 100644
index 000000000..30791d295
--- /dev/null
+++ b/arena/Jean-Michel.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Yanniswein/Jean-Michel",
+ "timestamp": "2023-10-30T09:21:14.984080",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Job_GPT.json b/arena/Job_GPT.json
new file mode 100644
index 000000000..de73fba89
--- /dev/null
+++ b/arena/Job_GPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/SeaField-dev/AutoGPT.git",
+ "timestamp": "2023-09-25T09:35:03.022273",
+ "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/JoshAgent1.json b/arena/JoshAgent1.json
new file mode 100644
index 000000000..99378066a
--- /dev/null
+++ b/arena/JoshAgent1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/joshyorko/AutoGPT",
+ "timestamp": "2023-09-28T17:05:27.689905",
+ "commit_hash_to_benchmark": "959e1304d11f126c5a6914c3bb886549638d6b35",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/KnowledgeExtractor.json b/arena/KnowledgeExtractor.json
new file mode 100644
index 000000000..4a184f2fb
--- /dev/null
+++ b/arena/KnowledgeExtractor.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/arromaljj/KnowledgeExtractor",
+ "timestamp": "2023-10-04T13:01:50.037123",
+ "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/LAWYER_EMAD.json b/arena/LAWYER_EMAD.json
new file mode 100644
index 000000000..5d84d0872
--- /dev/null
+++ b/arena/LAWYER_EMAD.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/emads7/AutoGPT.git",
+ "timestamp": "2023-10-19T15:06:37.481038",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/LHRobot.json b/arena/LHRobot.json
new file mode 100644
index 000000000..98feac3b9
--- /dev/null
+++ b/arena/LHRobot.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/LH-Enterprise/AutoGPT",
+ "timestamp": "2023-10-07T01:05:31.627432",
+ "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Lab49Agent.json b/arena/Lab49Agent.json
new file mode 100644
index 000000000..cbb992264
--- /dev/null
+++ b/arena/Lab49Agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/FutureProofTechnology/AutoGPT",
+ "timestamp": "2023-10-12T10:28:34.275827",
+ "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/LbAgent.json b/arena/LbAgent.json
new file mode 100644
index 000000000..8ff9c0cc0
--- /dev/null
+++ b/arena/LbAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT",
+ "timestamp": "2023-10-30T10:20:40.082545",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/LegalAgent.json b/arena/LegalAgent.json
new file mode 100644
index 000000000..c57b30f85
--- /dev/null
+++ b/arena/LegalAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/gengrui1983/LegalGPT",
+ "timestamp": "2023-10-25T02:46:41.860987",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Light_Agent.json b/arena/Light_Agent.json
new file mode 100644
index 000000000..17fee68be
--- /dev/null
+++ b/arena/Light_Agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mohammed-radha-LightRing/AutoGPT",
+ "timestamp": "2023-10-01T07:10:46.497391",
+ "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/LinuzGPT.json b/arena/LinuzGPT.json
new file mode 100644
index 000000000..8cb096f0c
--- /dev/null
+++ b/arena/LinuzGPT.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/linusaltacc/AutoGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/Lirum.json b/arena/Lirum.json
new file mode 100644
index 000000000..da8dddd76
--- /dev/null
+++ b/arena/Lirum.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/rogerioth/AutoGPT",
+ "timestamp": "2023-10-12T23:04:51.600862",
+ "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MANU.json b/arena/MANU.json
new file mode 100644
index 000000000..7e1caed1f
--- /dev/null
+++ b/arena/MANU.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/manuel-soria/AutoGPT",
+ "timestamp": "2023-10-07T16:50:11.634586",
+ "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MEGATRON.json b/arena/MEGATRON.json
new file mode 100644
index 000000000..81182c372
--- /dev/null
+++ b/arena/MEGATRON.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/razorhasbeen/AutoGPT",
+ "timestamp": "2023-10-03T11:33:22.091896",
+ "commit_hash_to_benchmark": "d8d7fc4858a8d13407f6d7da360c6b5d398f2175",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MOBILE.json b/arena/MOBILE.json
new file mode 100644
index 000000000..13b9c1752
--- /dev/null
+++ b/arena/MOBILE.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/nel349/AutoGPT",
+ "timestamp": "2023-10-08T03:10:40.860972",
+ "commit_hash_to_benchmark": "683257b697392e5551fb86c81a72728029d12aa0",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Maharathi.json b/arena/Maharathi.json
new file mode 100644
index 000000000..c2a312c82
--- /dev/null
+++ b/arena/Maharathi.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/sampatkalyan/AutoGPTHackathon", "timestamp": "2023-10-02T08:16:27Z", "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/MangoAI.json b/arena/MangoAI.json
new file mode 100644
index 000000000..32250c07f
--- /dev/null
+++ b/arena/MangoAI.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/stargatejy/MangoAI",
+ "timestamp": "2023-10-24T10:11:38.967772",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MangoAgent-3.json b/arena/MangoAgent-3.json
new file mode 100644
index 000000000..72f5a8325
--- /dev/null
+++ b/arena/MangoAgent-3.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/stargatejy/MangoAI",
+ "timestamp": "2023-10-25T15:41:17.652038",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MangoAgent-4.json b/arena/MangoAgent-4.json
new file mode 100644
index 000000000..b49ad8707
--- /dev/null
+++ b/arena/MangoAgent-4.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/stargatejy/MangoAI",
+ "timestamp": "2023-10-27T16:28:23.804390",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MarketResearcherEduRob.json b/arena/MarketResearcherEduRob.json
new file mode 100644
index 000000000..6ee0afb41
--- /dev/null
+++ b/arena/MarketResearcherEduRob.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/fzoric8/AutoGPT",
+ "timestamp": "2023-11-01T09:36:16.357944",
+ "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Marx.json b/arena/Marx.json
new file mode 100644
index 000000000..69421b468
--- /dev/null
+++ b/arena/Marx.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/maxletemple/AutoGPT",
+ "timestamp": "2023-10-18T17:06:20.575710",
+ "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Mary.json b/arena/Mary.json
new file mode 100644
index 000000000..a47a8da58
--- /dev/null
+++ b/arena/Mary.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bigfatball/Auto-GPT.git",
+ "timestamp": "2023-10-22T23:40:22.765334",
+ "commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Melang.json b/arena/Melang.json
new file mode 100644
index 000000000..5345ede63
--- /dev/null
+++ b/arena/Melang.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Brian-Mwangi-developer/AutoGPT.git",
+ "timestamp": "2023-10-06T08:50:14.080962",
+ "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Miao.json b/arena/Miao.json
new file mode 100644
index 000000000..f3a169e49
--- /dev/null
+++ b/arena/Miao.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/StefanWan-Durham/AutoGPT.git",
+ "timestamp": "2023-10-02T15:05:19.789945",
+ "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MindwareGPT.json b/arena/MindwareGPT.json
new file mode 100644
index 000000000..1be44df5d
--- /dev/null
+++ b/arena/MindwareGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/0xtotaylor/MindwareGPT.git",
+ "timestamp": "2023-10-03T14:56:05.228408",
+ "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Mira.json b/arena/Mira.json
new file mode 100644
index 000000000..28585c526
--- /dev/null
+++ b/arena/Mira.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/enricofranke/EnricoAssistant",
+ "timestamp": "2023-10-25T23:21:35.799138",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MoTS.json b/arena/MoTS.json
new file mode 100644
index 000000000..efad4ea97
--- /dev/null
+++ b/arena/MoTS.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ghd9201/AutoGPT.git",
+ "timestamp": "2023-10-25T09:04:02.534683",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MojoBurrito.json b/arena/MojoBurrito.json
new file mode 100644
index 000000000..b9c0ad780
--- /dev/null
+++ b/arena/MojoBurrito.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/dawnkelly09/MojoBurrito",
+ "timestamp": "2023-10-01T20:24:10.596062",
+ "commit_hash_to_benchmark": "de3e9e702a988c6028cc8b873aeffc9d5d82c572",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MyAgent.json b/arena/MyAgent.json
new file mode 100644
index 000000000..d6f92e188
--- /dev/null
+++ b/arena/MyAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/AgentService/AutoGPT",
+ "timestamp": "2023-10-25T20:11:31.811596",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MyExample.json b/arena/MyExample.json
new file mode 100644
index 000000000..508515aed
--- /dev/null
+++ b/arena/MyExample.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/gabenitez/AutoGPT",
+ "timestamp": "2023-10-19T22:00:47.453159",
+ "commit_hash_to_benchmark": "b4588f6425912316e1512391e4392ca30d61e144",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MyExampleAgent.json b/arena/MyExampleAgent.json
new file mode 100644
index 000000000..cc3a9f86b
--- /dev/null
+++ b/arena/MyExampleAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/moizsajid/AutoGPT",
+ "timestamp": "2023-10-25T20:20:04.910747",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MyFirstAgent.json b/arena/MyFirstAgent.json
new file mode 100644
index 000000000..783c90f54
--- /dev/null
+++ b/arena/MyFirstAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/smaxaddington/AutoGPT",
+ "timestamp": "2023-10-14T15:27:15.090035",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MyFistAgent.json b/arena/MyFistAgent.json
new file mode 100644
index 000000000..baafc39a8
--- /dev/null
+++ b/arena/MyFistAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/eslam-fakhry/AutoGPT",
+ "timestamp": "2023-11-02T10:19:58.187866",
+ "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/MyTestAgent.json b/arena/MyTestAgent.json
new file mode 100644
index 000000000..a4c28dc7e
--- /dev/null
+++ b/arena/MyTestAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Penguin-N/AutoGPT.git",
+ "timestamp": "2023-10-18T14:01:28.986850",
+ "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/N.json b/arena/N.json
new file mode 100644
index 000000000..1d8b2dd9f
--- /dev/null
+++ b/arena/N.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/gentaag/AutoGPT",
+ "timestamp": "2023-10-28T15:16:15.189228",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/NASAssistant2.json b/arena/NASAssistant2.json
new file mode 100644
index 000000000..1359a3332
--- /dev/null
+++ b/arena/NASAssistant2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/IHIaadj/AutoGPT",
+ "timestamp": "2023-10-07T22:06:59.410391",
+ "commit_hash_to_benchmark": "7a33af387e6959506eb8f01b49d296defe587e6d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/NHAN_BOT.json b/arena/NHAN_BOT.json
new file mode 100644
index 000000000..a0e649b08
--- /dev/null
+++ b/arena/NHAN_BOT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Vannhanhk12/AutoGPT",
+ "timestamp": "2023-09-28T07:18:38.959135",
+ "commit_hash_to_benchmark": "a555e936c48bca8c794c7116d62a91628e59ac14",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/NadeemAgent.json b/arena/NadeemAgent.json
new file mode 100644
index 000000000..9898b7c19
--- /dev/null
+++ b/arena/NadeemAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kiyanwang/AutoGPT",
+ "timestamp": "2023-10-19T14:11:40.660035",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/NanAutoGPT.json b/arena/NanAutoGPT.json
new file mode 100644
index 000000000..8dd47a130
--- /dev/null
+++ b/arena/NanAutoGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/open-nan/NanAutoGPT",
+ "timestamp": "2023-10-30T10:25:02.617275",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/NoobSupreme.json b/arena/NoobSupreme.json
new file mode 100644
index 000000000..42208e3d9
--- /dev/null
+++ b/arena/NoobSupreme.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Ch0daboy/NoobSupreme.git",
+ "timestamp": "2023-10-01T08:08:13.753099",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/NumberOne.json b/arena/NumberOne.json
new file mode 100644
index 000000000..36c626ca0
--- /dev/null
+++ b/arena/NumberOne.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/micwin/AutoGPT",
+ "timestamp": "2023-10-05T17:01:11.784397",
+ "commit_hash_to_benchmark": "3b7d83a1a6d3fef1d415bfd1d4ba32ca1ba797cc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Orange.json b/arena/Orange.json
new file mode 100644
index 000000000..4a344241a
--- /dev/null
+++ b/arena/Orange.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/lewispeel/AutoGPT",
+ "timestamp": "2023-10-27T22:57:16.348948",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/PAgentAI.json b/arena/PAgentAI.json
new file mode 100644
index 000000000..55e7333e7
--- /dev/null
+++ b/arena/PAgentAI.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/plopez10/GPT",
+ "timestamp": "2023-10-26T03:25:27.221299",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Pacific.json b/arena/Pacific.json
new file mode 100644
index 000000000..f7f8d5a3a
--- /dev/null
+++ b/arena/Pacific.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/yifeng-qiu/AutoGPTAgent",
+ "timestamp": "2023-10-04T18:25:34.925806",
+ "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ParalegalAgent.json b/arena/ParalegalAgent.json
new file mode 100644
index 000000000..92e4c2513
--- /dev/null
+++ b/arena/ParalegalAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bRitch022/Auto-GPT",
+ "timestamp": "2023-10-06T18:48:23.644236",
+ "commit_hash_to_benchmark": "47eb5124fa97187d7f3fa4036e422cd771cf0ae7",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Pelle.json b/arena/Pelle.json
new file mode 100644
index 000000000..598c0708d
--- /dev/null
+++ b/arena/Pelle.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/pilotniq/AutoGPT",
+ "timestamp": "2023-10-23T19:14:27.176891",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Portalen.json b/arena/Portalen.json
new file mode 100644
index 000000000..6e4aa879f
--- /dev/null
+++ b/arena/Portalen.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/erlendjones/AutoGPT", "timestamp": "2023-09-22T20:39:08Z", "commit_hash_to_benchmark": "58d5b0d4a2fcc1bc12ed667db9d62a427a89c1a4", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/Pumu2_agent.json b/arena/Pumu2_agent.json
new file mode 100644
index 000000000..52510f0b0
--- /dev/null
+++ b/arena/Pumu2_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git",
+ "timestamp": "2023-10-20T09:26:07.885410",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Q.json b/arena/Q.json
new file mode 100644
index 000000000..9fad0c9cf
--- /dev/null
+++ b/arena/Q.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/CopsGit/AutoGPT",
+ "timestamp": "2023-10-27T19:07:51.053794",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/QA_AGENT.json b/arena/QA_AGENT.json
new file mode 100644
index 000000000..14816293f
--- /dev/null
+++ b/arena/QA_AGENT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ada-lovecraft/Ada-GPT",
+ "timestamp": "2023-09-20T08:14:19.186952",
+ "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/QuantumQuill.json b/arena/QuantumQuill.json
new file mode 100644
index 000000000..32e78e5ea
--- /dev/null
+++ b/arena/QuantumQuill.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/dleidisch/AutoAgent",
+ "timestamp": "2023-10-23T18:49:58.499309",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/RAGOptimizer.json b/arena/RAGOptimizer.json
new file mode 100644
index 000000000..f87cc692a
--- /dev/null
+++ b/arena/RAGOptimizer.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/nel349/AutoGPT",
+ "timestamp": "2023-10-07T22:51:51.507768",
+ "commit_hash_to_benchmark": "683257b697392e5551fb86c81a72728029d12aa0",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/RFPScanner.json b/arena/RFPScanner.json
new file mode 100644
index 000000000..bc4ba260d
--- /dev/null
+++ b/arena/RFPScanner.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/vidhatanand/AutoRFP",
+ "timestamp": "2023-10-09T12:37:08.692968",
+ "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/RONNIN.json b/arena/RONNIN.json
new file mode 100644
index 000000000..5e1b0ecc8
--- /dev/null
+++ b/arena/RONNIN.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Huarada/AutoGPT",
+ "timestamp": "2023-10-06T18:11:56.450481",
+ "commit_hash_to_benchmark": "a55ed27679f608003372feb9eb61f0104ca87858",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/RagsToRiches.json b/arena/RagsToRiches.json
new file mode 100644
index 000000000..7a3669733
--- /dev/null
+++ b/arena/RagsToRiches.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/faichele/AutoGPT",
+ "timestamp": "2023-09-28T11:01:12.962590",
+ "commit_hash_to_benchmark": "4f15b1c5825b3f044c901995e3399d4eacf7ec66",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/RandomVampirePictureBot.json b/arena/RandomVampirePictureBot.json
new file mode 100644
index 000000000..0c8b8dc48
--- /dev/null
+++ b/arena/RandomVampirePictureBot.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/KleinerCodeDrago/AutoGPT",
+ "timestamp": "2023-09-29T14:06:38.055747",
+ "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Raslebot.json b/arena/Raslebot.json
new file mode 100644
index 000000000..11169825d
--- /dev/null
+++ b/arena/Raslebot.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Jimcau/AutoGPT.git",
+ "timestamp": "2023-10-16T10:50:47.524483",
+ "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ResearchAgent.json b/arena/ResearchAgent.json
new file mode 100644
index 000000000..94855c1ce
--- /dev/null
+++ b/arena/ResearchAgent.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/Umar-Azam/AutoGPT-ResearchAgent", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/RosterAgent.json b/arena/RosterAgent.json
new file mode 100644
index 000000000..172d48e27
--- /dev/null
+++ b/arena/RosterAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ricowong0730/AutoGPT",
+ "timestamp": "2023-10-17T01:17:01.540294",
+ "commit_hash_to_benchmark": "265255120b1a64d1dd0a3a92ae3a7e697a103ecb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/SaasWebDev.json b/arena/SaasWebDev.json
new file mode 100644
index 000000000..98324aa37
--- /dev/null
+++ b/arena/SaasWebDev.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/giggei/AutoGPT",
+ "timestamp": "2023-10-02T15:44:54.390181",
+ "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/SaveAsPDF2.json b/arena/SaveAsPDF2.json
new file mode 100644
index 000000000..6024d173b
--- /dev/null
+++ b/arena/SaveAsPDF2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/LFarmbot/AutoFarm",
+ "timestamp": "2023-10-28T04:32:40.914756",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ShiviBot.json b/arena/ShiviBot.json
new file mode 100644
index 000000000..c9ce171be
--- /dev/null
+++ b/arena/ShiviBot.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kshivang/DabblerGPT",
+ "timestamp": "2023-10-07T01:30:06.292423",
+ "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/SkorkobaniecAgent.json b/arena/SkorkobaniecAgent.json
new file mode 100644
index 000000000..7b99a9e78
--- /dev/null
+++ b/arena/SkorkobaniecAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/RafalSkorka/AutoGPT",
+ "timestamp": "2023-10-30T19:05:24.676797",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/SmartAgent.json b/arena/SmartAgent.json
new file mode 100644
index 000000000..bc2f1563e
--- /dev/null
+++ b/arena/SmartAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/AgentService/AutoGPT",
+ "timestamp": "2023-10-25T20:06:46.743984",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/SmartGPT.json b/arena/SmartGPT.json
new file mode 100644
index 000000000..fb27875a2
--- /dev/null
+++ b/arena/SmartGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Mertkaann/AutoGPT.git",
+ "timestamp": "2023-09-29T21:46:29.940080",
+ "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/SouAgent.json b/arena/SouAgent.json
new file mode 100644
index 000000000..6a35c3699
--- /dev/null
+++ b/arena/SouAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/SouSingh/AutoGPT.git",
+ "timestamp": "2023-10-01T07:26:31.428044",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Stragegy_Steve.json b/arena/Stragegy_Steve.json
new file mode 100644
index 000000000..fc4aa7aae
--- /dev/null
+++ b/arena/Stragegy_Steve.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/talumo/AutoGPT",
+ "timestamp": "2023-09-28T14:31:36.771515",
+ "commit_hash_to_benchmark": "e374e516633b0afca1ab644b378fe1973c455782",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Susan.json b/arena/Susan.json
new file mode 100644
index 000000000..4689ef84e
--- /dev/null
+++ b/arena/Susan.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/CodeZeno/Susan",
+ "timestamp": "2023-11-03T11:29:28.704822",
+ "commit_hash_to_benchmark": "82fecfae1b4fb5d64050eefa77d8f028292aa8f3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TEST_TPK.json b/arena/TEST_TPK.json
new file mode 100644
index 000000000..ec2967892
--- /dev/null
+++ b/arena/TEST_TPK.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/seeseesky/AutoGPT",
+ "timestamp": "2023-10-31T04:31:39.337182",
+ "commit_hash_to_benchmark": "c3569d1842e6568ab1327e577603e71ad1feb622",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TLGPT.json b/arena/TLGPT.json
new file mode 100644
index 000000000..a402fcc6a
--- /dev/null
+++ b/arena/TLGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/TheoLawrence86/AutoGPT",
+ "timestamp": "2023-10-09T14:34:30.182635",
+ "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TMarafon.json b/arena/TMarafon.json
new file mode 100644
index 000000000..9828a895b
--- /dev/null
+++ b/arena/TMarafon.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/TMarafon/AutoGPT",
+ "timestamp": "2023-10-28T05:34:54.785662",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TRAVIS.json b/arena/TRAVIS.json
new file mode 100644
index 000000000..0e73f8841
--- /dev/null
+++ b/arena/TRAVIS.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/tskaggs/AutoGPT",
+ "timestamp": "2023-10-14T02:33:28.089406",
+ "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TeslaBot.json b/arena/TeslaBot.json
new file mode 100644
index 000000000..e55ae0cd3
--- /dev/null
+++ b/arena/TeslaBot.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/momokrunic/AutoGPT",
+ "timestamp": "2023-11-02T17:17:06.663164",
+ "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Tessa_AutoGPT_agent.json b/arena/Tessa_AutoGPT_agent.json
new file mode 100644
index 000000000..3f12f4959
--- /dev/null
+++ b/arena/Tessa_AutoGPT_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/DelicaTessa/AutoGPT_hackathon",
+ "timestamp": "2023-10-03T14:10:19.975796",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TestAgent.json b/arena/TestAgent.json
new file mode 100644
index 000000000..02c5b1b84
--- /dev/null
+++ b/arena/TestAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Nilllas/AutoGPT",
+ "timestamp": "2023-10-20T11:27:15.343842",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TestLbAgent.json b/arena/TestLbAgent.json
new file mode 100644
index 000000000..9c5730450
--- /dev/null
+++ b/arena/TestLbAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT",
+ "timestamp": "2023-10-31T03:25:23.064470",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TheAgency.json b/arena/TheAgency.json
new file mode 100644
index 000000000..8470fe1b9
--- /dev/null
+++ b/arena/TheAgency.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/shamantechnology/TheAgency", "timestamp": "2023-10-26T09:22:18Z", "commit_hash_to_benchmark": "3eef81f2579e3ab4822fb9155ee412c597fda9c2", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/TheAgent.json b/arena/TheAgent.json
new file mode 100644
index 000000000..4a515aaa1
--- /dev/null
+++ b/arena/TheAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/BiaoLiu2017/AutoGPT",
+ "timestamp": "2023-10-31T03:07:04.629241",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/TraceLLMAgent.json b/arena/TraceLLMAgent.json
new file mode 100644
index 000000000..d25ff491b
--- /dev/null
+++ b/arena/TraceLLMAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/vmwsree/AutoGPT",
+ "timestamp": "2023-10-15T21:48:38.027553",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/UGYUJI.json b/arena/UGYUJI.json
new file mode 100644
index 000000000..2d0abc304
--- /dev/null
+++ b/arena/UGYUJI.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ugyuji/AutoGPT",
+ "timestamp": "2023-10-20T04:42:28.397067",
+ "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/UTC-Crew.json b/arena/UTC-Crew.json
new file mode 100644
index 000000000..832d484f1
--- /dev/null
+++ b/arena/UTC-Crew.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/isayahc/AutoGPT.git",
+ "timestamp": "2023-10-04T17:06:48.154911",
+ "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/UmaruAgent.json b/arena/UmaruAgent.json
new file mode 100644
index 000000000..f3168d47a
--- /dev/null
+++ b/arena/UmaruAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/g1331/Auto-GPT",
+ "timestamp": "2023-10-16T13:51:10.464650",
+ "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/UniAgent.json b/arena/UniAgent.json
new file mode 100644
index 000000000..19d710fa2
--- /dev/null
+++ b/arena/UniAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/JovanKanevche/AutoGPT",
+ "timestamp": "2023-10-19T17:04:49.626683",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Verkiezingsprogrammas.json b/arena/Verkiezingsprogrammas.json
new file mode 100644
index 000000000..4a18be40c
--- /dev/null
+++ b/arena/Verkiezingsprogrammas.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bergje0810/AutoGPT",
+ "timestamp": "2023-10-11T11:47:16.993332",
+ "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/WRITER.json b/arena/WRITER.json
new file mode 100644
index 000000000..63849f43f
--- /dev/null
+++ b/arena/WRITER.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ezirmusitua/AutoGPT",
+ "timestamp": "2023-10-27T09:43:35.725996",
+ "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/WYC.json b/arena/WYC.json
new file mode 100644
index 000000000..0620b0aab
--- /dev/null
+++ b/arena/WYC.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/131250208/AutoGPT_YC",
+ "timestamp": "2023-10-20T07:42:11.493899",
+ "commit_hash_to_benchmark": "9219bfba0e028a557109b8e39c0fd91c1df243f8",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/WarlockAgent.json b/arena/WarlockAgent.json
new file mode 100644
index 000000000..55977a9f3
--- /dev/null
+++ b/arena/WarlockAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/warlockee/AutoGPT-wl",
+ "timestamp": "2023-10-27T21:30:11.455084",
+ "commit_hash_to_benchmark": "6f66376bb8a4116330fe867d9dff83f938f7aa14",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/WeatherInformer.json b/arena/WeatherInformer.json
new file mode 100644
index 000000000..4cc94787f
--- /dev/null
+++ b/arena/WeatherInformer.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/colour-me-bibi/Auto-GPT",
+ "timestamp": "2023-09-19T14:11:53.195135",
+ "commit_hash_to_benchmark": "2098e192da0ec8eecf0010ae62704e6727dfa42a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/WiseAgent.json b/arena/WiseAgent.json
new file mode 100644
index 000000000..02c03c0a2
--- /dev/null
+++ b/arena/WiseAgent.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/Ashish-Soni08/SoniGPT", "timestamp": "2023-10-08T18:39:38Z", "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/XXY.json b/arena/XXY.json
new file mode 100644
index 000000000..849438def
--- /dev/null
+++ b/arena/XXY.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xuxiangyang/AutoGPT",
+ "timestamp": "2023-10-14T04:40:39.828483",
+ "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/YOU.json b/arena/YOU.json
new file mode 100644
index 000000000..64629cf40
--- /dev/null
+++ b/arena/YOU.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/gentaag/AutoGPT",
+ "timestamp": "2023-10-28T14:03:12.555466",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/YoudaoAutoGPT.json b/arena/YoudaoAutoGPT.json
new file mode 100644
index 000000000..8e81970eb
--- /dev/null
+++ b/arena/YoudaoAutoGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jiezhangGt/AutoGPT",
+ "timestamp": "2023-10-20T03:02:17.342168",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/YoutubePost_agent.json b/arena/YoutubePost_agent.json
new file mode 100644
index 000000000..46b7d81b7
--- /dev/null
+++ b/arena/YoutubePost_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ramirez3rg/Auto-GPT",
+ "timestamp": "2023-09-21T20:35:24.266598",
+ "commit_hash_to_benchmark": "c72a35e92e4f95aca25221e216c3a49d0dbc739b",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Yui3.json b/arena/Yui3.json
new file mode 100644
index 000000000..439183005
--- /dev/null
+++ b/arena/Yui3.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/YuiChan04233/AutoGPT1",
+ "timestamp": "2023-10-08T02:03:48.189959",
+ "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Yutan_agent.json b/arena/Yutan_agent.json
new file mode 100644
index 000000000..468f5f373
--- /dev/null
+++ b/arena/Yutan_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/zyt329/AutoGPT",
+ "timestamp": "2023-09-29T21:47:23.741942",
+ "commit_hash_to_benchmark": "d6abb27db61142a70defd0c75b53985ea9a71fce",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ZJgpt.json b/arena/ZJgpt.json
new file mode 100644
index 000000000..0ac3d2567
--- /dev/null
+++ b/arena/ZJgpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jiezhangGt/AutoGPT",
+ "timestamp": "2023-10-20T04:04:28.198603",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Zeus.json b/arena/Zeus.json
new file mode 100644
index 000000000..0529b52c4
--- /dev/null
+++ b/arena/Zeus.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/MerlimarCode/ZeusGPT",
+ "timestamp": "2023-10-08T02:31:50.347357",
+ "commit_hash_to_benchmark": "0d5c2a98c071336e1bb48716cc25d85df2656ced",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ZhaoJianAutoGPT.json b/arena/ZhaoJianAutoGPT.json
new file mode 100644
index 000000000..b2aa60f7b
--- /dev/null
+++ b/arena/ZhaoJianAutoGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/zhaojianchn/AutoGPT",
+ "timestamp": "2023-10-17T09:41:06.331671",
+ "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ZoeyGPT.json b/arena/ZoeyGPT.json
new file mode 100644
index 000000000..c2be10804
--- /dev/null
+++ b/arena/ZoeyGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/carylaw/FatGPT",
+ "timestamp": "2023-10-25T10:03:47.295810",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/Zoidberg.json b/arena/Zoidberg.json
new file mode 100644
index 000000000..a56f26d43
--- /dev/null
+++ b/arena/Zoidberg.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/KapitanFernand/Zoidberg",
+ "timestamp": "2023-10-24T09:09:27.540179",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/aWOL.json b/arena/aWOL.json
new file mode 100644
index 000000000..62dc80261
--- /dev/null
+++ b/arena/aWOL.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aodrasa/aWOL",
+ "timestamp": "2023-10-11T01:24:01.516559",
+ "commit_hash_to_benchmark": "0856f6806177b30989b2be78004e059658efbbb4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/a_reverent_heart.json b/arena/a_reverent_heart.json
new file mode 100644
index 000000000..c0233bc38
--- /dev/null
+++ b/arena/a_reverent_heart.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/iamlockelightning/IAutoGPT",
+ "timestamp": "2023-10-08T08:03:31.352877",
+ "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/accidental-agent.json b/arena/accidental-agent.json
new file mode 100644
index 000000000..853068771
--- /dev/null
+++ b/arena/accidental-agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/siddharthsarda/accidental-agent",
+ "timestamp": "2023-09-20T08:07:08.337479",
+ "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/actor_tester.json b/arena/actor_tester.json
new file mode 100644
index 000000000..ec1f0138e
--- /dev/null
+++ b/arena/actor_tester.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/calmglow/mbtiagent",
+ "timestamp": "2023-10-25T13:15:04.296302",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/admariner.json b/arena/admariner.json
new file mode 100644
index 000000000..2811c5d5c
--- /dev/null
+++ b/arena/admariner.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/admariner/AutoGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/ag1.json b/arena/ag1.json
new file mode 100644
index 000000000..0dcfe64d4
--- /dev/null
+++ b/arena/ag1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/gensy421/AutoGensy",
+ "timestamp": "2023-10-26T06:31:27.588150",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/agent2.json b/arena/agent2.json
new file mode 100644
index 000000000..54b1247ca
--- /dev/null
+++ b/arena/agent2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/lukadumancic/AutoGPT",
+ "timestamp": "2023-10-28T16:08:43.603669",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/agentSmith.json b/arena/agentSmith.json
new file mode 100644
index 000000000..805e720e8
--- /dev/null
+++ b/arena/agentSmith.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Nameless8243/AgentSmith",
+ "timestamp": "2023-10-28T20:05:53.168061",
+ "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/agent_2.json b/arena/agent_2.json
new file mode 100644
index 000000000..1e169e0ee
--- /dev/null
+++ b/arena/agent_2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Exstor/AutoGPT",
+ "timestamp": "2023-10-31T20:56:49.313875",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/agentgpt.json b/arena/agentgpt.json
new file mode 100644
index 000000000..15aed81c4
--- /dev/null
+++ b/arena/agentgpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/UdaySagar-Git/AutoGPT.git",
+ "timestamp": "2023-10-24T05:24:58.972720",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/agsCehAgent.json b/arena/agsCehAgent.json
new file mode 100644
index 000000000..e628e79a3
--- /dev/null
+++ b/arena/agsCehAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/albags/AutoGPT.git",
+ "timestamp": "2023-10-19T11:30:12.759675",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ai_assistant.json b/arena/ai_assistant.json
new file mode 100644
index 000000000..2a0d85dee
--- /dev/null
+++ b/arena/ai_assistant.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xhjxhj001/AutoGPT",
+ "timestamp": "2023-10-23T12:05:13.923218",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/aiaudit.json b/arena/aiaudit.json
new file mode 100644
index 000000000..e1ecbb1dd
--- /dev/null
+++ b/arena/aiaudit.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bigsml/AutoGPT.git",
+ "timestamp": "2023-10-12T07:05:18.886183",
+ "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/aiwowo.json b/arena/aiwowo.json
new file mode 100644
index 000000000..3412ba3cd
--- /dev/null
+++ b/arena/aiwowo.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jeffxtang/AutoGPT",
+ "timestamp": "2023-10-09T05:25:37.720553",
+ "commit_hash_to_benchmark": "027054ae02657c37be0d28502bb5a22823eae9d9",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/aixiaoxin.json b/arena/aixiaoxin.json
new file mode 100644
index 000000000..a6fe001c5
--- /dev/null
+++ b/arena/aixiaoxin.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aixiaoxin123/AutoGPT",
+ "timestamp": "2023-10-27T05:44:49.265845",
+ "commit_hash_to_benchmark": "6c9152a95c8994898c47c85ea90ba58e0cc02c28",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/akela.json b/arena/akela.json
new file mode 100644
index 000000000..9c811d288
--- /dev/null
+++ b/arena/akela.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/SarahGrevy/AutoGPT",
+ "timestamp": "2023-10-20T18:56:31.210825",
+ "commit_hash_to_benchmark": "32300906c9aafea8c550fa2f9edcc113fbfc512c",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/analystgpt.json b/arena/analystgpt.json
new file mode 100644
index 000000000..9227c97a1
--- /dev/null
+++ b/arena/analystgpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/vleonidas/AutoGPT",
+ "timestamp": "2023-10-20T16:46:11.806635",
+ "commit_hash_to_benchmark": "825c3adf62879fa9f91a19c11010336de5c98bfc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/arbetsformedlingen.json b/arena/arbetsformedlingen.json
new file mode 100644
index 000000000..5afc4316e
--- /dev/null
+++ b/arena/arbetsformedlingen.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/esaiaswestberg/AutoGPT",
+ "timestamp": "2023-11-02T12:35:40.378520",
+ "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/assistant1.json b/arena/assistant1.json
new file mode 100644
index 000000000..8bb51d2fe
--- /dev/null
+++ b/arena/assistant1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/BarneyChambers/AutoGPT",
+ "timestamp": "2023-10-16T18:35:05.779206",
+ "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/autoai.json b/arena/autoai.json
new file mode 100644
index 000000000..519790524
--- /dev/null
+++ b/arena/autoai.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/maanvithag/AutoGPT",
+ "timestamp": "2023-10-09T16:19:12.986257",
+ "commit_hash_to_benchmark": "3bd8ae48433fa46552719de050ded576a3bef4b9",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/autocoder.json b/arena/autocoder.json
new file mode 100644
index 000000000..8d1fd33e6
--- /dev/null
+++ b/arena/autocoder.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mtx-light/AutoGPT",
+ "timestamp": "2023-10-29T07:33:17.228393",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/autogbd.json b/arena/autogbd.json
new file mode 100644
index 000000000..77f7f4b5d
--- /dev/null
+++ b/arena/autogbd.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kylaro/AutoGBD",
+ "timestamp": "2023-10-09T11:45:26.637129",
+ "commit_hash_to_benchmark": "f77d383a9f5e66a35d6008bd43cab4d93999cb61",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/autogpt-hackathon2.json b/arena/autogpt-hackathon2.json
new file mode 100644
index 000000000..41960393b
--- /dev/null
+++ b/arena/autogpt-hackathon2.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/autogpt.json b/arena/autogpt.json
new file mode 100644
index 000000000..931aa3aa5
--- /dev/null
+++ b/arena/autogpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT",
+ "timestamp": "2023-11-15T07:22:09.723393",
+ "commit_hash_to_benchmark": "fa357dd13928baa4d1e30054bc75edc5d68b08f1",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/autogpt_hackathon.json b/arena/autogpt_hackathon.json
new file mode 100644
index 000000000..41960393b
--- /dev/null
+++ b/arena/autogpt_hackathon.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/autogpt_hackathon1.json b/arena/autogpt_hackathon1.json
new file mode 100644
index 000000000..41960393b
--- /dev/null
+++ b/arena/autogpt_hackathon1.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/ThisisHubert/AutoGPT-hackathon", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/autogpt_warlock.json b/arena/autogpt_warlock.json
new file mode 100644
index 000000000..5f6e9c0a5
--- /dev/null
+++ b/arena/autogpt_warlock.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/warlockee/AutoGPT-wl",
+ "timestamp": "2023-10-27T00:46:05.266939",
+ "commit_hash_to_benchmark": "6f66376bb8a4116330fe867d9dff83f938f7aa14",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/autogptagent.json b/arena/autogptagent.json
new file mode 100644
index 000000000..589001597
--- /dev/null
+++ b/arena/autogptagent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/YasienDwieb/AutoGPT",
+ "timestamp": "2023-11-04T21:13:17.223261",
+ "commit_hash_to_benchmark": "0b55de62dc61a33ccf944d80b6d55c730286e07d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/avengaGPT.json b/arena/avengaGPT.json
new file mode 100644
index 000000000..f95163865
--- /dev/null
+++ b/arena/avengaGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/sebabetz/AutoGPT",
+ "timestamp": "2023-10-24T05:25:26.059512",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/babe_perphorator_.json b/arena/babe_perphorator_.json
new file mode 100644
index 000000000..ed3396907
--- /dev/null
+++ b/arena/babe_perphorator_.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/beavishead/automaton.git",
+ "timestamp": "2023-10-11T09:43:19.859956",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/baby_agent.json b/arena/baby_agent.json
new file mode 100644
index 000000000..ee8f386cc
--- /dev/null
+++ b/arena/baby_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/keli-61/AutoK",
+ "timestamp": "2023-10-19T07:39:13.300108",
+ "commit_hash_to_benchmark": "1a30d00194b46f8b923bab191404ce9123e34bdf",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/bait.json b/arena/bait.json
new file mode 100644
index 000000000..9c886bfba
--- /dev/null
+++ b/arena/bait.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/a0balaton/AutoGPT",
+ "timestamp": "2023-11-03T07:38:34.616504",
+ "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/beyond.json b/arena/beyond.json
new file mode 100644
index 000000000..dd51cc2fe
--- /dev/null
+++ b/arena/beyond.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/sn0wdown/AutoGPT",
+ "timestamp": "2023-10-25T07:22:09.723393",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/bigman.json b/arena/bigman.json
new file mode 100644
index 000000000..00d439582
--- /dev/null
+++ b/arena/bigman.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bathrobe/autogpt",
+ "timestamp": "2023-10-04T18:32:29.402925",
+ "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/billy.json b/arena/billy.json
new file mode 100644
index 000000000..44253eded
--- /dev/null
+++ b/arena/billy.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/khelil/AutoGPT",
+ "timestamp": "2023-10-14T17:51:54.044334",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/bingoTesting.json b/arena/bingoTesting.json
new file mode 100644
index 000000000..a8fd1e210
--- /dev/null
+++ b/arena/bingoTesting.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bingotyty/AutoGPT",
+ "timestamp": "2023-11-06T04:16:38.612948",
+ "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/bosaeed_agent.json b/arena/bosaeed_agent.json
new file mode 100644
index 000000000..e2a1dcc97
--- /dev/null
+++ b/arena/bosaeed_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bosaeed/AutoGPT.git",
+ "timestamp": "2023-10-03T15:31:04.721867",
+ "commit_hash_to_benchmark": "3da29eae45683457131ee8736bedae7e2a74fbba",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/bot.json b/arena/bot.json
new file mode 100644
index 000000000..3552e7447
--- /dev/null
+++ b/arena/bot.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Sampson2016/AutoGPT",
+ "timestamp": "2023-09-26T07:44:15.563183",
+ "commit_hash_to_benchmark": "3d4307a848880c8509e8356bbb9146f0e6f917f4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/bot01.json b/arena/bot01.json
new file mode 100644
index 000000000..eca05f793
--- /dev/null
+++ b/arena/bot01.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Arthur-Heng/AutoGPT",
+ "timestamp": "2023-10-12T04:16:30.658280",
+ "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/buddy.json b/arena/buddy.json
new file mode 100644
index 000000000..3b2653f9d
--- /dev/null
+++ b/arena/buddy.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/danhamilt/AutoGPT",
+ "timestamp": "2023-10-09T01:07:11.246485",
+ "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/burt.json b/arena/burt.json
new file mode 100644
index 000000000..7f9acb5ef
--- /dev/null
+++ b/arena/burt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/D4t4DrU1d/burt",
+ "timestamp": "2023-10-05T14:00:59.740170",
+ "commit_hash_to_benchmark": "a55ed27679f608003372feb9eb61f0104ca87858",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/business.json b/arena/business.json
new file mode 100644
index 000000000..c086daeaa
--- /dev/null
+++ b/arena/business.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/danielfebrero/AutoGPT",
+ "timestamp": "2023-10-21T16:12:05.424875",
+ "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/byl.json b/arena/byl.json
new file mode 100644
index 000000000..c57a574d5
--- /dev/null
+++ b/arena/byl.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/yoonh12/byl",
+ "timestamp": "2023-10-01T08:36:20.309716",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/career-agent.json b/arena/career-agent.json
new file mode 100644
index 000000000..ba2877abf
--- /dev/null
+++ b/arena/career-agent.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/asifdotpy/CareerGPT", "timestamp": "2023-10-23T09:20:51Z", "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/caud.json b/arena/caud.json
new file mode 100644
index 000000000..63dcaeef4
--- /dev/null
+++ b/arena/caud.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Swiftyos/CAUD",
+ "timestamp": "2023-10-07T15:44:40.526955",
+ "commit_hash_to_benchmark": "7a33af387e6959506eb8f01b49d296defe587e6d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ccace.json b/arena/ccace.json
new file mode 100644
index 000000000..ae1628cd8
--- /dev/null
+++ b/arena/ccace.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ccsnow127/AutoGPT",
+ "timestamp": "2023-10-23T08:28:38.119283",
+ "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/chappigpt.json b/arena/chappigpt.json
new file mode 100644
index 000000000..a136db128
--- /dev/null
+++ b/arena/chappigpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Wiradjuri/chappi.git",
+ "timestamp": "2023-10-08T06:20:43.527806",
+ "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/chappyAi.json b/arena/chappyAi.json
new file mode 100644
index 000000000..3da98b8c7
--- /dev/null
+++ b/arena/chappyAi.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Wiradjuri/chappi.git",
+ "timestamp": "2023-10-08T06:50:59.175273",
+ "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/chatgpt_taller.json b/arena/chatgpt_taller.json
new file mode 100644
index 000000000..996c78970
--- /dev/null
+++ b/arena/chatgpt_taller.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/leobusar/AutoGPT",
+ "timestamp": "2023-10-10T04:06:42.480712",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/chengshu.json b/arena/chengshu.json
new file mode 100644
index 000000000..e4cffdb81
--- /dev/null
+++ b/arena/chengshu.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/skadai/AutoGPT",
+ "timestamp": "2023-10-26T06:54:04.511066",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/chenzo.json b/arena/chenzo.json
new file mode 100644
index 000000000..9717e91a7
--- /dev/null
+++ b/arena/chenzo.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/chenzino/AutoGPT",
+ "timestamp": "2023-10-05T00:25:37.141373",
+ "commit_hash_to_benchmark": "7f89b8aae8748bc88b29ca94c3604ba540bbef94",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/cislerk2.json b/arena/cislerk2.json
new file mode 100644
index 000000000..3d4c9dd10
--- /dev/null
+++ b/arena/cislerk2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/cislerk/AutoGPT",
+ "timestamp": "2023-10-10T21:05:38.064647",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/codebutler.json b/arena/codebutler.json
new file mode 100644
index 000000000..c42fae827
--- /dev/null
+++ b/arena/codebutler.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/AJV009/AutoGPT", "timestamp": "2023-10-26T05:03:09Z", "commit_hash_to_benchmark": "03a95a5333db52ac5b129306e47423b638d649b0", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/coder_first.json b/arena/coder_first.json
new file mode 100644
index 000000000..5e8048a32
--- /dev/null
+++ b/arena/coder_first.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mtx-light/AutoGPT",
+ "timestamp": "2023-10-29T07:22:26.774555",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/contentstrategy.json b/arena/contentstrategy.json
new file mode 100644
index 000000000..d1b9dd5af
--- /dev/null
+++ b/arena/contentstrategy.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/banderson12/AutoGPT", "timestamp": "2023-10-21T04:13:13Z", "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/cssupdater.json b/arena/cssupdater.json
new file mode 100644
index 000000000..91959adcb
--- /dev/null
+++ b/arena/cssupdater.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mplummeridge/AutoGPT",
+ "timestamp": "2023-10-24T01:25:47.059251",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/da-agent.json b/arena/da-agent.json
new file mode 100644
index 000000000..78bce3e7e
--- /dev/null
+++ b/arena/da-agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/rayzh-lab/AutoGPT",
+ "timestamp": "2023-10-12T13:37:26.964846",
+ "commit_hash_to_benchmark": "766796ae1e8c07cf2a03b607621c3da6e1f01a31",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/date-buffer.json b/arena/date-buffer.json
new file mode 100644
index 000000000..ea91442b8
--- /dev/null
+++ b/arena/date-buffer.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jackbullen/AutoGPT",
+ "timestamp": "2023-10-14T03:55:27.817045",
+ "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/davidtest1.json b/arena/davidtest1.json
new file mode 100644
index 000000000..fbaa94451
--- /dev/null
+++ b/arena/davidtest1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ningzero/AutoGPTTest",
+ "timestamp": "2023-11-01T10:08:15.790059",
+ "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/davidtestagent.json b/arena/davidtestagent.json
new file mode 100644
index 000000000..0fd27d2b5
--- /dev/null
+++ b/arena/davidtestagent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ningzero/AutoGPTTest",
+ "timestamp": "2023-11-01T09:29:35.474709",
+ "commit_hash_to_benchmark": "bc61ea35b5a52cc948657aac0ed8fc3f3191ec04",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/dda.json b/arena/dda.json
new file mode 100644
index 000000000..3f628dd87
--- /dev/null
+++ b/arena/dda.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aiherrera1/AutoGPT",
+ "timestamp": "2023-10-15T18:03:04.765167",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/decision-maker.json b/arena/decision-maker.json
new file mode 100644
index 000000000..623522fe2
--- /dev/null
+++ b/arena/decision-maker.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/vishnub1626/AutoGPT",
+ "timestamp": "2023-09-28T11:33:39.045838",
+ "commit_hash_to_benchmark": "4f15b1c5825b3f044c901995e3399d4eacf7ec66",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/dev_agent.json b/arena/dev_agent.json
new file mode 100644
index 000000000..25aec8ac7
--- /dev/null
+++ b/arena/dev_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/pedrovvitor/AutoGPT",
+ "timestamp": "2023-10-15T14:25:07.534330",
+ "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/devagent.json b/arena/devagent.json
new file mode 100644
index 000000000..f65809e14
--- /dev/null
+++ b/arena/devagent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/w6m6/kkgpt",
+ "timestamp": "2023-10-20T08:29:25.708364",
+ "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/dive2code.json b/arena/dive2code.json
new file mode 100644
index 000000000..2280c1bef
--- /dev/null
+++ b/arena/dive2code.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/qwdqwqdwqd/autogpt", "timestamp": "2023-10-25T17:55:18Z", "commit_hash_to_benchmark": "c8d239ef6492d7fe30c099909e01a2eede678b70", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/dndagent.json b/arena/dndagent.json
new file mode 100644
index 000000000..9617293db
--- /dev/null
+++ b/arena/dndagent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xSudoNymx/AutoGPT",
+ "timestamp": "2023-10-13T04:48:12.424344",
+ "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/dy_agent.json b/arena/dy_agent.json
new file mode 100644
index 000000000..fd5c981b1
--- /dev/null
+++ b/arena/dy_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/dyabel/AutoGPT",
+ "timestamp": "2023-09-24T07:25:55.818276",
+ "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/dy_agent2.json b/arena/dy_agent2.json
new file mode 100644
index 000000000..c6ae45ee6
--- /dev/null
+++ b/arena/dy_agent2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/dyabel/AutoGPT",
+ "timestamp": "2023-09-24T09:30:13.885689",
+ "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/easn.json b/arena/easn.json
new file mode 100644
index 000000000..c7ba6bcad
--- /dev/null
+++ b/arena/easn.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/qazwsxdshb/AutoGPT",
+ "timestamp": "2023-10-21T08:00:39.287093",
+ "commit_hash_to_benchmark": "415b4ceed1417d0b21d87d7d4ea0cd38943e264f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/eddy.json b/arena/eddy.json
new file mode 100644
index 000000000..12e625b4c
--- /dev/null
+++ b/arena/eddy.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ltxctdbnn/AutoGPT",
+ "timestamp": "2023-10-17T08:42:59.396592",
+ "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ekc911_agent.json b/arena/ekc911_agent.json
new file mode 100644
index 000000000..f755e78ea
--- /dev/null
+++ b/arena/ekc911_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/seacrest/ekc911GPT.git",
+ "timestamp": "2023-10-05T03:09:36.845932",
+ "commit_hash_to_benchmark": "73ef89e03a719ec1b2f01b0f04e9b1f64ffb2a7d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/engineer.json b/arena/engineer.json
new file mode 100644
index 000000000..ef0a2f12e
--- /dev/null
+++ b/arena/engineer.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bary12/AutoGPT",
+ "timestamp": "2023-10-18T07:21:47.127207",
+ "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/evlyn.json b/arena/evlyn.json
new file mode 100644
index 000000000..115c41e11
--- /dev/null
+++ b/arena/evlyn.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/TimothyZhang/AutoGPT",
+ "timestamp": "2023-09-26T04:13:50.107902",
+ "commit_hash_to_benchmark": "e8aae7731919ee37444fd0871d05bff38f03ab66",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/evo-ninja.json b/arena/evo-ninja.json
new file mode 100644
index 000000000..e7ec02cb9
--- /dev/null
+++ b/arena/evo-ninja.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/polywrap/evo.ninja",
+ "timestamp": "2023-10-26T09:05:21.013962",
+ "commit_hash_to_benchmark": "8832a1008607ab8a27de81fbea69bc73c3febb6f",
+ "branch_to_benchmark": "dev"
+} \ No newline at end of file
diff --git a/arena/evo.json b/arena/evo.json
new file mode 100644
index 000000000..48de830fe
--- /dev/null
+++ b/arena/evo.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/abdllahdev/evo",
+ "timestamp": "2023-09-24T04:36:48.363989",
+ "commit_hash_to_benchmark": "075529ddc9cbca45ff98f0701baed9b89a712c23",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/faran.json b/arena/faran.json
new file mode 100644
index 000000000..d67d39544
--- /dev/null
+++ b/arena/faran.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/faranbutt/AutoGPT",
+ "timestamp": "2023-10-03T11:37:15.047378",
+ "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/first-agent.json b/arena/first-agent.json
new file mode 100644
index 000000000..34eb08d44
--- /dev/null
+++ b/arena/first-agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/DG1202/AutoGPT.git",
+ "timestamp": "2023-10-22T15:08:00.869208",
+ "commit_hash_to_benchmark": "16e266c65fb4620a1b1397532c503fa426ec191d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/foobar.json b/arena/foobar.json
new file mode 100644
index 000000000..e50206676
--- /dev/null
+++ b/arena/foobar.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/sosthoff/AutoGPT",
+ "timestamp": "2023-10-07T17:23:59.763991",
+ "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/frankgarcia.json b/arena/frankgarcia.json
new file mode 100644
index 000000000..b02dd557d
--- /dev/null
+++ b/arena/frankgarcia.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/variavista/AutoGPT",
+ "timestamp": "2023-09-28T07:03:33.140557",
+ "commit_hash_to_benchmark": "a555e936c48bca8c794c7116d62a91628e59ac14",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/fritzgpt.json b/arena/fritzgpt.json
new file mode 100644
index 000000000..e9bae34ff
--- /dev/null
+++ b/arena/fritzgpt.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/bsenst/FritzGPT", "timestamp": "2023-10-07T11:54:36Z", "commit_hash_to_benchmark": "bb960ffb9fadc45fe4fb5277053caa831f196578", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/fst.json b/arena/fst.json
new file mode 100644
index 000000000..97216c4dd
--- /dev/null
+++ b/arena/fst.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/onewesong/AutoGPT",
+ "timestamp": "2023-10-10T07:04:45.268630",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/fuzz_gen.json b/arena/fuzz_gen.json
new file mode 100644
index 000000000..87273ae48
--- /dev/null
+++ b/arena/fuzz_gen.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/stplaydog/AutoGPT",
+ "timestamp": "2023-09-29T16:15:33.360163",
+ "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/gaby_agent.json b/arena/gaby_agent.json
new file mode 100644
index 000000000..7b57d6dae
--- /dev/null
+++ b/arena/gaby_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://ggonza156: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@github.com/ggonza156/AutoGPT",
+ "timestamp": "2023-10-21T23:52:39.199690",
+ "commit_hash_to_benchmark": "eda21d51921899756bf866cf5c4d0f2dcd3e2e23",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/gen_fuzz.json b/arena/gen_fuzz.json
new file mode 100644
index 000000000..c6486156c
--- /dev/null
+++ b/arena/gen_fuzz.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/stplaydog/AutoGPT",
+ "timestamp": "2023-09-29T17:45:56.921760",
+ "commit_hash_to_benchmark": "76c321d6b1a3c6ed938c90149a2954b7dade761a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ghostcoder.json b/arena/ghostcoder.json
new file mode 100644
index 000000000..738061238
--- /dev/null
+++ b/arena/ghostcoder.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/aorwall/AutoGPT", "timestamp": "2023-10-26T07:02:18Z", "commit_hash_to_benchmark": "580b4467851b879ef6ce369128e8c7a0399f8877", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/gipity.json b/arena/gipity.json
new file mode 100644
index 000000000..84d2d893e
--- /dev/null
+++ b/arena/gipity.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/koad/gpt",
+ "timestamp": "2023-10-02T19:47:45.668048",
+ "commit_hash_to_benchmark": "163ab75379e1ee7792f50d4d70a1f482ca9cb6a1",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/gpt-dev-engineer-agent.json b/arena/gpt-dev-engineer-agent.json
new file mode 100644
index 000000000..080c9ab04
--- /dev/null
+++ b/arena/gpt-dev-engineer-agent.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/ATheorell/AutoGPTArenaHack", "timestamp": "2023-10-26T09:33:03Z", "commit_hash_to_benchmark": "1e4f2dc004b92b9f236543674f94fb9f0af9bb2e", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/gpt-eng-forge.json b/arena/gpt-eng-forge.json
new file mode 100644
index 000000000..348120b3a
--- /dev/null
+++ b/arena/gpt-eng-forge.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/pbharrin/AutoGPT",
+ "timestamp": "2023-09-26T17:55:18.530567",
+ "commit_hash_to_benchmark": "a09d2a581f7b435ea55aa32a5fc7bbb093f4d021",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/gpt-engineer.json b/arena/gpt-engineer.json
new file mode 100644
index 000000000..080c9ab04
--- /dev/null
+++ b/arena/gpt-engineer.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/ATheorell/AutoGPTArenaHack", "timestamp": "2023-10-26T09:33:03Z", "commit_hash_to_benchmark": "1e4f2dc004b92b9f236543674f94fb9f0af9bb2e", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/gpt_for_beans.json b/arena/gpt_for_beans.json
new file mode 100644
index 000000000..5f9e89282
--- /dev/null
+++ b/arena/gpt_for_beans.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xiazaiba7/AutoGPT.git",
+ "timestamp": "2023-11-02T06:07:34.435957",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/hall_oto.json b/arena/hall_oto.json
new file mode 100644
index 000000000..09928183c
--- /dev/null
+++ b/arena/hall_oto.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/helloworld4774/AutoGPT.git",
+ "timestamp": "2023-10-01T17:47:00.644268",
+ "commit_hash_to_benchmark": "26cf7c2e3f7b8f61ecda9e301f7a4b36f2b14f2f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/han.json b/arena/han.json
new file mode 100644
index 000000000..8cf8cb54c
--- /dev/null
+++ b/arena/han.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Hanhan0831/AutoGPT",
+ "timestamp": "2023-10-14T01:01:58.300995",
+ "commit_hash_to_benchmark": "93e3ec36ed6cd9e5e60585f016ad3bef4e1c52cb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/happy_guy.json b/arena/happy_guy.json
new file mode 100644
index 000000000..d1df91da3
--- /dev/null
+++ b/arena/happy_guy.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jianglonghui/AutoGPT",
+ "timestamp": "2023-11-03T08:54:39.949387",
+ "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/hello.json b/arena/hello.json
new file mode 100644
index 000000000..44d8836c8
--- /dev/null
+++ b/arena/hello.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ldnvnbl/AutoGPT",
+ "timestamp": "2023-10-20T09:37:16.860422",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/hodri.json b/arena/hodri.json
new file mode 100644
index 000000000..32e489bfc
--- /dev/null
+++ b/arena/hodri.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bilisim1995/AutoGPT",
+ "timestamp": "2023-10-27T10:51:20.447157",
+ "commit_hash_to_benchmark": "f4985395a94da84b79252bd4d88e040472e1bf6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/houxe.json b/arena/houxe.json
new file mode 100644
index 000000000..ab5a7072c
--- /dev/null
+++ b/arena/houxe.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jiarung/AutoGPTTest",
+ "timestamp": "2023-10-30T08:30:59.320850",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/icode.json b/arena/icode.json
new file mode 100644
index 000000000..d71f8df81
--- /dev/null
+++ b/arena/icode.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/krishnaji/AutoGPT",
+ "timestamp": "2023-10-13T01:09:31.395541",
+ "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/iku2.json b/arena/iku2.json
new file mode 100644
index 000000000..63b33adfb
--- /dev/null
+++ b/arena/iku2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/IkuOhama/AutoGPT",
+ "timestamp": "2023-09-27T22:46:33.754238",
+ "commit_hash_to_benchmark": "793ff1c163bb0f9bd3e0c788b4978b8dc193ba6a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/illynet.json b/arena/illynet.json
new file mode 100644
index 000000000..269222fc6
--- /dev/null
+++ b/arena/illynet.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/illyx1/AutoGPT.git",
+ "timestamp": "2023-10-26T06:51:32.589776",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/illynetV2.json b/arena/illynetV2.json
new file mode 100644
index 000000000..005672b39
--- /dev/null
+++ b/arena/illynetV2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/illyx1/AutoGPT.git",
+ "timestamp": "2023-10-26T13:14:45.725000",
+ "commit_hash_to_benchmark": "19175badeefc1325f3fa1a7797ddcfb913c23076",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/illyx1.json b/arena/illyx1.json
new file mode 100644
index 000000000..9cedd5c60
--- /dev/null
+++ b/arena/illyx1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/illyx1/AutoGPT.git",
+ "timestamp": "2023-10-26T12:36:26.810636",
+ "commit_hash_to_benchmark": "19175badeefc1325f3fa1a7797ddcfb913c23076",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/info-retrieval.json b/arena/info-retrieval.json
new file mode 100644
index 000000000..1aa51aac7
--- /dev/null
+++ b/arena/info-retrieval.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/paperMoose/AutoGPT",
+ "timestamp": "2023-10-07T21:38:11.070180",
+ "commit_hash_to_benchmark": "a00d880a3fd62373f53a0b0a45c9dcfdb45968e4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ivangpt_agent.json b/arena/ivangpt_agent.json
new file mode 100644
index 000000000..edf940b22
--- /dev/null
+++ b/arena/ivangpt_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ivanliu1989/Auto-GPT",
+ "timestamp": "2023-10-29T11:24:30.873532",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/jarvis2.json b/arena/jarvis2.json
new file mode 100644
index 000000000..c628f8f54
--- /dev/null
+++ b/arena/jarvis2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/MissingDLL/AutoGPT",
+ "timestamp": "2023-10-08T15:23:46.256775",
+ "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/jarvis3.json b/arena/jarvis3.json
new file mode 100644
index 000000000..c54000f16
--- /dev/null
+++ b/arena/jarvis3.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/MissingDLL/AutoGPT",
+ "timestamp": "2023-10-08T15:58:33.790030",
+ "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/jaxbob1.json b/arena/jaxbob1.json
new file mode 100644
index 000000000..db115ceb2
--- /dev/null
+++ b/arena/jaxbob1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/redthing1/AutoGPT",
+ "timestamp": "2023-10-05T20:02:22.372414",
+ "commit_hash_to_benchmark": "3b7d83a1a6d3fef1d415bfd1d4ba32ca1ba797cc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/job_apply.json b/arena/job_apply.json
new file mode 100644
index 000000000..afbeed4e9
--- /dev/null
+++ b/arena/job_apply.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/tkbeili/AutoGPT",
+ "timestamp": "2023-10-01T04:49:20.239338",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/jonesyboi.json b/arena/jonesyboi.json
new file mode 100644
index 000000000..93b617c17
--- /dev/null
+++ b/arena/jonesyboi.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/neilrjones/AutoGPT",
+ "timestamp": "2023-10-18T02:39:02.039894",
+ "commit_hash_to_benchmark": "d173dd772dfbcce1b75148271857092bc8c22b5c",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/justwondering.json b/arena/justwondering.json
new file mode 100644
index 000000000..0d27545a7
--- /dev/null
+++ b/arena/justwondering.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/tbxy09/JustWondering", "timestamp": "2023-10-26T09:48:15Z", "commit_hash_to_benchmark": "b52fea9ba7510adb8c1e7e5cfb83f5fa181d73cf", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/kingmitch.json b/arena/kingmitch.json
new file mode 100644
index 000000000..304ea0521
--- /dev/null
+++ b/arena/kingmitch.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mitch11223/AutoGPT.git",
+ "timestamp": "2023-10-20T17:15:31.044252",
+ "commit_hash_to_benchmark": "825c3adf62879fa9f91a19c11010336de5c98bfc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/lawk.json b/arena/lawk.json
new file mode 100644
index 000000000..09d5cab74
--- /dev/null
+++ b/arena/lawk.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xiaolongtuan-yuan/AutoGPT",
+ "timestamp": "2023-10-26T06:18:01.049166",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/lcdegpt.json b/arena/lcdegpt.json
new file mode 100644
index 000000000..637e1e1fa
--- /dev/null
+++ b/arena/lcdegpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/tablc/lcdegpt",
+ "timestamp": "2023-10-17T07:00:24.125505",
+ "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/letst.json b/arena/letst.json
new file mode 100644
index 000000000..0a0d582af
--- /dev/null
+++ b/arena/letst.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jianhuanggo/AutoTestTest",
+ "timestamp": "2023-10-16T19:07:43.009481",
+ "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/letstest.json b/arena/letstest.json
new file mode 100644
index 000000000..5862da1a7
--- /dev/null
+++ b/arena/letstest.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jianhuanggo/AutoTestTest",
+ "timestamp": "2023-10-16T18:38:28.787259",
+ "commit_hash_to_benchmark": "546e08a5cf2413fcfb857e2c41d21c80c3364218",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/lilAgent.json b/arena/lilAgent.json
new file mode 100644
index 000000000..cbd9f2fb0
--- /dev/null
+++ b/arena/lilAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mvuthegoat/AutoGPT.git",
+ "timestamp": "2023-10-29T17:17:08.476300",
+ "commit_hash_to_benchmark": "9a30e0f9a43fe05005e36f0bad8531e3a92fd9e6",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/linggong.json b/arena/linggong.json
new file mode 100644
index 000000000..c89fd2fe4
--- /dev/null
+++ b/arena/linggong.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/linggong2023/AutoGPT",
+ "timestamp": "2023-10-24T12:40:35.679665",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/liuzh.json b/arena/liuzh.json
new file mode 100644
index 000000000..5b95e218d
--- /dev/null
+++ b/arena/liuzh.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Hanzhang-lang/AutoGPT_zh",
+ "timestamp": "2023-10-24T10:25:02.790189",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ltzAgent.json b/arena/ltzAgent.json
new file mode 100644
index 000000000..59635f03c
--- /dev/null
+++ b/arena/ltzAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ltzmaxwell/AutoGPT",
+ "timestamp": "2023-10-25T08:58:41.646491",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/martingpt.json b/arena/martingpt.json
new file mode 100644
index 000000000..849f42003
--- /dev/null
+++ b/arena/martingpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/martinpeng/AutoGPT",
+ "timestamp": "2023-10-18T05:30:19.072793",
+ "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/medical-agent.json b/arena/medical-agent.json
new file mode 100644
index 000000000..47e0a6a08
--- /dev/null
+++ b/arena/medical-agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/daviddhc20120601/AutoGPT",
+ "timestamp": "2023-11-02T02:08:34.264727",
+ "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/metware.json b/arena/metware.json
new file mode 100644
index 000000000..8f433581c
--- /dev/null
+++ b/arena/metware.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/chenxuya/AutoGPT",
+ "timestamp": "2023-10-23T02:23:48.775561",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/miniAgent.json b/arena/miniAgent.json
new file mode 100644
index 000000000..ad71b21b9
--- /dev/null
+++ b/arena/miniAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bigzz/AutoGPT",
+ "timestamp": "2023-10-23T02:41:41.828607",
+ "commit_hash_to_benchmark": "1a30d00194b46f8b923bab191404ce9123e34bdf",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/minister_agent.json b/arena/minister_agent.json
new file mode 100644
index 000000000..b66f0b76a
--- /dev/null
+++ b/arena/minister_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/miandai/AutoGPT",
+ "timestamp": "2023-10-25T11:58:34.781500",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/misslu.json b/arena/misslu.json
new file mode 100644
index 000000000..21dc02a45
--- /dev/null
+++ b/arena/misslu.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/JasonZhang95/AutoGPT",
+ "timestamp": "2023-10-02T11:37:30.488121",
+ "commit_hash_to_benchmark": "062d286c239dc863ede4ad475d7348698722f5fa",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/mljar-agent.json b/arena/mljar-agent.json
new file mode 100644
index 000000000..70c2e7b6d
--- /dev/null
+++ b/arena/mljar-agent.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/mljar/mljar-agent", "timestamp": "2023-10-25T14:04:51Z", "commit_hash_to_benchmark": "2fbc4d6ef48f0201c046b649e7bc74b9d11ae4e5", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/momo.json b/arena/momo.json
new file mode 100644
index 000000000..b2045bccc
--- /dev/null
+++ b/arena/momo.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/UICJohn/AutoGPT", "timestamp": "2023-10-19T09:52:19Z", "commit_hash_to_benchmark": "3aa92c082ac6912b45583b39d59a13cfda665322", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/monthly_summary.json b/arena/monthly_summary.json
new file mode 100644
index 000000000..3f222a61e
--- /dev/null
+++ b/arena/monthly_summary.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/corpetty/AutoGPT",
+ "timestamp": "2023-09-26T19:43:56.005780",
+ "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/mrSabelotodo.json b/arena/mrSabelotodo.json
new file mode 100644
index 000000000..4d8a49f6c
--- /dev/null
+++ b/arena/mrSabelotodo.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/joslangarica/AutoGPT.git",
+ "timestamp": "2023-10-03T01:11:32.290733",
+ "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/myGPT.json b/arena/myGPT.json
new file mode 100644
index 000000000..f5592ec06
--- /dev/null
+++ b/arena/myGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Tianxu-Jia/AutoGPT.git",
+ "timestamp": "2023-10-03T10:59:48.149445",
+ "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/my_AutoGPT.json b/arena/my_AutoGPT.json
new file mode 100644
index 000000000..2b48e64bd
--- /dev/null
+++ b/arena/my_AutoGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Tianxu-Jia/AutoGPT.git",
+ "timestamp": "2023-10-03T08:57:28.681756",
+ "commit_hash_to_benchmark": "949ab477a87cfb7a3668d7961e9443922081e098",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/my_fx_agent.json b/arena/my_fx_agent.json
new file mode 100644
index 000000000..314e63482
--- /dev/null
+++ b/arena/my_fx_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT.git",
+ "timestamp": "2023-10-18T07:09:36.565783",
+ "commit_hash_to_benchmark": "e9b64adae9fce180a392c726457e150177e746fb",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/my_gpt.json b/arena/my_gpt.json
new file mode 100644
index 000000000..2eb700672
--- /dev/null
+++ b/arena/my_gpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/dawnchen123/AutoGPT",
+ "timestamp": "2023-11-01T02:08:06.032041",
+ "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/mygent.json b/arena/mygent.json
new file mode 100644
index 000000000..5eda9ff63
--- /dev/null
+++ b/arena/mygent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/prashanthi-instalily/AutoGPT",
+ "timestamp": "2023-10-24T13:31:28.287257",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/nawalj.json b/arena/nawalj.json
new file mode 100644
index 000000000..0506380f1
--- /dev/null
+++ b/arena/nawalj.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/nawaljunaid/AutoGPT.git",
+ "timestamp": "2023-10-03T18:41:12.930097",
+ "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/newAgent.json b/arena/newAgent.json
new file mode 100644
index 000000000..9ace7df0a
--- /dev/null
+++ b/arena/newAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/alexsoshnikov/AutoGPT",
+ "timestamp": "2023-10-10T09:27:10.249840",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/northfork.json b/arena/northfork.json
new file mode 100644
index 000000000..0b5076ce7
--- /dev/null
+++ b/arena/northfork.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Sewen/AutoGPT",
+ "timestamp": "2023-09-26T07:18:29.975526",
+ "commit_hash_to_benchmark": "3d4307a848880c8509e8356bbb9146f0e6f917f4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/od_agent_1.json b/arena/od_agent_1.json
new file mode 100644
index 000000000..068becf68
--- /dev/null
+++ b/arena/od_agent_1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mattsinnock/AutoGPT",
+ "timestamp": "2023-10-05T01:13:15.930770",
+ "commit_hash_to_benchmark": "73ef89e03a719ec1b2f01b0f04e9b1f64ffb2a7d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/operationAgent.json b/arena/operationAgent.json
new file mode 100644
index 000000000..f4587aaa0
--- /dev/null
+++ b/arena/operationAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/KMing-L/AutoGPT",
+ "timestamp": "2023-10-09T02:21:56.002832",
+ "commit_hash_to_benchmark": "2d865cc9e6d0b3c7f10777849adf9492b6400904",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/personal-al-website.json b/arena/personal-al-website.json
new file mode 100644
index 000000000..905ae4ade
--- /dev/null
+++ b/arena/personal-al-website.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Hazzari/AutoGPT",
+ "timestamp": "2023-10-01T11:59:23.504561",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/piGPT.json b/arena/piGPT.json
new file mode 100644
index 000000000..a01cb6c4a
--- /dev/null
+++ b/arena/piGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/pihanya/AutoGPT",
+ "timestamp": "2023-10-06T20:37:37.445255",
+ "commit_hash_to_benchmark": "abf88fe5097770b1da3383a19208b5a23e2371f3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/pipeline.json b/arena/pipeline.json
new file mode 100644
index 000000000..4ce4eed21
--- /dev/null
+++ b/arena/pipeline.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/geesugar/AutoGPT",
+ "timestamp": "2023-09-26T04:52:08.379642",
+ "commit_hash_to_benchmark": "075529ddc9cbca45ff98f0701baed9b89a712c23",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/podcast_agent.json b/arena/podcast_agent.json
new file mode 100644
index 000000000..6b7487bfa
--- /dev/null
+++ b/arena/podcast_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aliumujib/AutoGPT",
+ "timestamp": "2023-10-28T06:03:18.488676",
+ "commit_hash_to_benchmark": "2bd05827f97e471af798b8c2f04e8772dad101d3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/potato.json b/arena/potato.json
new file mode 100644
index 000000000..c78cec9d6
--- /dev/null
+++ b/arena/potato.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/volkov/AutoGPT", "timestamp": "2023-10-23T05:24:11Z", "commit_hash_to_benchmark": "7d2532c1814d624725e7a1fce8831dc0def27fb8", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/project_assitant.json b/arena/project_assitant.json
new file mode 100644
index 000000000..239a7c921
--- /dev/null
+++ b/arena/project_assitant.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/milog1994/AutoGPT.git",
+ "timestamp": "2023-10-30T21:08:25.083221",
+ "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/project_master.json b/arena/project_master.json
new file mode 100644
index 000000000..79e0f5a23
--- /dev/null
+++ b/arena/project_master.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/milog1994/AutoGPT.git",
+ "timestamp": "2023-10-30T21:14:18.974130",
+ "commit_hash_to_benchmark": "d9fbd26b8563e5f59d705623bae0d5cf9c9499c7",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/project_review.json b/arena/project_review.json
new file mode 100644
index 000000000..e5889d49a
--- /dev/null
+++ b/arena/project_review.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/oneforce/AutoGPT",
+ "timestamp": "2023-10-24T09:51:05.658251",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/prometheus.json b/arena/prometheus.json
new file mode 100644
index 000000000..bcd8f6660
--- /dev/null
+++ b/arena/prometheus.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/yashrahurikar23/prometheus",
+ "timestamp": "2023-10-04T15:21:16.474459",
+ "commit_hash_to_benchmark": "1bd85cbc09473c0252928fb849ae8373607d6065",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/proudgpt.json b/arena/proudgpt.json
new file mode 100644
index 000000000..383a4a2f8
--- /dev/null
+++ b/arena/proudgpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/OmarHory/Star-Agent",
+ "timestamp": "2023-10-01T22:11:15.978902",
+ "commit_hash_to_benchmark": "8252a2fa8fee852a22093bf7fd8755f86c6b0ad5",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/qinghu3.json b/arena/qinghu3.json
new file mode 100644
index 000000000..06b4a4d94
--- /dev/null
+++ b/arena/qinghu3.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/QingHu1227/AutoGPT.git",
+ "timestamp": "2023-11-06T04:11:34.227212",
+ "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ra.json b/arena/ra.json
new file mode 100644
index 000000000..b29e96cec
--- /dev/null
+++ b/arena/ra.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT",
+ "timestamp": "2023-10-23T18:03:39.069151",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/ra1.json b/arena/ra1.json
new file mode 100644
index 000000000..4b50158c6
--- /dev/null
+++ b/arena/ra1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT",
+ "timestamp": "2023-10-23T18:12:20.095032",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/rachael.json b/arena/rachael.json
new file mode 100644
index 000000000..fe57a0c5d
--- /dev/null
+++ b/arena/rachael.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/dotdust/rachael.git",
+ "timestamp": "2023-10-08T13:18:35.946639",
+ "commit_hash_to_benchmark": "e99e9b6181f091a9625ef9b922dac15dd5f0a885",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/raindrop.json b/arena/raindrop.json
new file mode 100644
index 000000000..10decc9c8
--- /dev/null
+++ b/arena/raindrop.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/tianbinraindrop/AutoGPT",
+ "timestamp": "2023-10-01T02:24:57.822495",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/researchGPT.json b/arena/researchGPT.json
new file mode 100644
index 000000000..3784933f0
--- /dev/null
+++ b/arena/researchGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/gty3310/AutoGPT",
+ "timestamp": "2023-10-09T23:36:29.771968",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/researchGPT2.json b/arena/researchGPT2.json
new file mode 100644
index 000000000..eadb82df0
--- /dev/null
+++ b/arena/researchGPT2.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/gty3310/AutoGPT",
+ "timestamp": "2023-10-17T15:22:36.628578",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/research_analyst.json b/arena/research_analyst.json
new file mode 100644
index 000000000..675df1ad8
--- /dev/null
+++ b/arena/research_analyst.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/aramfaghfouri/AutoGPT",
+ "timestamp": "2023-10-23T17:53:54.235178",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/robita.json b/arena/robita.json
new file mode 100644
index 000000000..15f3d44ac
--- /dev/null
+++ b/arena/robita.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/atetsuka/AutoGPT",
+ "timestamp": "2023-10-02T07:16:13.845473",
+ "commit_hash_to_benchmark": "7ec92d8c063fc041eefd9522450e4ef52e5a34da",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/robot.json b/arena/robot.json
new file mode 100644
index 000000000..3f1eded5c
--- /dev/null
+++ b/arena/robot.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/brisklad/AutoGPT",
+ "timestamp": "2023-10-15T13:49:47.384228",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/searchagent.json b/arena/searchagent.json
new file mode 100644
index 000000000..8136c1345
--- /dev/null
+++ b/arena/searchagent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/sanjeevsaara/AutoGPT",
+ "timestamp": "2023-10-16T00:01:53.051453",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/set.json b/arena/set.json
new file mode 100644
index 000000000..14efa0819
--- /dev/null
+++ b/arena/set.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/victorrica/AutoGPT",
+ "timestamp": "2023-10-24T05:12:51.971269",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/sgpt.json b/arena/sgpt.json
new file mode 100644
index 000000000..cf2ab22c0
--- /dev/null
+++ b/arena/sgpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ya5has/sgpt",
+ "timestamp": "2023-11-02T05:51:01.446153",
+ "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/shivi.json b/arena/shivi.json
new file mode 100644
index 000000000..e7ed40a85
--- /dev/null
+++ b/arena/shivi.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kshivang/DabblerGPT",
+ "timestamp": "2023-10-07T01:39:16.601657",
+ "commit_hash_to_benchmark": "b2d53d8d18c754a5b877ffeb9f42d3387c3324fd",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/sky.json b/arena/sky.json
new file mode 100644
index 000000000..49690196d
--- /dev/null
+++ b/arena/sky.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/hmslsky/Auto-GPT",
+ "timestamp": "2023-10-31T15:48:50.123435",
+ "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/smith.json b/arena/smith.json
new file mode 100644
index 000000000..c3bfd5978
--- /dev/null
+++ b/arena/smith.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/kevinboudot/AutoGPT",
+ "timestamp": "2023-10-11T12:25:09.516293",
+ "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/songyalei.json b/arena/songyalei.json
new file mode 100644
index 000000000..2c3b7dcc3
--- /dev/null
+++ b/arena/songyalei.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/songyalei/AutoGPT",
+ "timestamp": "2023-11-16T07:11:39.746384",
+ "commit_hash_to_benchmark": "fa357dd13928baa4d1e30054bc75edc5d68b08f1",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/sql.json b/arena/sql.json
new file mode 100644
index 000000000..a9b357a80
--- /dev/null
+++ b/arena/sql.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/monsterooo/AutoGPT",
+ "timestamp": "2023-09-26T06:46:35.721082",
+ "commit_hash_to_benchmark": "bec207568a93e38bff971525c53612813aa60730",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/stefan.json b/arena/stefan.json
new file mode 100644
index 000000000..96987be6b
--- /dev/null
+++ b/arena/stefan.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/sutefu23/AutoGPT",
+ "timestamp": "2023-10-21T01:03:06.362579",
+ "commit_hash_to_benchmark": "03e56fece5008d119dd5ae97da57eb4db3d14a1d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/stockAgent.json b/arena/stockAgent.json
new file mode 100644
index 000000000..b4a9c5d3d
--- /dev/null
+++ b/arena/stockAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/SnowYoung/StockAgent",
+ "timestamp": "2023-10-19T09:49:44.372589",
+ "commit_hash_to_benchmark": "f62651ff3f1ece5520916bee7ee441e1949855f9",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/swarms.json b/arena/swarms.json
new file mode 100644
index 000000000..7bd572350
--- /dev/null
+++ b/arena/swarms.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/ZackBradshaw/Auto-Swarms", "timestamp": "2023-10-16T15:03:21Z", "commit_hash_to_benchmark": "96b591c6f0918265e2256cb9c76ca2ff50f3983f", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/tdev.json b/arena/tdev.json
new file mode 100644
index 000000000..68518c814
--- /dev/null
+++ b/arena/tdev.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/miguelcas12/tdev.git",
+ "timestamp": "2023-09-26T17:36:53.829436",
+ "commit_hash_to_benchmark": "cf630e4f2cee04fd935612f95308322cd9eb1df7",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/teacher.json b/arena/teacher.json
new file mode 100644
index 000000000..0e0291c00
--- /dev/null
+++ b/arena/teacher.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/miaowacao/AutoGPT1",
+ "timestamp": "2023-10-16T07:21:48.209351",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/test-tpk.json b/arena/test-tpk.json
new file mode 100644
index 000000000..87f4f4e2c
--- /dev/null
+++ b/arena/test-tpk.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/seeseesky/AutoGPT",
+ "timestamp": "2023-10-27T04:06:10.599340",
+ "commit_hash_to_benchmark": "21b809794a90cf6f9a6aa41f179f420045becadc",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/test.json b/arena/test.json
new file mode 100644
index 000000000..00b762a09
--- /dev/null
+++ b/arena/test.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Nivek92/AutoGPT",
+ "timestamp": "2023-10-01T15:46:07.871808",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/test1.json b/arena/test1.json
new file mode 100644
index 000000000..e9f9ff00a
--- /dev/null
+++ b/arena/test1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mplummeridge/AutoGPT",
+ "timestamp": "2023-10-24T01:06:24.100385",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/testGPT.json b/arena/testGPT.json
new file mode 100644
index 000000000..f1078ed65
--- /dev/null
+++ b/arena/testGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/PZON2/testGPT",
+ "timestamp": "2023-10-15T12:06:56.373935",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/thebestagent.json b/arena/thebestagent.json
new file mode 100644
index 000000000..0003b82b6
--- /dev/null
+++ b/arena/thebestagent.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/hisandan/AutoGPT", "timestamp": "2023-10-09T14:10:20Z", "commit_hash_to_benchmark": "da5109b07d94ae3de1b3399ad2be6171b14cb304", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/theone.json b/arena/theone.json
new file mode 100644
index 000000000..0003b82b6
--- /dev/null
+++ b/arena/theone.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/hisandan/AutoGPT", "timestamp": "2023-10-09T14:10:20Z", "commit_hash_to_benchmark": "da5109b07d94ae3de1b3399ad2be6171b14cb304", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/tiffGPT.json b/arena/tiffGPT.json
new file mode 100644
index 000000000..84833b637
--- /dev/null
+++ b/arena/tiffGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/darkcyber-ninja/AutoGPT",
+ "timestamp": "2023-10-31T18:25:58.281391",
+ "commit_hash_to_benchmark": "c65b71d51d8f849663172c5a128953b4ca92b2b0",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/trend_agent.json b/arena/trend_agent.json
new file mode 100644
index 000000000..ba7d6839c
--- /dev/null
+++ b/arena/trend_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Asmedeus998/AutoGPT.git",
+ "timestamp": "2023-10-01T23:04:42.429686",
+ "commit_hash_to_benchmark": "8252a2fa8fee852a22093bf7fd8755f86c6b0ad5",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/umiuni_agent.json b/arena/umiuni_agent.json
new file mode 100644
index 000000000..0dd76a137
--- /dev/null
+++ b/arena/umiuni_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/umiuni-community/AutoGPT.git",
+ "timestamp": "2023-10-01T11:37:00.284821",
+ "commit_hash_to_benchmark": "a0fba5d1f13d35a1c4a8b7718550677bf62b5101",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/uply.json b/arena/uply.json
new file mode 100644
index 000000000..f3058753e
--- /dev/null
+++ b/arena/uply.json
@@ -0,0 +1 @@
+{"github_repo_url": "https://github.com/uply23333/Uply-GPT", "timestamp": "2023-10-20T00:48:01Z", "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e", "branch_to_benchmark": "master"} \ No newline at end of file
diff --git a/arena/url-to-lead.json b/arena/url-to-lead.json
new file mode 100644
index 000000000..f7564d863
--- /dev/null
+++ b/arena/url-to-lead.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/nikolajlovenhardt/AutoGPT",
+ "timestamp": "2023-11-01T15:18:00.402718",
+ "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/v-gpt.json b/arena/v-gpt.json
new file mode 100644
index 000000000..153719457
--- /dev/null
+++ b/arena/v-gpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Varun565/AutoGPT",
+ "timestamp": "2023-10-05T03:17:36.972978",
+ "commit_hash_to_benchmark": "3374fd181852d489e51ee33a25d12a064a0bb55d",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/victor2-0.json b/arena/victor2-0.json
new file mode 100644
index 000000000..b984c1bcc
--- /dev/null
+++ b/arena/victor2-0.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/victorleduc/AutoGPT",
+ "timestamp": "2023-10-23T23:35:53.044545",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/web_developer.json b/arena/web_developer.json
new file mode 100644
index 000000000..7f1f9c4af
--- /dev/null
+++ b/arena/web_developer.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/paul726/AutoGPT",
+ "timestamp": "2023-10-15T13:36:03.387061",
+ "commit_hash_to_benchmark": "74ee69daf1c0a2603f19bdb1edcfdf1f4e06bcff",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/webagent.json b/arena/webagent.json
new file mode 100644
index 000000000..f1bccc9f7
--- /dev/null
+++ b/arena/webagent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ddNTP/myagent.git",
+ "timestamp": "2023-09-20T11:21:05.331950",
+ "commit_hash_to_benchmark": "377d0af228bad019be0a9743c2824c033e039654",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/webgeek.json b/arena/webgeek.json
new file mode 100644
index 000000000..33789db6b
--- /dev/null
+++ b/arena/webgeek.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/webgeeksai/AutoGPT.git",
+ "timestamp": "2023-10-13T06:22:22.056151",
+ "commit_hash_to_benchmark": "38790a27ed2c1b63a301b6a67e7590f2d30de53e",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/wedding-planner.json b/arena/wedding-planner.json
new file mode 100644
index 000000000..b2acfa686
--- /dev/null
+++ b/arena/wedding-planner.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/mogronalol/AutoGPT",
+ "timestamp": "2023-10-08T20:31:43.422977",
+ "commit_hash_to_benchmark": "b52aba4ef545add8fb6c7f8009615cb38e24db80",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/woohoo_agent.json b/arena/woohoo_agent.json
new file mode 100644
index 000000000..a805c3498
--- /dev/null
+++ b/arena/woohoo_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/FIresInWind/AutoGPT",
+ "timestamp": "2023-10-19T15:14:59.786203",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/wyjagent.json b/arena/wyjagent.json
new file mode 100644
index 000000000..e96772536
--- /dev/null
+++ b/arena/wyjagent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/wangyijunlyy/AutoGPT",
+ "timestamp": "2023-11-03T09:21:36.143887",
+ "commit_hash_to_benchmark": "d9ec0ac3ad7b48eb44e6403e88d2dc5696fd4950",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/xmly.json b/arena/xmly.json
new file mode 100644
index 000000000..23cf046e5
--- /dev/null
+++ b/arena/xmly.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/dongdaoguang/AutoGPT",
+ "timestamp": "2023-10-11T06:30:06.866694",
+ "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/xq_agent.json b/arena/xq_agent.json
new file mode 100644
index 000000000..cccf5586b
--- /dev/null
+++ b/arena/xq_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/emptykid/AutoGPT",
+ "timestamp": "2023-10-24T10:37:55.170776",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/xt0m-GPT.json b/arena/xt0m-GPT.json
new file mode 100644
index 000000000..130bbae2f
--- /dev/null
+++ b/arena/xt0m-GPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jcartes/xt0m-GPT",
+ "timestamp": "2023-10-15T01:31:05.785913",
+ "commit_hash_to_benchmark": "57bcbdf45c6c1493a4e5f6a4e72594ea13c10f93",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/xtest.json b/arena/xtest.json
new file mode 100644
index 000000000..e189babe3
--- /dev/null
+++ b/arena/xtest.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/popperxu/AutoGPT",
+ "timestamp": "2023-10-31T06:25:36.338549",
+ "commit_hash_to_benchmark": "c3569d1842e6568ab1327e577603e71ad1feb622",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/yarbis.json b/arena/yarbis.json
new file mode 100644
index 000000000..65d6c50f2
--- /dev/null
+++ b/arena/yarbis.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/sintecba/AutoGPT",
+ "timestamp": "2023-10-10T18:11:07.473738",
+ "commit_hash_to_benchmark": "c77ade5b2f62c5373fc7573e5c45581f003c77a3",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/zaheer.json b/arena/zaheer.json
new file mode 100644
index 000000000..01e4e72c8
--- /dev/null
+++ b/arena/zaheer.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/zaheerahmad33/AutoGPT",
+ "timestamp": "2023-10-22T21:48:48.414779",
+ "commit_hash_to_benchmark": "b4ee485906c1d8da71ce9b3093996383322980fe",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/zcb.json b/arena/zcb.json
new file mode 100644
index 000000000..c18921070
--- /dev/null
+++ b/arena/zcb.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/asasasheshou/AutoGPT",
+ "timestamp": "2023-10-25T09:15:30.114147",
+ "commit_hash_to_benchmark": "ab362f96c3255052350e8e8081b363c7b97ffd6f",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/zczc.json b/arena/zczc.json
new file mode 100644
index 000000000..b484f0bef
--- /dev/null
+++ b/arena/zczc.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Howard-Cheung/AutoGPT",
+ "timestamp": "2023-10-26T12:48:30.729105",
+ "commit_hash_to_benchmark": "ab2a61833584c42ededa805cbac50718c72aa5ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/zhizhi.json b/arena/zhizhi.json
new file mode 100644
index 000000000..58d86008e
--- /dev/null
+++ b/arena/zhizhi.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/bolyage/zhizhi",
+ "timestamp": "2023-10-19T11:38:51.332966",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/zlipknot_1.json b/arena/zlipknot_1.json
new file mode 100644
index 000000000..053241796
--- /dev/null
+++ b/arena/zlipknot_1.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/zlipknot/AutoGPT.git",
+ "timestamp": "2023-10-25T19:20:38.529540",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/zlipknot_test_agent_4.json b/arena/zlipknot_test_agent_4.json
new file mode 100644
index 000000000..2096d67b5
--- /dev/null
+++ b/arena/zlipknot_test_agent_4.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/zlipknot/AutoGPT.git",
+ "timestamp": "2023-10-25T19:13:02.418676",
+ "commit_hash_to_benchmark": "89d333f3bb422495f21e04bdd2bba3cb8c1a34ae",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
diff --git a/arena/zze.json b/arena/zze.json
new file mode 100644
index 000000000..7b69f1872
--- /dev/null
+++ b/arena/zze.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/quasimodo7614/AutoGPT",
+ "timestamp": "2023-10-16T07:49:29.399457",
+ "commit_hash_to_benchmark": "2f79caa6b901d006a78c1ac9e69db4465c0f971a",
+ "branch_to_benchmark": "master"
+} \ No newline at end of file
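
Every arena entry added above follows the same four-field schema. A minimal validation sketch (hypothetical helper, not part of the repository):

    import json

    REQUIRED_KEYS = {
        "github_repo_url",
        "timestamp",
        "commit_hash_to_benchmark",
        "branch_to_benchmark",
    }

    def validate_arena_entry(path: str) -> dict:
        # Load a submission file and check that all four required fields are present.
        with open(path, "r", encoding="utf-8") as f:
            entry = json.load(f)
        missing = REQUIRED_KEYS - entry.keys()
        if missing:
            raise ValueError(f"{path} is missing fields: {sorted(missing)}")
        return entry
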
diff --git a/autogpt/__init__.py b/autogpt/__init__.py
deleted file mode 100644
index 909f8bf4b..000000000
--- a/autogpt/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import os
-import random
-import sys
-
-from dotenv import load_dotenv
-
-if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
- print("Setting random seed to 42")
- random.seed(42)
-
-# Load the users .env file into environment variables
-load_dotenv(verbose=True, override=True)
-
-del load_dotenv
diff --git a/autogpt/__main__.py b/autogpt/__main__.py
deleted file mode 100644
index 128f9eea4..000000000
--- a/autogpt/__main__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Auto-GPT: A GPT powered AI Assistant"""
-import autogpt.cli
-
-if __name__ == "__main__":
- autogpt.cli.main()
diff --git a/autogpt/agent/__init__.py b/autogpt/agent/__init__.py
deleted file mode 100644
index 90d1148c2..000000000
--- a/autogpt/agent/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from autogpt.agent.agent import Agent
-
-__all__ = ["Agent"]
diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py
deleted file mode 100644
index 88b3fa809..000000000
--- a/autogpt/agent/agent.py
+++ /dev/null
@@ -1,309 +0,0 @@
-import json
-import signal
-import sys
-from datetime import datetime
-from pathlib import Path
-
-from colorama import Fore, Style
-
-from autogpt.config import Config
-from autogpt.config.ai_config import AIConfig
-from autogpt.json_utils.utilities import extract_json_from_response, validate_json
-from autogpt.llm.chat import chat_with_ai
-from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
-from autogpt.llm.utils import count_string_tokens
-from autogpt.logs import (
- FULL_MESSAGE_HISTORY_FILE_NAME,
- NEXT_ACTION_FILE_NAME,
- USER_INPUT_FILE_NAME,
- LogCycleHandler,
- logger,
- print_assistant_thoughts,
- remove_ansi_escape,
-)
-from autogpt.memory.message_history import MessageHistory
-from autogpt.memory.vector import VectorMemory
-from autogpt.models.command_registry import CommandRegistry
-from autogpt.speech import say_text
-from autogpt.spinner import Spinner
-from autogpt.utils import clean_input
-from autogpt.workspace import Workspace
-
-
-class Agent:
- """Agent class for interacting with Auto-GPT.
-
- Attributes:
- ai_name: The name of the agent.
- memory: The memory object to use.
- next_action_count: The number of actions to execute.
- system_prompt: The system prompt is the initial prompt that defines everything
- the AI needs to know to achieve its task successfully.
- Currently, the dynamic, customizable parts of the system prompt are
- ai_name, description, and goals.
-
- triggering_prompt: The last sentence the AI will see before answering.
- For Auto-GPT, this prompt is:
- Determine exactly one command to use, and respond using the format specified
- above:
- The triggering prompt is not part of the system prompt because between the
- system prompt and the triggering
- prompt we have contextual information that can distract the AI and make it
- forget that its goal is to find the next task to achieve.
- SYSTEM PROMPT
- CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
- TRIGGERING PROMPT
-
- The triggering prompt reminds the AI about its short term meta task
- (defining the next task)
- """
-
- def __init__(
- self,
- ai_name: str,
- memory: VectorMemory,
- next_action_count: int,
- command_registry: CommandRegistry,
- ai_config: AIConfig,
- system_prompt: str,
- triggering_prompt: str,
- workspace_directory: str | Path,
- config: Config,
- ):
- self.ai_name = ai_name
- self.memory = memory
- self.history = MessageHistory.for_model(config.smart_llm, agent=self)
- self.next_action_count = next_action_count
- self.command_registry = command_registry
- self.config = config
- self.ai_config = ai_config
- self.system_prompt = system_prompt
- self.triggering_prompt = triggering_prompt
- self.workspace = Workspace(workspace_directory, config.restrict_to_workspace)
- self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
- self.cycle_count = 0
- self.log_cycle_handler = LogCycleHandler()
- self.smart_token_limit = OPEN_AI_CHAT_MODELS.get(config.smart_llm).max_tokens
-
- def start_interaction_loop(self):
- # Avoid circular imports
- from autogpt.app import execute_command, extract_command
-
- # Interaction Loop
- self.cycle_count = 0
- command_name = None
- arguments = None
- user_input = ""
-
- # Signal handler: an interrupt (Ctrl-C) stops continuous command execution (y -N)
- def signal_handler(signum, frame):
- if self.next_action_count == 0:
- sys.exit()
- else:
- print(
- Fore.RED
- + "Interrupt signal received. Stopping continuous command execution."
- + Style.RESET_ALL
- )
- self.next_action_count = 0
-
- signal.signal(signal.SIGINT, signal_handler)
-
- while True:
- # Discontinue if continuous limit is reached
- self.cycle_count += 1
- self.log_cycle_handler.log_count_within_cycle = 0
- self.log_cycle_handler.log_cycle(
- self.ai_config.ai_name,
- self.created_at,
- self.cycle_count,
- [m.raw() for m in self.history],
- FULL_MESSAGE_HISTORY_FILE_NAME,
- )
- if (
- self.config.continuous_mode
- and self.config.continuous_limit > 0
- and self.cycle_count > self.config.continuous_limit
- ):
- logger.typewriter_log(
- "Continuous Limit Reached: ",
- Fore.YELLOW,
- f"{self.config.continuous_limit}",
- )
- break
- # Send message to AI, get response
- with Spinner("Thinking... ", plain_output=self.config.plain_output):
- assistant_reply = chat_with_ai(
- self.config,
- self,
- self.system_prompt,
- self.triggering_prompt,
- self.smart_token_limit,
- self.config.smart_llm,
- )
-
- try:
- assistant_reply_json = extract_json_from_response(
- assistant_reply.content
- )
- validate_json(assistant_reply_json, self.config)
- except json.JSONDecodeError as e:
- logger.error(f"Exception while validating assistant reply JSON: {e}")
- assistant_reply_json = {}
-
- for plugin in self.config.plugins:
- if not plugin.can_handle_post_planning():
- continue
- assistant_reply_json = plugin.post_planning(assistant_reply_json)
-
- # Print Assistant thoughts
- if assistant_reply_json != {}:
- # Get command name and arguments
- try:
- print_assistant_thoughts(
- self.ai_name, assistant_reply_json, self.config
- )
- command_name, arguments = extract_command(
- assistant_reply_json, assistant_reply, self.config
- )
- if self.config.speak_mode:
- say_text(f"I want to execute {command_name}", self.config)
-
- except Exception as e:
- logger.error("Error: \n", str(e))
- self.log_cycle_handler.log_cycle(
- self.ai_config.ai_name,
- self.created_at,
- self.cycle_count,
- assistant_reply_json,
- NEXT_ACTION_FILE_NAME,
- )
-
- # First log new-line so user can differentiate sections better in console
- logger.typewriter_log("\n")
- logger.typewriter_log(
- "NEXT ACTION: ",
- Fore.CYAN,
- f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
- f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
- )
-
- if not self.config.continuous_mode and self.next_action_count == 0:
- # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
- # Get key press: Prompt the user to press enter to continue or escape
- # to exit
- self.user_input = ""
- logger.info(
- f"Enter '{self.config.authorise_key}' to authorise command, "
- f"'{self.config.authorise_key} -N' to run N continuous commands, "
- f"'{self.config.exit_key}' to exit program, or enter feedback for "
- f"{self.ai_name}..."
- )
- while True:
- if self.config.chat_messages_enabled:
- console_input = clean_input(
- self.config, "Waiting for your response..."
- )
- else:
- console_input = clean_input(
- self.config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
- )
- if console_input.lower().strip() == self.config.authorise_key:
- user_input = "GENERATE NEXT COMMAND JSON"
- break
- elif console_input.lower().strip() == "":
- logger.warn("Invalid input format.")
- continue
- elif console_input.lower().startswith(
- f"{self.config.authorise_key} -"
- ):
- try:
- self.next_action_count = abs(
- int(console_input.split(" ")[1])
- )
- user_input = "GENERATE NEXT COMMAND JSON"
- except ValueError:
- logger.warn(
- f"Invalid input format. Please enter '{self.config.authorise_key} -n' "
- "where n is the number of continuous tasks."
- )
- continue
- break
- elif console_input.lower() == self.config.exit_key:
- user_input = "EXIT"
- break
- else:
- user_input = console_input
- command_name = "human_feedback"
- self.log_cycle_handler.log_cycle(
- self.ai_config.ai_name,
- self.created_at,
- self.cycle_count,
- user_input,
- USER_INPUT_FILE_NAME,
- )
- break
-
- if user_input == "GENERATE NEXT COMMAND JSON":
- logger.typewriter_log(
- "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
- Fore.MAGENTA,
- "",
- )
- elif user_input == "EXIT":
- logger.info("Exiting...")
- break
- else:
- # First log new-line so user can differentiate sections better in console
- logger.typewriter_log("\n")
- # Print authorized commands left value
- logger.typewriter_log(
- f"{Fore.CYAN}AUTHORISED COMMANDS LEFT: {Style.RESET_ALL}{self.next_action_count}"
- )
-
- # Execute command
- if command_name is not None and command_name.lower().startswith("error"):
- result = f"Could not execute command: {arguments}"
- elif command_name == "human_feedback":
- result = f"Human feedback: {user_input}"
- else:
- for plugin in self.config.plugins:
- if not plugin.can_handle_pre_command():
- continue
- command_name, arguments = plugin.pre_command(
- command_name, arguments
- )
- command_result = execute_command(
- command_name=command_name,
- arguments=arguments,
- agent=self,
- )
- result = f"Command {command_name} returned: " f"{command_result}"
-
- result_tlength = count_string_tokens(
- str(command_result), self.config.smart_llm
- )
- memory_tlength = count_string_tokens(
- str(self.history.summary_message()), self.config.smart_llm
- )
- if result_tlength + memory_tlength + 600 > self.smart_token_limit:
- result = (
- f"Failure: command {command_name} returned too much output. "
- "Do not execute this command again with the same arguments."
- )
-
- for plugin in self.config.plugins:
- if not plugin.can_handle_post_command():
- continue
- result = plugin.post_command(command_name, result)
- if self.next_action_count > 0:
- self.next_action_count -= 1
-
- # If the command produced a result, append it to the message history
- if result is not None:
- self.history.add("system", result, "action_result")
- logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
- else:
- self.history.add("system", "Unable to execute command", "action_result")
- logger.typewriter_log(
- "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
- )
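
The Agent docstring above fixes the message ordering the interaction loop depends on: system prompt first, contextual information in the middle, triggering prompt last. A minimal sketch of that layout (illustrative only; the real assembly happens inside chat_with_ai):

    def build_prompt(system_prompt: str, context: list[str], triggering_prompt: str) -> list[dict]:
        # SYSTEM PROMPT first, CONTEXTUAL INFORMATION in the middle,
        # TRIGGERING PROMPT as the last thing the model sees.
        messages = [{"role": "system", "content": system_prompt}]
        messages += [{"role": "system", "content": chunk} for chunk in context]
        messages.append({"role": "user", "content": triggering_prompt})
        return messages
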
diff --git a/autogpt/app.py b/autogpt/app.py
deleted file mode 100644
index ea5072f81..000000000
--- a/autogpt/app.py
+++ /dev/null
@@ -1,114 +0,0 @@
-""" Command and Control """
-import json
-from typing import Dict
-
-from autogpt.agent.agent import Agent
-from autogpt.config import Config
-from autogpt.llm import ChatModelResponse
-
-
-def is_valid_int(value: str) -> bool:
- """Check if the value is a valid integer
-
- Args:
- value (str): The value to check
-
- Returns:
- bool: True if the value is a valid integer, False otherwise
- """
- try:
- int(value)
- return True
- except ValueError:
- return False
-
-
-def extract_command(
- assistant_reply_json: Dict, assistant_reply: ChatModelResponse, config: Config
-):
- """Parse the response and return the command name and arguments
-
- Args:
- assistant_reply_json (dict): The response object from the AI
- assistant_reply (ChatModelResponse): The model response from the AI
- config (Config): The config object
-
- Returns:
- tuple: The command name and arguments
-
- Raises:
- json.decoder.JSONDecodeError: If the response is not valid JSON
-
- Exception: If any other error occurs
- """
- if config.openai_functions:
- if assistant_reply.function_call is None:
- return "Error:", "No 'function_call' in assistant reply"
- assistant_reply_json["command"] = {
- "name": assistant_reply.function_call.name,
- "args": json.loads(assistant_reply.function_call.arguments),
- }
- try:
- # Type-check before the membership test: "in" on a non-dict would raise
- if not isinstance(assistant_reply_json, dict):
- return (
- "Error:",
- f"The previous message sent was not a dictionary {assistant_reply_json}",
- )
-
- if "command" not in assistant_reply_json:
- return "Error:", "Missing 'command' object in JSON"
-
- command = assistant_reply_json["command"]
- if not isinstance(command, dict):
- return "Error:", "'command' object is not a dictionary"
-
- if "name" not in command:
- return "Error:", "Missing 'name' field in 'command' object"
-
- command_name = command["name"]
-
- # Use an empty dictionary if 'args' field is not present in 'command' object
- arguments = command.get("args", {})
-
- return command_name, arguments
- except json.decoder.JSONDecodeError:
- return "Error:", "Invalid JSON"
- # All other errors, return "Error: + error message"
- except Exception as e:
- return "Error:", str(e)
-
-
-def execute_command(
- command_name: str,
- arguments: dict[str, str],
- agent: Agent,
-):
- """Execute the command and return the result
-
- Args:
- command_name (str): The name of the command to execute
- arguments (dict): The arguments for the command
- agent (Agent): The agent that is executing the command
-
- Returns:
- str: The result of the command
- """
- try:
- # Execute a native command with the same name or alias, if it exists
- if command := agent.command_registry.get_command(command_name):
- return command(**arguments, agent=agent)
-
- # Handle non-native commands (e.g. from plugins)
- for command in agent.ai_config.prompt_generator.commands:
- if (
- command_name == command["label"].lower()
- or command_name == command["name"].lower()
- ):
- return command["function"](**arguments)
-
- raise RuntimeError(
- f"Cannot execute '{command_name}': unknown command."
- " Do not try to use this command again."
- )
- except Exception as e:
- return f"Error: {str(e)}"
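
For reference, a well-formed assistant reply that extract_command accepts looks like this (illustrative command and arguments):

    reply_json = {
        "command": {
            "name": "write_to_file",
            "args": {"filename": "notes.txt", "text": "hello"},
        }
    }
    # Mirrors extract_command's happy path: a command name plus (possibly empty) args.
    command = reply_json["command"]
    command_name = command["name"]        # "write_to_file"
    arguments = command.get("args", {})   # {"filename": "notes.txt", "text": "hello"}
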
diff --git a/autogpt/cli.py b/autogpt/cli.py
deleted file mode 100644
index 690c16261..000000000
--- a/autogpt/cli.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""Main script for the autogpt package."""
-from typing import Optional
-
-import click
-
-
-@click.group(invoke_without_command=True)
-@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
-@click.option(
- "--skip-reprompt",
- "-y",
- is_flag=True,
- help="Skips the re-prompting messages at the beginning of the script",
-)
-@click.option(
- "--ai-settings",
- "-C",
- help="Specifies which ai_settings.yaml file to use; also automatically skips the re-prompt.",
-)
-@click.option(
- "--prompt-settings",
- "-P",
- help="Specifies which prompt_settings.yaml file to use.",
-)
-@click.option(
- "-l",
- "--continuous-limit",
- type=int,
- help="Defines the number of times to run in continuous mode",
-)
-@click.option("--speak", is_flag=True, help="Enable Speak Mode")
-@click.option("--debug", is_flag=True, help="Enable Debug Mode")
-@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
-@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
-@click.option(
- "--use-memory",
- "-m",
- "memory_type",
- type=str,
- help="Defines which Memory backend to use",
-)
-@click.option(
- "-b",
- "--browser-name",
- help="Specifies which web-browser to use when using selenium to scrape the web.",
-)
-@click.option(
- "--allow-downloads",
- is_flag=True,
- help="Dangerous: Allows Auto-GPT to download files natively.",
-)
-@click.option(
- "--skip-news",
- is_flag=True,
- help="Specifies whether to suppress the output of latest news on startup.",
-)
-@click.option(
- # TODO: this is a hidden option for now, necessary for integration testing.
- # We should make this public once we're ready to roll out agent specific workspaces.
- "--workspace-directory",
- "-w",
- type=click.Path(),
- hidden=True,
-)
-@click.option(
- "--install-plugin-deps",
- is_flag=True,
- help="Installs external dependencies for 3rd party plugins.",
-)
-@click.option(
- "--ai-name",
- type=str,
- help="AI name override",
-)
-@click.option(
- "--ai-role",
- type=str,
- help="AI role override",
-)
-@click.option(
- "--ai-goal",
- type=str,
- multiple=True,
- help="AI goal override; may be used multiple times to pass multiple goals",
-)
-@click.pass_context
-def main(
- ctx: click.Context,
- continuous: bool,
- continuous_limit: int,
- ai_settings: str,
- prompt_settings: str,
- skip_reprompt: bool,
- speak: bool,
- debug: bool,
- gpt3only: bool,
- gpt4only: bool,
- memory_type: str,
- browser_name: str,
- allow_downloads: bool,
- skip_news: bool,
- workspace_directory: str,
- install_plugin_deps: bool,
- ai_name: Optional[str],
- ai_role: Optional[str],
- ai_goal: tuple[str],
-) -> None:
- """
- Welcome to AutoGPT, an experimental open-source application showcasing the capabilities of GPT-4 and pushing the boundaries of AI.
-
- Start an Auto-GPT assistant.
- """
- # Put imports inside function to avoid importing everything when starting the CLI
- from autogpt.main import run_auto_gpt
-
- if ctx.invoked_subcommand is None:
- run_auto_gpt(
- continuous,
- continuous_limit,
- ai_settings,
- prompt_settings,
- skip_reprompt,
- speak,
- debug,
- gpt3only,
- gpt4only,
- memory_type,
- browser_name,
- allow_downloads,
- skip_news,
- workspace_directory,
- install_plugin_deps,
- ai_name,
- ai_role,
- ai_goal,
- )
-
-
-if __name__ == "__main__":
- main()
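
Given the options defined above, typical invocations of this entry point looked like the following (illustrative names and goals):

    python -m autogpt --gpt3only --skip-news
    python -m autogpt --continuous -l 5 --ai-name DemoBot --ai-goal "Summarize README.md"
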
diff --git a/autogpt/command_decorator.py b/autogpt/command_decorator.py
deleted file mode 100644
index d082d9bf5..000000000
--- a/autogpt/command_decorator.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import functools
-from typing import Any, Callable, Optional, TypedDict
-
-from autogpt.config import Config
-from autogpt.models.command import Command, CommandParameter
-
-# Unique identifier for auto-gpt commands
-AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
-
-
-class CommandParameterSpec(TypedDict):
- type: str
- description: str
- required: bool
-
-
-def command(
- name: str,
- description: str,
- parameters: dict[str, CommandParameterSpec],
- enabled: bool | Callable[[Config], bool] = True,
- disabled_reason: Optional[str] = None,
- aliases: list[str] = [],
-) -> Callable[..., Any]:
- """The command decorator is used to create Command objects from ordinary functions."""
-
- def decorator(func: Callable[..., Any]) -> Command:
- typed_parameters = [
- CommandParameter(
- name=param_name,
- description=parameter.get("description"),
- type=parameter.get("type", "string"),
- required=parameter.get("required", False),
- )
- for param_name, parameter in parameters.items()
- ]
- cmd = Command(
- name=name,
- description=description,
- method=func,
- parameters=typed_parameters,
- enabled=enabled,
- disabled_reason=disabled_reason,
- aliases=aliases,
- )
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs) -> Any:
- return func(*args, **kwargs)
-
- wrapper.command = cmd
-
- setattr(wrapper, AUTO_GPT_COMMAND_IDENTIFIER, True)
-
- return wrapper
-
- return decorator
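
A minimal sketch of how this decorator was used to register a command (hypothetical example command, not from the codebase):

    from autogpt.agent.agent import Agent
    from autogpt.command_decorator import command

    @command(
        "say_hello",
        "Greets the given person",
        {
            "name": {
                "type": "string",
                "description": "Who to greet",
                "required": True,
            }
        },
        aliases=["greet"],
    )
    def say_hello(name: str, agent: Agent) -> str:
        return f"Hello, {name}!"

    # say_hello.command is now a Command object that a CommandRegistry can pick up.
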
diff --git a/autogpt/commands/decorators.py b/autogpt/commands/decorators.py
deleted file mode 100644
index 3528af04b..000000000
--- a/autogpt/commands/decorators.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import functools
-from pathlib import Path
-from typing import Callable
-
-from autogpt.agent.agent import Agent
-from autogpt.logs import logger
-
-
-def sanitize_path_arg(arg_name: str):
- def decorator(func: Callable):
- # Get position of path parameter, in case it is passed as a positional argument
- try:
- arg_index = list(func.__annotations__.keys()).index(arg_name)
- except ValueError:
- raise TypeError(
- f"Sanitized parameter '{arg_name}' absent or not annotated on function '{func.__name__}'"
- )
-
- # Get position of agent parameter, in case it is passed as a positional argument
- try:
- agent_arg_index = list(func.__annotations__.keys()).index("agent")
- except ValueError:
- raise TypeError(
- f"Parameter 'agent' absent or not annotated on function '{func.__name__}'"
- )
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- logger.debug(f"Sanitizing arg '{arg_name}' on function '{func.__name__}'")
- logger.debug(f"Function annotations: {func.__annotations__}")
-
- # Get Agent from the called function's arguments
- agent = kwargs.get(
- "agent", len(args) > agent_arg_index and args[agent_arg_index]
- )
- logger.debug(f"Args: {args}")
- logger.debug(f"KWArgs: {kwargs}")
- logger.debug(f"Agent argument lifted from function call: {agent}")
- if not isinstance(agent, Agent):
- raise RuntimeError("Could not get Agent from decorated command's args")
-
- # Sanitize the specified path argument, if one is given
- given_path: str | Path | None = kwargs.get(
- arg_name, len(args) > arg_index and args[arg_index] or None
- )
- if given_path:
- if given_path in {"", "/"}:
- sanitized_path = str(agent.workspace.root)
- else:
- sanitized_path = str(agent.workspace.get_path(given_path))
-
- if arg_name in kwargs:
- kwargs[arg_name] = sanitized_path
- else:
- # args is an immutable tuple; must be converted to a list to update
- arg_list = list(args)
- arg_list[arg_index] = sanitized_path
- args = tuple(arg_list)
-
- return func(*args, **kwargs)
-
- return wrapper
-
- return decorator
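
The net effect of sanitize_path_arg, shown on a hypothetical command: the decorated body receives paths already resolved against the agent's workspace, with "" and "/" mapping to the workspace root:

    from pathlib import Path

    from autogpt.agent.agent import Agent
    from autogpt.commands.decorators import sanitize_path_arg

    @sanitize_path_arg("filename")
    def touch_file(filename: str, agent: Agent) -> str:
        # By the time the body runs, `filename` has been rewritten to an
        # absolute path inside agent.workspace.
        Path(filename).touch()
        return f"Created {filename}"
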
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
deleted file mode 100644
index aad93193e..000000000
--- a/autogpt/commands/execute_code.py
+++ /dev/null
@@ -1,303 +0,0 @@
-"""Execute code in a Docker container"""
-import os
-import subprocess
-from pathlib import Path
-
-import docker
-from docker.errors import DockerException, ImageNotFound
-from docker.models.containers import Container as DockerContainer
-
-from autogpt.agent.agent import Agent
-from autogpt.command_decorator import command
-from autogpt.config import Config
-from autogpt.logs import logger
-
-from .decorators import sanitize_path_arg
-
-ALLOWLIST_CONTROL = "allowlist"
-DENYLIST_CONTROL = "denylist"
-
-
-@command(
- "execute_python_code",
- "Creates a Python file and executes it",
- {
- "code": {
- "type": "string",
- "description": "The Python code to run",
- "required": True,
- },
- "name": {
- "type": "string",
- "description": "A name to be given to the python file",
- "required": True,
- },
- },
-)
-def execute_python_code(code: str, name: str, agent: Agent) -> str:
- """Create and execute a Python file in a Docker container and return the STDOUT of the
- executed code. If there is any data that needs to be captured, use a print statement.
-
- Args:
- code (str): The Python code to run
- name (str): A name to be given to the Python file
-
- Returns:
- str: The STDOUT captured from the code when it ran
- """
- ai_name = agent.ai_config.ai_name
- code_dir = agent.workspace.get_path(Path(ai_name, "executed_code"))
- os.makedirs(code_dir, exist_ok=True)
-
- if not name.endswith(".py"):
- name = name + ".py"
-
- # The `name` arg is not covered by @sanitize_path_arg,
- # so sanitization must be done here to prevent path traversal.
- file_path = agent.workspace.get_path(code_dir / name)
- if not file_path.is_relative_to(code_dir):
- return "Error: 'name' argument resulted in path traversal, operation aborted"
-
- try:
- with open(file_path, "w+", encoding="utf-8") as f:
- f.write(code)
-
- return execute_python_file(str(file_path), agent)
- except Exception as e:
- return f"Error: {str(e)}"
-
-
-@command(
- "execute_python_file",
- "Executes an existing Python file",
- {
- "filename": {
- "type": "string",
- "description": "The name of the file to execute",
- "required": True,
- },
- },
-)
-@sanitize_path_arg("filename")
-def execute_python_file(filename: str, agent: Agent) -> str:
- """Execute a Python file in a Docker container and return the output
-
- Args:
- filename (str): The name of the file to execute
-
- Returns:
- str: The output of the file
- """
- logger.info(
- f"Executing python file '{filename}' in working directory '{agent.config.workspace_path}'"
- )
-
- if not filename.endswith(".py"):
- return "Error: Invalid file type. Only .py files are allowed."
-
- file_path = Path(filename)
- if not file_path.is_file():
- # Mimic the response that you get from the command line so that it's easier to identify
- return (
- f"python: can't open file '{filename}': [Errno 2] No such file or directory"
- )
-
- if we_are_running_in_a_docker_container():
- logger.debug(
- f"Auto-GPT is running in a Docker container; executing {file_path} directly..."
- )
- result = subprocess.run(
- ["python", str(file_path)],
- capture_output=True,
- encoding="utf8",
- cwd=agent.config.workspace_path,
- )
- if result.returncode == 0:
- return result.stdout
- else:
- return f"Error: {result.stderr}"
-
- logger.debug("Auto-GPT is not running in a Docker container")
- try:
- client = docker.from_env()
- # You can replace this with the desired Python image/version
- # You can find available Python images on Docker Hub:
- # https://hub.docker.com/_/python
- image_name = "python:3-alpine"
- try:
- client.images.get(image_name)
- logger.debug(f"Image '{image_name}' found locally")
- except ImageNotFound:
- logger.info(
- f"Image '{image_name}' not found locally, pulling from Docker Hub..."
- )
- # Use the low-level API to stream the pull response
- low_level_client = docker.APIClient()
- for line in low_level_client.pull(image_name, stream=True, decode=True):
- # Print the status and progress, if available
- status = line.get("status")
- progress = line.get("progress")
- if status and progress:
- logger.info(f"{status}: {progress}")
- elif status:
- logger.info(status)
-
- logger.debug(f"Running {file_path} in a {image_name} container...")
- container: DockerContainer = client.containers.run(
- image_name,
- ["python", str(file_path.relative_to(agent.workspace.root))],
- volumes={
- agent.config.workspace_path: {
- "bind": "/workspace",
- "mode": "ro",
- }
- },
- working_dir="/workspace",
- stderr=True,
- stdout=True,
- detach=True,
- ) # type: ignore
-
- container.wait()
- logs = container.logs().decode("utf-8")
- container.remove()
-
- # print(f"Execution complete. Output: {output}")
- # print(f"Logs: {logs}")
-
- return logs
-
- except DockerException as e:
- logger.warn(
- "Could not run the script in a container. If you haven't already, please install Docker: https://docs.docker.com/get-docker/"
- )
- return f"Error: {str(e)}"
-
- except Exception as e:
- return f"Error: {str(e)}"
-
-
-def validate_command(command: str, config: Config) -> bool:
- """Validate a command to ensure it is allowed
-
- Args:
- command (str): The command to validate
- config (Config): The config to use to validate the command
-
- Returns:
- bool: True if the command is allowed, False otherwise
- """
- if not command:
- return False
-
- command_name = command.split()[0]
-
- if config.shell_command_control == ALLOWLIST_CONTROL:
- return command_name in config.shell_allowlist
- else:
- return command_name not in config.shell_denylist
-
-
-@command(
- "execute_shell",
- "Executes a Shell Command, non-interactive commands only",
- {
- "command_line": {
- "type": "string",
- "description": "The command line to execute",
- "required": True,
- }
- },
- enabled=lambda config: config.execute_local_commands,
- disabled_reason="You are not allowed to run local shell commands. To execute"
- " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
- "in your config file: .env - do not attempt to bypass the restriction.",
-)
-def execute_shell(command_line: str, agent: Agent) -> str:
- """Execute a shell command and return the output
-
- Args:
- command_line (str): The command line to execute
-
- Returns:
- str: The output of the command
- """
- if not validate_command(command_line, agent.config):
- logger.info(f"Command '{command_line}' not allowed")
- return "Error: This Shell Command is not allowed."
-
- current_dir = Path.cwd()
- # Change dir into workspace if necessary
- if not current_dir.is_relative_to(agent.config.workspace_path):
- os.chdir(agent.config.workspace_path)
-
- logger.info(
- f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
- )
-
- result = subprocess.run(command_line, capture_output=True, shell=True)
- output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
-
- # Change back to whatever the prior working dir was
-
- os.chdir(current_dir)
- return output
-
-
-@command(
- "execute_shell_popen",
- "Executes a Shell Command, non-interactive commands only",
- {
- "command_line": {
- "type": "string",
- "description": "The command line to execute",
- "required": True,
- }
- },
- lambda config: config.execute_local_commands,
- "You are not allowed to run local shell commands. To execute"
- " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
- "in your config. Do not attempt to bypass the restriction.",
-)
-def execute_shell_popen(command_line, agent: Agent) -> str:
- """Execute a shell command with Popen and return an English description
- of the event and the process id
-
- Args:
- command_line (str): The command line to execute
-
- Returns:
- str: Description of the fact that the process started and its id
- """
- if not validate_command(command_line, agent.config):
- logger.info(f"Command '{command_line}' not allowed")
- return "Error: This Shell Command is not allowed."
-
- current_dir = os.getcwd()
- # Change dir into workspace if necessary
- # Substring test would match partial path prefixes; use is_relative_to as in execute_shell
- if not Path(current_dir).is_relative_to(agent.config.workspace_path):
- os.chdir(agent.config.workspace_path)
-
- logger.info(
- f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
- )
-
- do_not_show_output = subprocess.DEVNULL
- process = subprocess.Popen(
- command_line, shell=True, stdout=do_not_show_output, stderr=do_not_show_output
- )
-
- # Change back to whatever the prior working dir was
-
- os.chdir(current_dir)
-
- return f"Subprocess started with PID:'{str(process.pid)}'"
-
-
-def we_are_running_in_a_docker_container() -> bool:
- """Check if we are running in a Docker container
-
- Returns:
- bool: True if we are running in a Docker container, False otherwise
- """
- return os.path.exists("/.dockerenv")
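
validate_command keys on the first whitespace-separated token of the command line. A quick illustration with a stub config (hypothetical attribute values):

    class StubConfig:
        shell_command_control = "denylist"  # or "allowlist"
        shell_allowlist = ["ls", "cat"]
        shell_denylist = ["rm", "sudo"]

    cfg = StubConfig()
    assert validate_command("ls -la /tmp", cfg)   # "ls" is not on the denylist
    assert not validate_command("rm -rf /", cfg)  # "rm" is denied
    assert not validate_command("", cfg)          # empty commands are always rejected
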
diff --git a/autogpt/commands/file_operations.py b/autogpt/commands/file_operations.py
deleted file mode 100644
index 0a06da318..000000000
--- a/autogpt/commands/file_operations.py
+++ /dev/null
@@ -1,340 +0,0 @@
-"""File operations for AutoGPT"""
-from __future__ import annotations
-
-import contextlib
-import hashlib
-import os
-import os.path
-from pathlib import Path
-from typing import Generator, Literal
-
-from autogpt.agent.agent import Agent
-from autogpt.command_decorator import command
-from autogpt.logs import logger
-from autogpt.memory.vector import MemoryItem, VectorMemory
-
-from .decorators import sanitize_path_arg
-from .file_operations_utils import read_textual_file
-
-Operation = Literal["write", "append", "delete"]
-
-
-def text_checksum(text: str) -> str:
- """Get the hex checksum for the given text."""
- return hashlib.md5(text.encode("utf-8")).hexdigest()
-
-
-def operations_from_log(
- log_path: str,
-) -> Generator[tuple[Operation, str, str | None], None, None]:
- """Parse the file operations log, yielding an (operation, path, checksum) tuple per entry"""
- try:
- log = open(log_path, "r", encoding="utf-8")
- except FileNotFoundError:
- return
-
- for line in log:
- line = line.replace("File Operation Logger", "").strip()
- if not line:
- continue
- operation, tail = line.split(": ", maxsplit=1)
- operation = operation.strip()
- if operation in ("write", "append"):
- try:
- path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
- except ValueError:
- logger.warn(f"File log entry lacks checksum: '{line}'")
- path, checksum = tail.strip(), None
- yield (operation, path, checksum)
- elif operation == "delete":
- yield (operation, tail.strip(), None)
-
- log.close()
-
-
-def file_operations_state(log_path: str) -> dict[str, str]:
- """Iterates over the operations log and returns the expected state.
-
- Parses a log file at config.file_logger_path to construct a dictionary that maps
- each file path written or appended to its checksum. Deleted files are removed
- from the dictionary.
-
- Returns:
- A dictionary mapping file paths to their checksums.
-
- Raises:
- FileNotFoundError: If config.file_logger_path is not found.
- ValueError: If the log file content is not in the expected format.
- """
- state = {}
- for operation, path, checksum in operations_from_log(log_path):
- if operation in ("write", "append"):
- state[path] = checksum
- elif operation == "delete":
- del state[path]
- return state
-
-
-@sanitize_path_arg("filename")
-def is_duplicate_operation(
- operation: Operation, filename: str, agent: Agent, checksum: str | None = None
-) -> bool:
- """Check if the operation has already been performed
-
- Args:
- operation: The operation to check for
- filename: The name of the file to check for
- agent: The agent
- checksum: The checksum of the contents to be written
-
- Returns:
- True if the operation has already been performed on the file
- """
- # Make the filename into a relative path if possible
- with contextlib.suppress(ValueError):
- filename = str(Path(filename).relative_to(agent.workspace.root))
-
- state = file_operations_state(agent.config.file_logger_path)
- if operation == "delete" and filename not in state:
- return True
- if operation == "write" and state.get(filename) == checksum:
- return True
- return False
-
-
-@sanitize_path_arg("filename")
-def log_operation(
- operation: Operation, filename: str, agent: Agent, checksum: str | None = None
-) -> None:
- """Log the file operation to the file_logger.txt
-
- Args:
- operation: The operation to log
- filename: The name of the file the operation was performed on
- checksum: The checksum of the contents to be written
- """
- # Make the filename into a relative path if possible
- with contextlib.suppress(ValueError):
- filename = str(Path(filename).relative_to(agent.workspace.root))
-
- log_entry = f"{operation}: {filename}"
- if checksum is not None:
- log_entry += f" #{checksum}"
- logger.debug(f"Logging file operation: {log_entry}")
- append_to_file(
- agent.config.file_logger_path, f"{log_entry}\n", agent, should_log=False
- )
-
-
-@command(
- "read_file",
- "Read an existing file",
- {
- "filename": {
- "type": "string",
- "description": "The path of the file to read",
- "required": True,
- }
- },
-)
-@sanitize_path_arg("filename")
-def read_file(filename: str, agent: Agent) -> str:
- """Read a file and return the contents
-
- Args:
- filename (str): The name of the file to read
-
- Returns:
- str: The contents of the file
- """
- try:
- content = read_textual_file(filename, logger)
-
- # TODO: invalidate/update memory when file is edited
- file_memory = MemoryItem.from_text_file(content, filename, agent.config)
- if len(file_memory.chunks) > 1:
- return file_memory.summary
-
- return content
- except Exception as e:
- return f"Error: {str(e)}"
-
-
-def ingest_file(
- filename: str,
- memory: VectorMemory,
-) -> None:
- """
- Ingest a file by reading its content, splitting it into chunks with a specified
- maximum length and overlap, and adding the chunks to the memory storage.
-
- Args:
- filename: The name of the file to ingest
- memory: An object with an add() method to store the chunks in memory
- """
- try:
- logger.info(f"Ingesting file {filename}")
- content = read_file(filename)
-
- # TODO: differentiate between different types of files
- file_memory = MemoryItem.from_text_file(content, filename)
- logger.debug(f"Created memory: {file_memory.dump(True)}")
- memory.add(file_memory)
-
- logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
- except Exception as err:
- logger.warn(f"Error while ingesting file '{filename}': {err}")
-
-
-@command(
- "write_to_file",
- "Writes to a file",
- {
- "filename": {
- "type": "string",
- "description": "The name of the file to write to",
- "required": True,
- },
- "text": {
- "type": "string",
- "description": "The text to write to the file",
- "required": True,
- },
- },
- aliases=["write_file", "create_file"],
-)
-@sanitize_path_arg("filename")
-def write_to_file(filename: str, text: str, agent: Agent) -> str:
- """Write text to a file
-
- Args:
- filename (str): The name of the file to write to
- text (str): The text to write to the file
-
- Returns:
- str: A message indicating success or failure
- """
- checksum = text_checksum(text)
- if is_duplicate_operation("write", filename, agent, checksum):
- return "Error: File has already been updated."
- try:
- directory = os.path.dirname(filename)
- os.makedirs(directory, exist_ok=True)
- with open(filename, "w", encoding="utf-8") as f:
- f.write(text)
- log_operation("write", filename, agent, checksum)
- return "File written to successfully."
- except Exception as err:
- return f"Error: {err}"
-
-
-@command(
- "append_to_file",
- "Appends to a file",
- {
- "filename": {
- "type": "string",
- "description": "The name of the file to write to",
- "required": True,
- },
- "text": {
- "type": "string",
- "description": "The text to write to the file",
- "required": True,
- },
- },
-)
-@sanitize_path_arg("filename")
-def append_to_file(
- filename: str, text: str, agent: Agent, should_log: bool = True
-) -> str:
- """Append text to a file
-
- Args:
- filename (str): The name of the file to append to
- text (str): The text to append to the file
- should_log (bool): Should log output
-
- Returns:
- str: A message indicating success or failure
- """
- try:
- directory = os.path.dirname(filename)
- os.makedirs(directory, exist_ok=True)
- with open(filename, "a", encoding="utf-8") as f:
- f.write(text)
-
- if should_log:
- with open(filename, "r", encoding="utf-8") as f:
- checksum = text_checksum(f.read())
- log_operation("append", filename, agent, checksum=checksum)
-
- return "Text appended successfully."
- except Exception as err:
- return f"Error: {err}"
-
-
-@command(
- "delete_file",
- "Deletes a file",
- {
- "filename": {
- "type": "string",
- "description": "The name of the file to delete",
- "required": True,
- }
- },
-)
-@sanitize_path_arg("filename")
-def delete_file(filename: str, agent: Agent) -> str:
- """Delete a file
-
- Args:
- filename (str): The name of the file to delete
-
- Returns:
- str: A message indicating success or failure
- """
- if is_duplicate_operation("delete", filename, agent):
- return "Error: File has already been deleted."
- try:
- os.remove(filename)
- log_operation("delete", filename, agent)
- return "File deleted successfully."
- except Exception as err:
- return f"Error: {err}"
-
-
-@command(
- "list_files",
- "Lists Files in a Directory",
- {
- "directory": {
- "type": "string",
- "description": "The directory to list files in",
- "required": True,
- }
- },
-)
-@sanitize_path_arg("directory")
-def list_files(directory: str, agent: Agent) -> list[str]:
- """List files in a directory recursively
-
- Args:
- directory (str): The directory to search in
-
- Returns:
- list[str]: A list of files found in the directory
- """
- found_files = []
-
- for root, _, files in os.walk(directory):
- for file in files:
- if file.startswith("."):
- continue
- relative_path = os.path.relpath(
- os.path.join(root, file), agent.config.workspace_path
- )
- found_files.append(relative_path)
-
- return found_files
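
The log consumed by operations_from_log and file_operations_state is plain text with one "operation: path #checksum" entry per line. A sketch of how a few entries reduce to state (illustrative paths, truncated checksums):

    # file_logger.txt:
    #   write: notes.txt #d41d8cd9
    #   append: notes.txt #9e107d9d
    #   write: draft.txt #e4d909c2
    #   delete: draft.txt
    #
    # file_operations_state("file_logger.txt") would then return:
    state = {"notes.txt": "9e107d9d"}  # last checksum wins; draft.txt was deleted
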
diff --git a/autogpt/commands/file_operations_utils.py b/autogpt/commands/file_operations_utils.py
deleted file mode 100644
index b00779688..000000000
--- a/autogpt/commands/file_operations_utils.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import json
-import os
-
-import charset_normalizer
-import docx
-import markdown
-import PyPDF2
-import yaml
-from bs4 import BeautifulSoup
-from pylatexenc.latex2text import LatexNodes2Text
-
-from autogpt import logs
-from autogpt.logs import logger
-
-
-class ParserStrategy:
- def read(self, file_path: str) -> str:
- raise NotImplementedError
-
-
-# Basic text file reading
-class TXTParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- charset_match = charset_normalizer.from_path(file_path).best()
- logger.debug(f"Reading '{file_path}' with encoding '{charset_match.encoding}'")
- return str(charset_match)
-
-
-# Reading text from binary file using pdf parser
-class PDFParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- parser = PyPDF2.PdfReader(file_path)
- text = ""
- for page_idx in range(len(parser.pages)):
- text += parser.pages[page_idx].extract_text()
- return text
-
-
-# Reading text from binary file using docs parser
-class DOCXParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- doc_file = docx.Document(file_path)
- text = ""
- for para in doc_file.paragraphs:
- text += para.text
- return text
-
-
-# Reading as dictionary and returning string format
-class JSONParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- with open(file_path, "r") as f:
- data = json.load(f)
- text = str(data)
- return text
-
-
-class XMLParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- with open(file_path, "r") as f:
- soup = BeautifulSoup(f, "xml")
- text = soup.get_text()
- return text
-
-
-# Reading as dictionary and returning string format
-class YAMLParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- with open(file_path, "r") as f:
- data = yaml.load(f, Loader=yaml.FullLoader)
- text = str(data)
- return text
-
-
-class HTMLParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- with open(file_path, "r") as f:
- soup = BeautifulSoup(f, "html.parser")
- text = soup.get_text()
- return text
-
-
-class MarkdownParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- with open(file_path, "r") as f:
- html = markdown.markdown(f.read())
- text = "".join(BeautifulSoup(html, "html.parser").findAll(string=True))
- return text
-
-
-class LaTeXParser(ParserStrategy):
- def read(self, file_path: str) -> str:
- with open(file_path, "r") as f:
- latex = f.read()
- text = LatexNodes2Text().latex_to_text(latex)
- return text
-
-
-class FileContext:
- def __init__(self, parser: ParserStrategy, logger: logs.Logger):
- self.parser = parser
- self.logger = logger
-
- def set_parser(self, parser: ParserStrategy) -> None:
- self.logger.debug(f"Setting Context Parser to {parser}")
- self.parser = parser
-
- def read_file(self, file_path) -> str:
- self.logger.debug(f"Reading file {file_path} with parser {self.parser}")
- return self.parser.read(file_path)
-
-
-extension_to_parser = {
- ".txt": TXTParser(),
- ".csv": TXTParser(),
- ".pdf": PDFParser(),
- ".docx": DOCXParser(),
- ".json": JSONParser(),
- ".xml": XMLParser(),
- ".yaml": YAMLParser(),
- ".yml": YAMLParser(),
- ".html": HTMLParser(),
- ".htm": HTMLParser(),
- ".xhtml": HTMLParser(),
- ".md": MarkdownParser(),
- ".markdown": MarkdownParser(),
- ".tex": LaTeXParser(),
-}
-
-
-def is_file_binary_fn(file_path: str):
- """Check whether a file is binary by loading its content and looking for null bytes
-
- Args:
- file_path (str): The path of the file to check
-
- Returns:
- bool: True if the file contains null bytes (i.e. is binary), False otherwise
- """
- with open(file_path, "rb") as f:
- file_data = f.read()
- if b"\x00" in file_data:
- return True
- return False
-
-
-def read_textual_file(file_path: str, logger: logs.Logger) -> str:
- if not os.path.isfile(file_path):
- raise FileNotFoundError(
- f"read_file {file_path} failed: no such file or directory"
- )
- is_binary = is_file_binary_fn(file_path)
- file_extension = os.path.splitext(file_path)[1].lower()
- parser = extension_to_parser.get(file_extension)
- if not parser:
- if is_binary:
- raise ValueError(f"Unsupported binary file format: {file_extension}")
- # fallback to txt file parser (to support script and code files loading)
- parser = TXTParser()
- file_context = FileContext(parser, logger)
- return file_context.read_file(file_path)
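
Usage sketch for the parser dispatch above (hypothetical file names): known extensions route to their parser, and unknown non-binary extensions fall back to TXTParser:

    from autogpt.logs import logger

    text = read_textual_file("notes.md", logger)   # ".md" -> MarkdownParser
    code = read_textual_file("script.py", logger)  # no ".py" entry -> TXTParser fallback
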
diff --git a/autogpt/commands/git_operations.py b/autogpt/commands/git_operations.py
deleted file mode 100644
index 276031f78..000000000
--- a/autogpt/commands/git_operations.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Git operations for autogpt"""
-
-from git.repo import Repo
-
-from autogpt.agent.agent import Agent
-from autogpt.command_decorator import command
-from autogpt.url_utils.validators import validate_url
-
-from .decorators import sanitize_path_arg
-
-
-@command(
- "clone_repository",
- "Clones a Repository",
- {
- "url": {
- "type": "string",
- "description": "The URL of the repository to clone",
- "required": True,
- },
- "clone_path": {
- "type": "string",
- "description": "The path to clone the repository to",
- "required": True,
- },
- },
- lambda config: bool(config.github_username and config.github_api_key),
- "Configure github_username and github_api_key.",
-)
-@sanitize_path_arg("clone_path")
-@validate_url
-def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
- """Clone a GitHub repository locally.
-
- Args:
- url (str): The URL of the repository to clone.
- clone_path (str): The path to clone the repository to.
-
- Returns:
- str: The result of the clone operation.
- """
- split_url = url.split("//")
- auth_repo_url = (
- f"//{agent.config.github_username}:{agent.config.github_api_key}@".join(
- split_url
- )
- )
- try:
- Repo.clone_from(url=auth_repo_url, to_path=clone_path)
- return f"""Cloned {url} to {clone_path}"""
- except Exception as e:
- return f"Error: {str(e)}"
diff --git a/autogpt/commands/image_gen.py b/autogpt/commands/image_gen.py
deleted file mode 100644
index b1a89b289..000000000
--- a/autogpt/commands/image_gen.py
+++ /dev/null
@@ -1,200 +0,0 @@
-""" Image Generation Module for AutoGPT."""
-import io
-import json
-import time
-import uuid
-from base64 import b64decode
-
-import openai
-import requests
-from PIL import Image
-
-from autogpt.agent.agent import Agent
-from autogpt.command_decorator import command
-from autogpt.logs import logger
-
-
-@command(
- "generate_image",
- "Generates an Image",
- {
- "prompt": {
- "type": "string",
- "description": "The prompt used to generate the image",
- "required": True,
- },
- },
- lambda config: bool(config.image_provider),
- "Requires a image provider to be set.",
-)
-def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
- """Generate an image from a prompt.
-
- Args:
- prompt (str): The prompt to use
- size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace)
-
- Returns:
- str: The filename of the image
- """
- filename = f"{agent.config.workspace_path}/{str(uuid.uuid4())}.jpg"
-
- # DALL-E
- if agent.config.image_provider == "dalle":
- return generate_image_with_dalle(prompt, filename, size, agent)
- # HuggingFace
- elif agent.config.image_provider == "huggingface":
- return generate_image_with_hf(prompt, filename, agent)
- # SD WebUI
- elif agent.config.image_provider == "sdwebui":
- return generate_image_with_sd_webui(prompt, filename, agent, size)
- return "No Image Provider Set"
-
-
-def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
- """Generate an image with HuggingFace's API.
-
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
-
- Returns:
- str: The filename of the image
- """
- API_URL = f"https://api-inference.huggingface.co/models/{agent.config.huggingface_image_model}"
- if agent.config.huggingface_api_token is None:
- raise ValueError(
- "You need to set your Hugging Face API token in the config file."
- )
- headers = {
- "Authorization": f"Bearer {agent.config.huggingface_api_token}",
- "X-Use-Cache": "false",
- }
-
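-    # The Hugging Face inference API may respond with an "estimated_time"
-    # while the model is still loading; retry up to 10 times, sleeping for
-    # the suggested delay between attempts.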
- retry_count = 0
- while retry_count < 10:
- response = requests.post(
- API_URL,
- headers=headers,
- json={
- "inputs": prompt,
- },
- )
-
- if response.ok:
- try:
- image = Image.open(io.BytesIO(response.content))
- logger.info(f"Image Generated for prompt:{prompt}")
- image.save(filename)
- return f"Saved to disk:{filename}"
- except Exception as e:
- logger.error(e)
- break
- else:
- try:
- error = json.loads(response.text)
- if "estimated_time" in error:
- delay = error["estimated_time"]
- logger.debug(response.text)
- logger.info("Retrying in", delay)
- time.sleep(delay)
- else:
- break
- except Exception as e:
- logger.error(e)
- break
-
- retry_count += 1
-
- return f"Error creating image."
-
-
-def generate_image_with_dalle(
- prompt: str, filename: str, size: int, agent: Agent
-) -> str:
- """Generate an image with DALL-E.
-
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
- size (int): The size of the image
-
- Returns:
- str: The filename of the image
- """
-
- # Check for supported image sizes
- if size not in [256, 512, 1024]:
- closest = min([256, 512, 1024], key=lambda x: abs(x - size))
- logger.info(
- f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
- )
- size = closest
-
- response = openai.Image.create(
- prompt=prompt,
- n=1,
- size=f"{size}x{size}",
- response_format="b64_json",
- api_key=agent.config.openai_api_key,
- )
-
- logger.info(f"Image Generated for prompt:{prompt}")
-
- image_data = b64decode(response["data"][0]["b64_json"])
-
- with open(filename, mode="wb") as png:
- png.write(image_data)
-
- return f"Saved to disk:{filename}"
-
-
-def generate_image_with_sd_webui(
- prompt: str,
- filename: str,
- agent: Agent,
- size: int = 512,
- negative_prompt: str = "",
- extra: dict = {},
-) -> str:
- """Generate an image with Stable Diffusion webui.
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
-        size (int, optional): The size of the image. Defaults to 512.
- negative_prompt (str, optional): The negative prompt to use. Defaults to "".
- extra (dict, optional): Extra parameters to pass to the API. Defaults to {}.
- Returns:
- str: The filename of the image
- """
- # Create a session and set the basic auth if needed
- s = requests.Session()
- if agent.config.sd_webui_auth:
- username, password = agent.config.sd_webui_auth.split(":")
- s.auth = (username, password or "")
-
- # Generate the images
- response = requests.post(
- f"{agent.config.sd_webui_url}/sdapi/v1/txt2img",
- json={
- "prompt": prompt,
- "negative_prompt": negative_prompt,
- "sampler_index": "DDIM",
- "steps": 20,
- "config_scale": 7.0,
- "width": size,
- "height": size,
- "n_iter": 1,
- **extra,
- },
- )
-
- logger.info(f"Image Generated for prompt:{prompt}")
-
- # Save the image to disk
- response = response.json()
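-    # The API may return plain base64 or a data URI
-    # ("data:image/png;base64,<payload>"); keep only the base64 payload.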
-    b64 = b64decode(response["images"][0].split(",", 1)[-1])
- image = Image.open(io.BytesIO(b64))
- image.save(filename)
-
- return f"Saved to disk:{filename}"
diff --git a/autogpt/commands/task_statuses.py b/autogpt/commands/task_statuses.py
deleted file mode 100644
index 062ebe3a4..000000000
--- a/autogpt/commands/task_statuses.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""Task Statuses module."""
-from __future__ import annotations
-
-from typing import NoReturn
-
-from autogpt.agent.agent import Agent
-from autogpt.command_decorator import command
-from autogpt.logs import logger
-
-
-@command(
- "goals_accomplished",
- "Goals are accomplished and there is nothing left to do",
- {
- "reason": {
- "type": "string",
- "description": "A summary to the user of how the goals were accomplished",
- "required": True,
- }
- },
-)
-def task_complete(reason: str, agent: Agent) -> NoReturn:
- """
-    A function that logs the given reason and shuts down the program
-
-    Parameters:
-        reason (str): A summary to the user of how the goals were accomplished.
-    Returns:
-        NoReturn: the program exits before this function returns.
- """
- logger.info(title="Shutting down...\n", message=reason)
- quit()
diff --git a/autogpt/commands/web_search.py b/autogpt/commands/web_search.py
deleted file mode 100644
index d47d680b2..000000000
--- a/autogpt/commands/web_search.py
+++ /dev/null
@@ -1,143 +0,0 @@
-"""Google search command for Autogpt."""
-from __future__ import annotations
-
-import json
-import time
-from itertools import islice
-
-from duckduckgo_search import DDGS
-
-from autogpt.agent.agent import Agent
-from autogpt.command_decorator import command
-
-DUCKDUCKGO_MAX_ATTEMPTS = 3
-
-
-@command(
- "web_search",
- "Searches the web",
- {
- "query": {
- "type": "string",
- "description": "The search query",
- "required": True,
- }
- },
- aliases=["search"],
-)
-def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
- """Return the results of a Google search
-
- Args:
- query (str): The search query.
- num_results (int): The number of results to return.
-
- Returns:
- str: The results of the search.
- """
- search_results = []
- attempts = 0
-
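-    # DuckDuckGo occasionally returns an empty result set; retry a few
-    # times before returning whatever was gathered.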
- while attempts < DUCKDUCKGO_MAX_ATTEMPTS:
- if not query:
- return json.dumps(search_results)
-
- results = DDGS().text(query)
- search_results = list(islice(results, num_results))
-
- if search_results:
- break
-
- time.sleep(1)
- attempts += 1
-
- results = json.dumps(search_results, ensure_ascii=False, indent=4)
- return safe_google_results(results)
-
-
-@command(
- "google",
- "Google Search",
- {
- "query": {
- "type": "string",
- "description": "The search query",
- "required": True,
- }
- },
- lambda config: bool(config.google_api_key)
- and bool(config.google_custom_search_engine_id),
- "Configure google_api_key and custom_search_engine_id.",
- aliases=["search"],
-)
-def google(query: str, agent: Agent, num_results: int = 8) -> str:
- """Return the results of a Google search using the official Google API
-
- Args:
- query (str): The search query.
- num_results (int): The number of results to return.
-
- Returns:
- str: The results of the search.
- """
-
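-    # Imported lazily so googleapiclient is only required when this
-    # command is actually used.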
- from googleapiclient.discovery import build
- from googleapiclient.errors import HttpError
-
- try:
- # Get the Google API key and Custom Search Engine ID from the config file
- api_key = agent.config.google_api_key
- custom_search_engine_id = agent.config.google_custom_search_engine_id
-
- # Initialize the Custom Search API service
- service = build("customsearch", "v1", developerKey=api_key)
-
- # Send the search query and retrieve the results
- result = (
- service.cse()
- .list(q=query, cx=custom_search_engine_id, num=num_results)
- .execute()
- )
-
- # Extract the search result items from the response
- search_results = result.get("items", [])
-
- # Create a list of only the URLs from the search results
- search_results_links = [item["link"] for item in search_results]
-
- except HttpError as e:
- # Handle errors in the API call
- error_details = json.loads(e.content.decode())
-
- # Check if the error is related to an invalid or missing API key
- if error_details.get("error", {}).get(
- "code"
- ) == 403 and "invalid API key" in error_details.get("error", {}).get(
- "message", ""
- ):
- return "Error: The provided Google API key is invalid or missing."
- else:
- return f"Error: {e}"
-
- # Return the list of search result URLs
- return safe_google_results(search_results_links)
-
-
-def safe_google_results(results: str | list) -> str:
- """
- Return the results of a Google search in a safe format.
-
- Args:
- results (str | list): The search results.
-
- Returns:
- str: The results of the search.
- """
- if isinstance(results, list):
- safe_message = json.dumps(
- [result.encode("utf-8", "ignore").decode("utf-8") for result in results]
- )
- else:
- safe_message = results.encode("utf-8", "ignore").decode("utf-8")
- return safe_message
diff --git a/autogpt/commands/web_selenium.py b/autogpt/commands/web_selenium.py
deleted file mode 100644
index 821957f3e..000000000
--- a/autogpt/commands/web_selenium.py
+++ /dev/null
@@ -1,237 +0,0 @@
-"""Selenium web scraping module."""
-from __future__ import annotations
-
-import logging
-from pathlib import Path
-from sys import platform
-from typing import Optional, Type
-
-from bs4 import BeautifulSoup
-from selenium.common.exceptions import WebDriverException
-from selenium.webdriver.chrome.options import Options as ChromeOptions
-from selenium.webdriver.chrome.service import Service as ChromeDriverService
-from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
-from selenium.webdriver.common.by import By
-from selenium.webdriver.edge.options import Options as EdgeOptions
-from selenium.webdriver.edge.service import Service as EdgeDriverService
-from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver
-from selenium.webdriver.firefox.options import Options as FirefoxOptions
-from selenium.webdriver.firefox.service import Service as GeckoDriverService
-from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
-from selenium.webdriver.remote.webdriver import WebDriver
-from selenium.webdriver.safari.options import Options as SafariOptions
-from selenium.webdriver.safari.webdriver import WebDriver as SafariDriver
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.wait import WebDriverWait
-from webdriver_manager.chrome import ChromeDriverManager
-from webdriver_manager.firefox import GeckoDriverManager
-from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager
-
-from autogpt.agent.agent import Agent
-from autogpt.command_decorator import command
-from autogpt.logs import logger
-from autogpt.memory.vector import MemoryItem, get_memory
-from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-from autogpt.url_utils.validators import validate_url
-
-BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions
-
-FILE_DIR = Path(__file__).parent.parent
-
-
-@command(
- "browse_website",
- "Browses a Website",
- {
- "url": {"type": "string", "description": "The URL to visit", "required": True},
- "question": {
- "type": "string",
- "description": "What you want to find on the website",
- "required": True,
- },
- },
-)
-@validate_url
-def browse_website(url: str, question: str, agent: Agent) -> str:
- """Browse a website and return the answer and links to the user
-
- Args:
- url (str): The url of the website to browse
- question (str): The question asked by the user
-
- Returns:
-        str: The answer gathered from the website and up to 5 scraped links
- """
- try:
- driver, text = scrape_text_with_selenium(url, agent)
- except WebDriverException as e:
- # These errors are often quite long and include lots of context.
- # Just grab the first line.
- msg = e.msg.split("\n")[0]
- return f"Error: {msg}"
-
- add_header(driver)
- summary = summarize_memorize_webpage(url, text, question, agent, driver)
- links = scrape_links_with_selenium(driver, url)
-
- # Limit links to 5
- if len(links) > 5:
- links = links[:5]
- close_browser(driver)
- return f"Answer gathered from website: {summary}\n\nLinks: {links}"
-
-
-def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
- """Scrape text from a website using selenium
-
- Args:
- url (str): The url of the website to scrape
-
- Returns:
- Tuple[WebDriver, str]: The webdriver and the text scraped from the website
- """
- logging.getLogger("selenium").setLevel(logging.CRITICAL)
-
- options_available: dict[str, Type[BrowserOptions]] = {
- "chrome": ChromeOptions,
- "edge": EdgeOptions,
- "firefox": FirefoxOptions,
- "safari": SafariOptions,
- }
-
- options: BrowserOptions = options_available[agent.config.selenium_web_browser]()
- options.add_argument(
- "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
- )
-
- if agent.config.selenium_web_browser == "firefox":
- if agent.config.selenium_headless:
- options.headless = True
- options.add_argument("--disable-gpu")
- driver = FirefoxDriver(
- service=GeckoDriverService(GeckoDriverManager().install()), options=options
- )
- elif agent.config.selenium_web_browser == "edge":
- driver = EdgeDriver(
- service=EdgeDriverService(EdgeDriverManager().install()), options=options
- )
- elif agent.config.selenium_web_browser == "safari":
- # Requires a bit more setup on the users end
- # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
- driver = SafariDriver(options=options)
- else:
- if platform == "linux" or platform == "linux2":
- options.add_argument("--disable-dev-shm-usage")
- options.add_argument("--remote-debugging-port=9222")
-
- options.add_argument("--no-sandbox")
- if agent.config.selenium_headless:
- options.add_argument("--headless=new")
- options.add_argument("--disable-gpu")
-
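-        # Prefer a system-installed chromedriver (e.g. in Docker images);
-        # otherwise let webdriver-manager download a matching driver.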
- chromium_driver_path = Path("/usr/bin/chromedriver")
-
- driver = ChromeDriver(
- service=ChromeDriverService(str(chromium_driver_path))
- if chromium_driver_path.exists()
- else ChromeDriverService(ChromeDriverManager().install()),
- options=options,
- )
- driver.get(url)
-
- WebDriverWait(driver, 10).until(
- EC.presence_of_element_located((By.TAG_NAME, "body"))
- )
-
- # Get the HTML content directly from the browser's DOM
- page_source = driver.execute_script("return document.body.outerHTML;")
- soup = BeautifulSoup(page_source, "html.parser")
-
- for script in soup(["script", "style"]):
- script.extract()
-
- text = soup.get_text()
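-    # Collapse whitespace: strip every line, split on double spaces, and
-    # drop empty chunks so the text stays compact.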
- lines = (line.strip() for line in text.splitlines())
- chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
- text = "\n".join(chunk for chunk in chunks if chunk)
- return driver, text
-
-
-def scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]:
- """Scrape links from a website using selenium
-
-    Args:
-        driver (WebDriver): The webdriver to use to scrape the links
-        url (str): The base url used to resolve relative hyperlinks
-
- Returns:
- List[str]: The links scraped from the website
- """
- page_source = driver.page_source
- soup = BeautifulSoup(page_source, "html.parser")
-
- for script in soup(["script", "style"]):
- script.extract()
-
- hyperlinks = extract_hyperlinks(soup, url)
-
- return format_hyperlinks(hyperlinks)
-
-
-def close_browser(driver: WebDriver) -> None:
- """Close the browser
-
- Args:
- driver (WebDriver): The webdriver to close
-
- Returns:
- None
- """
- driver.quit()
-
-
-def add_header(driver: WebDriver) -> None:
- """Add a header to the website
-
- Args:
- driver (WebDriver): The webdriver to use to add the header
-
- Returns:
- None
- """
- try:
- with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_file:
- overlay_script = overlay_file.read()
- driver.execute_script(overlay_script)
- except Exception as e:
- print(f"Error executing overlay.js: {e}")
-
-
-def summarize_memorize_webpage(
- url: str,
- text: str,
- question: str,
- agent: Agent,
- driver: Optional[WebDriver] = None,
-) -> str:
- """Summarize text using the OpenAI API
-
- Args:
- url (str): The url of the text
- text (str): The text to summarize
- question (str): The question to ask the model
- driver (WebDriver): The webdriver to use to scroll the page
-
- Returns:
- str: The summary of the text
- """
- if not text:
- return "Error: No text to summarize"
-
- text_length = len(text)
- logger.info(f"Text length: {text_length} characters")
-
- memory = get_memory(agent.config)
-
- new_memory = MemoryItem.from_webpage(text, url, agent.config, question=question)
- memory.add(new_memory)
- return new_memory.summary
diff --git a/autogpt/config/__init__.py b/autogpt/config/__init__.py
deleted file mode 100644
index 1b98989b9..000000000
--- a/autogpt/config/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-This module contains the configuration classes for AutoGPT.
-"""
-from .ai_config import AIConfig
-from .config import Config, ConfigBuilder, check_openai_api_key
-
-__all__ = [
- "check_openai_api_key",
- "AIConfig",
- "Config",
- "ConfigBuilder",
-]
diff --git a/autogpt/config/ai_config.py b/autogpt/config/ai_config.py
deleted file mode 100644
index a2952c9dd..000000000
--- a/autogpt/config/ai_config.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# sourcery skip: do-not-use-staticmethod
-"""
-A module that contains the AIConfig class object that contains the configuration
-"""
-from __future__ import annotations
-
-import os
-import platform
-from pathlib import Path
-from typing import TYPE_CHECKING, Optional
-
-import distro
-import yaml
-
-if TYPE_CHECKING:
- from autogpt.models.command_registry import CommandRegistry
- from autogpt.prompts.generator import PromptGenerator
-
-# Soon this will go in a folder where it remembers more stuff about the run(s)
-SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
-
-
-class AIConfig:
- """
- A class object that contains the configuration information for the AI
-
- Attributes:
- ai_name (str): The name of the AI.
- ai_role (str): The description of the AI's role.
- ai_goals (list): The list of objectives the AI is supposed to complete.
- api_budget (float): The maximum dollar value for API calls (0.0 means infinite)
- """
-
- def __init__(
- self,
- ai_name: str = "",
- ai_role: str = "",
- ai_goals: list[str] = [],
- api_budget: float = 0.0,
- ) -> None:
- """
- Initialize a class instance
-
- Parameters:
- ai_name (str): The name of the AI.
- ai_role (str): The description of the AI's role.
- ai_goals (list): The list of objectives the AI is supposed to complete.
- api_budget (float): The maximum dollar value for API calls (0.0 means infinite)
- Returns:
- None
- """
- self.ai_name = ai_name
- self.ai_role = ai_role
- self.ai_goals = ai_goals
- self.api_budget = api_budget
- self.prompt_generator: PromptGenerator | None = None
- self.command_registry: CommandRegistry | None = None
-
- @staticmethod
- def load(ai_settings_file: str = SAVE_FILE) -> "AIConfig":
- """
- Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget)
- loaded from yaml file if yaml file exists, else returns class with no parameters.
-
- Parameters:
-            ai_settings_file (str): The path to the config yaml file.
- DEFAULT: "../ai_settings.yaml"
-
- Returns:
- cls (object): An instance of given cls object
- """
-
- try:
- with open(ai_settings_file, encoding="utf-8") as file:
- config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
- except FileNotFoundError:
- config_params = {}
-
- ai_name = config_params.get("ai_name", "")
- ai_role = config_params.get("ai_role", "")
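-        # Goals may parse as dicts if the YAML values weren't quoted;
-        # flatten them back to plain strings.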
- ai_goals = [
- str(goal).strip("{}").replace("'", "").replace('"', "")
- if isinstance(goal, dict)
- else str(goal)
- for goal in config_params.get("ai_goals", [])
- ]
- api_budget = config_params.get("api_budget", 0.0)
-        return AIConfig(ai_name, ai_role, ai_goals, api_budget)
-
- def save(self, ai_settings_file: str = SAVE_FILE) -> None:
- """
- Saves the class parameters to the specified file yaml file path as a yaml file.
-
- Parameters:
- ai_settings_file(str): The path to the config yaml file.
- DEFAULT: "../ai_settings.yaml"
-
- Returns:
- None
- """
-
- config = {
- "ai_name": self.ai_name,
- "ai_role": self.ai_role,
- "ai_goals": self.ai_goals,
- "api_budget": self.api_budget,
- }
- with open(ai_settings_file, "w", encoding="utf-8") as file:
- yaml.dump(config, file, allow_unicode=True)
-
- def construct_full_prompt(
- self, config, prompt_generator: Optional[PromptGenerator] = None
- ) -> str:
- """
- Returns a prompt to the user with the class information in an organized fashion.
-
- Parameters:
- None
-
- Returns:
- full_prompt (str): A string containing the initial prompt for the user
- including the ai_name, ai_role, ai_goals, and api_budget.
- """
-
- prompt_start = (
- "Your decisions must always be made independently without"
- " seeking user assistance. Play to your strengths as an LLM and pursue"
- " simple strategies with no legal complications."
- ""
- )
-
- from autogpt.prompts.prompt import build_default_prompt_generator
-
- if prompt_generator is None:
- prompt_generator = build_default_prompt_generator(config)
- prompt_generator.goals = self.ai_goals
- prompt_generator.name = self.ai_name
- prompt_generator.role = self.ai_role
- prompt_generator.command_registry = self.command_registry
- for plugin in config.plugins:
- if not plugin.can_handle_post_prompt():
- continue
- prompt_generator = plugin.post_prompt(prompt_generator)
-
- if config.execute_local_commands:
- # add OS info to prompt
- os_name = platform.system()
- os_info = (
- platform.platform(terse=True)
- if os_name != "Linux"
- else distro.name(pretty=True)
- )
-
- prompt_start += f"\nThe OS you are running on is: {os_info}"
-
- # Construct full prompt
- full_prompt = f"You are {prompt_generator.name}, {prompt_generator.role}\n{prompt_start}\n\nGOALS:\n\n"
- for i, goal in enumerate(self.ai_goals):
- full_prompt += f"{i+1}. {goal}\n"
- if self.api_budget > 0.0:
- full_prompt += f"\nIt takes money to let you run. Your API budget is ${self.api_budget:.3f}"
- self.prompt_generator = prompt_generator
- full_prompt += f"\n\n{prompt_generator.generate_prompt_string(config)}"
- return full_prompt
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
deleted file mode 100644
index 579f0c4e6..000000000
--- a/autogpt/config/config.py
+++ /dev/null
@@ -1,373 +0,0 @@
-"""Configuration class to store the state of bools for different scripts access."""
-from __future__ import annotations
-
-import contextlib
-import os
-import re
-from typing import Any, Dict, Optional, Union
-
-import yaml
-from auto_gpt_plugin_template import AutoGPTPluginTemplate
-from colorama import Fore
-from pydantic import Field, validator
-
-from autogpt.core.configuration.schema import Configurable, SystemSettings
-from autogpt.plugins.plugins_config import PluginsConfig
-
-AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "../..", "azure.yaml")
-PLUGINS_CONFIG_FILE = os.path.join(
- os.path.dirname(__file__), "../..", "plugins_config.yaml"
-)
-GPT_4_MODEL = "gpt-4"
-GPT_3_MODEL = "gpt-3.5-turbo"
-
-
-class Config(SystemSettings, arbitrary_types_allowed=True):
- name: str = "Auto-GPT configuration"
- description: str = "Default configuration for the Auto-GPT application."
- ########################
- # Application Settings #
- ########################
- skip_news: bool = False
- skip_reprompt: bool = False
- authorise_key: str = "y"
- exit_key: str = "n"
- debug_mode: bool = False
- plain_output: bool = False
- chat_messages_enabled: bool = True
- # TTS configuration
- speak_mode: bool = False
- text_to_speech_provider: str = "gtts"
- streamelements_voice: str = "Brian"
- elevenlabs_voice_id: Optional[str] = None
-
- ##########################
- # Agent Control Settings #
- ##########################
- # Paths
- ai_settings_file: str = "ai_settings.yaml"
- prompt_settings_file: str = "prompt_settings.yaml"
- workspace_path: Optional[str] = None
- file_logger_path: Optional[str] = None
- # Model configuration
- fast_llm: str = "gpt-3.5-turbo"
- smart_llm: str = "gpt-4"
- temperature: float = 0
- openai_functions: bool = False
- embedding_model: str = "text-embedding-ada-002"
- browse_spacy_language_model: str = "en_core_web_sm"
- # Run loop configuration
- continuous_mode: bool = False
- continuous_limit: int = 0
-
- ##########
- # Memory #
- ##########
- memory_backend: str = "json_file"
- memory_index: str = "auto-gpt-memory"
- redis_host: str = "localhost"
- redis_port: int = 6379
- redis_password: str = ""
- wipe_redis_on_start: bool = True
-
- ############
- # Commands #
- ############
- # General
- disabled_command_categories: list[str] = Field(default_factory=list)
- # File ops
- restrict_to_workspace: bool = True
- allow_downloads: bool = False
- # Shell commands
- shell_command_control: str = "denylist"
- execute_local_commands: bool = False
- shell_denylist: list[str] = Field(default_factory=lambda: ["sudo", "su"])
- shell_allowlist: list[str] = Field(default_factory=list)
- # Text to image
- image_provider: Optional[str] = None
- huggingface_image_model: str = "CompVis/stable-diffusion-v1-4"
- sd_webui_url: Optional[str] = "http://localhost:7860"
- image_size: int = 256
- # Audio to text
- audio_to_text_provider: str = "huggingface"
- huggingface_audio_to_text_model: Optional[str] = None
- # Web browsing
- selenium_web_browser: str = "chrome"
- selenium_headless: bool = True
- user_agent: str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
-
- ###################
- # Plugin Settings #
- ###################
- plugins_dir: str = "plugins"
- plugins_config_file: str = PLUGINS_CONFIG_FILE
- plugins_config: PluginsConfig = Field(
- default_factory=lambda: PluginsConfig(plugins={})
- )
- plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)
- plugins_allowlist: list[str] = Field(default_factory=list)
- plugins_denylist: list[str] = Field(default_factory=list)
- plugins_openai: list[str] = Field(default_factory=list)
-
- ###############
- # Credentials #
- ###############
- # OpenAI
- openai_api_key: Optional[str] = None
- openai_api_type: Optional[str] = None
- openai_api_base: Optional[str] = None
- openai_api_version: Optional[str] = None
- openai_organization: Optional[str] = None
- use_azure: bool = False
- azure_config_file: Optional[str] = AZURE_CONFIG_FILE
- azure_model_to_deployment_id_map: Optional[Dict[str, str]] = None
- # Elevenlabs
- elevenlabs_api_key: Optional[str] = None
- # Github
- github_api_key: Optional[str] = None
- github_username: Optional[str] = None
- # Google
- google_api_key: Optional[str] = None
- google_custom_search_engine_id: Optional[str] = None
- # Huggingface
- huggingface_api_token: Optional[str] = None
- # Stable Diffusion
- sd_webui_auth: Optional[str] = None
-
- @validator("plugins", each_item=True)
- def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
- assert issubclass(
- p.__class__, AutoGPTPluginTemplate
- ), f"{p} does not subclass AutoGPTPluginTemplate"
- assert (
- p.__class__.__name__ != "AutoGPTPluginTemplate"
- ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
- return p
-
- def get_azure_kwargs(self, model: str) -> dict[str, str]:
- """Get the kwargs for the Azure API."""
-
- # Fix --gpt3only and --gpt4only in combination with Azure
- fast_llm = (
- self.fast_llm
- if not (
- self.fast_llm == self.smart_llm
- and self.fast_llm.startswith(GPT_4_MODEL)
- )
- else f"not_{self.fast_llm}"
- )
- smart_llm = (
- self.smart_llm
- if not (
- self.smart_llm == self.fast_llm
- and self.smart_llm.startswith(GPT_3_MODEL)
- )
- else f"not_{self.smart_llm}"
- )
-
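-        # The "not_<model>" placeholder keeps the two dict keys below
-        # distinct when fast_llm == smart_llm, so the lookup for the
-        # active model resolves to the correct deployment.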
- deployment_id = {
- fast_llm: self.azure_model_to_deployment_id_map.get(
- "fast_llm_deployment_id",
- self.azure_model_to_deployment_id_map.get(
- "fast_llm_model_deployment_id" # backwards compatibility
- ),
- ),
- smart_llm: self.azure_model_to_deployment_id_map.get(
- "smart_llm_deployment_id",
- self.azure_model_to_deployment_id_map.get(
- "smart_llm_model_deployment_id" # backwards compatibility
- ),
- ),
- self.embedding_model: self.azure_model_to_deployment_id_map.get(
- "embedding_model_deployment_id"
- ),
- }.get(model, None)
-
- kwargs = {
- "api_type": self.openai_api_type,
- "api_base": self.openai_api_base,
- "api_version": self.openai_api_version,
- }
- if model == self.embedding_model:
- kwargs["engine"] = deployment_id
- else:
- kwargs["deployment_id"] = deployment_id
- return kwargs
-
-
-class ConfigBuilder(Configurable[Config]):
- default_settings = Config()
-
- @classmethod
- def build_config_from_env(cls) -> Config:
- """Initialize the Config class"""
- config_dict = {
- "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"),
- "exit_key": os.getenv("EXIT_KEY"),
- "plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True",
- "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"),
- "ai_settings_file": os.getenv("AI_SETTINGS_FILE"),
- "prompt_settings_file": os.getenv("PROMPT_SETTINGS_FILE"),
- "fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")),
- "smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")),
- "embedding_model": os.getenv("EMBEDDING_MODEL"),
- "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"),
- "openai_api_key": os.getenv("OPENAI_API_KEY"),
- "use_azure": os.getenv("USE_AZURE") == "True",
- "azure_config_file": os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE),
- "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False")
- == "True",
- "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True")
- == "True",
- "openai_functions": os.getenv("OPENAI_FUNCTIONS", "False") == "True",
- "elevenlabs_api_key": os.getenv("ELEVENLABS_API_KEY"),
- "streamelements_voice": os.getenv("STREAMELEMENTS_VOICE"),
- "text_to_speech_provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"),
- "github_api_key": os.getenv("GITHUB_API_KEY"),
- "github_username": os.getenv("GITHUB_USERNAME"),
- "google_api_key": os.getenv("GOOGLE_API_KEY"),
- "image_provider": os.getenv("IMAGE_PROVIDER"),
- "huggingface_api_token": os.getenv("HUGGINGFACE_API_TOKEN"),
- "huggingface_image_model": os.getenv("HUGGINGFACE_IMAGE_MODEL"),
- "audio_to_text_provider": os.getenv("AUDIO_TO_TEXT_PROVIDER"),
- "huggingface_audio_to_text_model": os.getenv(
- "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
- ),
- "sd_webui_url": os.getenv("SD_WEBUI_URL"),
- "sd_webui_auth": os.getenv("SD_WEBUI_AUTH"),
- "selenium_web_browser": os.getenv("USE_WEB_BROWSER"),
- "selenium_headless": os.getenv("HEADLESS_BROWSER", "True") == "True",
- "user_agent": os.getenv("USER_AGENT"),
- "memory_backend": os.getenv("MEMORY_BACKEND"),
- "memory_index": os.getenv("MEMORY_INDEX"),
- "redis_host": os.getenv("REDIS_HOST"),
- "redis_password": os.getenv("REDIS_PASSWORD"),
- "wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True",
- "plugins_dir": os.getenv("PLUGINS_DIR"),
- "plugins_config_file": os.getenv("PLUGINS_CONFIG_FILE"),
- "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True",
- }
-
- config_dict["disabled_command_categories"] = _safe_split(
- os.getenv("DISABLED_COMMAND_CATEGORIES")
- )
-
- config_dict["shell_denylist"] = _safe_split(
- os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS"))
- )
- config_dict["shell_allowlist"] = _safe_split(
- os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS"))
- )
-
- config_dict["google_custom_search_engine_id"] = os.getenv(
- "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID")
- )
-
- config_dict["elevenlabs_voice_id"] = os.getenv(
- "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
- )
- elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
- if os.getenv("USE_MAC_OS_TTS"):
- default_tts_provider = "macos"
- elif elevenlabs_api_key:
- default_tts_provider = "elevenlabs"
- elif os.getenv("USE_BRIAN_TTS"):
- default_tts_provider = "streamelements"
- else:
- default_tts_provider = "gtts"
- config_dict["text_to_speech_provider"] = default_tts_provider
-
- config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS"))
- config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS"))
- config_dict["plugins_config"] = PluginsConfig.load_config(
- config_dict["plugins_config_file"],
- config_dict["plugins_denylist"],
- config_dict["plugins_allowlist"],
- )
-
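-        # int()/float() raise TypeError when the env var is unset (None);
-        # suppressing it leaves the default value in place.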
- with contextlib.suppress(TypeError):
- config_dict["image_size"] = int(os.getenv("IMAGE_SIZE"))
- with contextlib.suppress(TypeError):
- config_dict["redis_port"] = int(os.getenv("REDIS_PORT"))
- with contextlib.suppress(TypeError):
- config_dict["temperature"] = float(os.getenv("TEMPERATURE"))
-
- if config_dict["use_azure"]:
- azure_config = cls.load_azure_config(config_dict["azure_config_file"])
- config_dict.update(azure_config)
-
- elif os.getenv("OPENAI_API_BASE_URL"):
- config_dict["openai_api_base"] = os.getenv("OPENAI_API_BASE_URL")
-
- openai_organization = os.getenv("OPENAI_ORGANIZATION")
- if openai_organization is not None:
- config_dict["openai_organization"] = openai_organization
-
- config_dict_without_none_values = {
- k: v for k, v in config_dict.items() if v is not None
- }
-
- return cls.build_agent_configuration(config_dict_without_none_values)
-
- @classmethod
- def load_azure_config(cls, config_file: str = AZURE_CONFIG_FILE) -> Dict[str, str]:
- """
- Loads the configuration parameters for Azure hosting from the specified file
- path as a yaml file.
-
- Parameters:
- config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
-
- Returns:
- Dict
- """
- with open(config_file) as file:
- config_params = yaml.load(file, Loader=yaml.FullLoader) or {}
-
- return {
- "openai_api_type": config_params.get("azure_api_type", "azure"),
- "openai_api_base": config_params.get("azure_api_base", ""),
- "openai_api_version": config_params.get(
- "azure_api_version", "2023-03-15-preview"
- ),
- "azure_model_to_deployment_id_map": config_params.get(
- "azure_model_map", {}
- ),
- }
-
-
-def check_openai_api_key(config: Config) -> None:
- """Check if the OpenAI API key is set in config.py or as an environment variable."""
- if not config.openai_api_key:
- print(
- Fore.RED
- + "Please set your OpenAI API key in .env or as an environment variable."
- + Fore.RESET
- )
- print("You can get your key from https://platform.openai.com/account/api-keys")
- openai_api_key = input(
- "If you do have the key, please enter your OpenAI API key now:\n"
- )
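-        # OpenAI secret keys have historically been "sk-" followed by
-        # 48 word characters.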
- key_pattern = r"^sk-\w{48}"
- openai_api_key = openai_api_key.strip()
- if re.search(key_pattern, openai_api_key):
- os.environ["OPENAI_API_KEY"] = openai_api_key
- config.openai_api_key = openai_api_key
- print(
- Fore.GREEN
- + "OpenAI API key successfully set!\n"
-            + Fore.YELLOW
- + "NOTE: The API key you've set is only temporary.\n"
- + "For longer sessions, please set it in .env file"
- + Fore.RESET
- )
- else:
- print("Invalid OpenAI API key!")
- exit(1)
-
-
-def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
- """Split a string by a separator. Return an empty list if the string is None."""
- if s is None:
- return []
- return s.split(sep)
diff --git a/autogpt/config/prompt_config.py b/autogpt/config/prompt_config.py
deleted file mode 100644
index 793bb4440..000000000
--- a/autogpt/config/prompt_config.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# sourcery skip: do-not-use-staticmethod
-"""
-A module that contains the PromptConfig class object that contains the configuration
-"""
-import yaml
-from colorama import Fore
-
-from autogpt import utils
-from autogpt.logs import logger
-
-
-class PromptConfig:
- """
- A class object that contains the configuration information for the prompt, which will be used by the prompt generator
-
- Attributes:
- constraints (list): Constraints list for the prompt generator.
- resources (list): Resources list for the prompt generator.
- performance_evaluations (list): Performance evaluation list for the prompt generator.
- """
-
- def __init__(self, prompt_settings_file: str) -> None:
- """
-        Initialize a class instance with parameters (constraints, resources,
-        performance_evaluations) loaded from the given yaml file;
-        exits if the file fails validation.
-
- Parameters:
- constraints (list): Constraints list for the prompt generator.
- resources (list): Resources list for the prompt generator.
- performance_evaluations (list): Performance evaluation list for the prompt generator.
- Returns:
- None
- """
- # Validate file
- (validated, message) = utils.validate_yaml_file(prompt_settings_file)
- if not validated:
- logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
- logger.double_check()
- exit(1)
-
- with open(prompt_settings_file, encoding="utf-8") as file:
- config_params = yaml.load(file, Loader=yaml.FullLoader)
-
- self.constraints = config_params.get("constraints", [])
- self.resources = config_params.get("resources", [])
- self.performance_evaluations = config_params.get("performance_evaluations", [])
diff --git a/autogpt/configurator.py b/autogpt/configurator.py
deleted file mode 100644
index fa6b4c588..000000000
--- a/autogpt/configurator.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""Configurator module."""
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-import click
-from colorama import Back, Fore, Style
-
-from autogpt import utils
-from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
-from autogpt.llm.utils import check_model
-from autogpt.logs import logger
-from autogpt.memory.vector import get_supported_memory_backends
-
-if TYPE_CHECKING:
- from autogpt.config import Config
-
-
-def create_config(
- config: Config,
- continuous: bool,
- continuous_limit: int,
- ai_settings_file: str,
- prompt_settings_file: str,
- skip_reprompt: bool,
- speak: bool,
- debug: bool,
- gpt3only: bool,
- gpt4only: bool,
- memory_type: str,
- browser_name: str,
- allow_downloads: bool,
- skip_news: bool,
-) -> None:
- """Updates the config object with the given arguments.
-
- Args:
- continuous (bool): Whether to run in continuous mode
- continuous_limit (int): The number of times to run in continuous mode
- ai_settings_file (str): The path to the ai_settings.yaml file
- prompt_settings_file (str): The path to the prompt_settings.yaml file
- skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
- speak (bool): Whether to enable speak mode
- debug (bool): Whether to enable debug mode
- gpt3only (bool): Whether to enable GPT3.5 only mode
- gpt4only (bool): Whether to enable GPT4 only mode
- memory_type (str): The type of memory backend to use
- browser_name (str): The name of the browser to use when using selenium to scrape the web
- allow_downloads (bool): Whether to allow Auto-GPT to download files natively
-        skip_news (bool): Whether to suppress the output of latest news on startup
- """
- config.debug_mode = False
- config.continuous_mode = False
- config.speak_mode = False
-
- if debug:
- logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
- config.debug_mode = True
-
- if continuous:
- logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
- logger.typewriter_log(
- "WARNING: ",
- Fore.RED,
- "Continuous mode is not recommended. It is potentially dangerous and may"
- " cause your AI to run forever or carry out actions you would not usually"
- " authorise. Use at your own risk.",
- )
- config.continuous_mode = True
-
- if continuous_limit:
- logger.typewriter_log(
- "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
- )
- config.continuous_limit = continuous_limit
-
- # Check if continuous limit is used without continuous mode
- if continuous_limit and not continuous:
- raise click.UsageError("--continuous-limit can only be used with --continuous")
-
- if speak:
- logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
- config.speak_mode = True
-
- # Set the default LLM models
- if gpt3only:
- logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
- # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
- config.fast_llm = GPT_3_MODEL
- config.smart_llm = GPT_3_MODEL
- elif (
- gpt4only
- and check_model(GPT_4_MODEL, model_type="smart_llm", config=config)
- == GPT_4_MODEL
- ):
- logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
- # --gpt4only should always use gpt-4, despite user's SMART_LLM config
- config.fast_llm = GPT_4_MODEL
- config.smart_llm = GPT_4_MODEL
- else:
- config.fast_llm = check_model(config.fast_llm, "fast_llm", config=config)
- config.smart_llm = check_model(config.smart_llm, "smart_llm", config=config)
-
- if memory_type:
- supported_memory = get_supported_memory_backends()
- chosen = memory_type
- if chosen not in supported_memory:
- logger.typewriter_log(
- "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
- Fore.RED,
- f"{supported_memory}",
- )
- logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
- else:
- config.memory_backend = chosen
-
- if skip_reprompt:
- logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
- config.skip_reprompt = True
-
- if ai_settings_file:
- file = ai_settings_file
-
- # Validate file
- (validated, message) = utils.validate_yaml_file(file)
- if not validated:
- logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
- logger.double_check()
- exit(1)
-
- logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
- config.ai_settings_file = file
- config.skip_reprompt = True
-
- if prompt_settings_file:
- file = prompt_settings_file
-
- # Validate file
- (validated, message) = utils.validate_yaml_file(file)
- if not validated:
- logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
- logger.double_check()
- exit(1)
-
- logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
- config.prompt_settings_file = file
-
- if browser_name:
- config.selenium_web_browser = browser_name
-
- if allow_downloads:
- logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
- logger.typewriter_log(
- "WARNING: ",
- Fore.YELLOW,
- f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
- + "It is recommended that you monitor any files it downloads carefully.",
- )
- logger.typewriter_log(
- "WARNING: ",
- Fore.YELLOW,
- f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
- )
- config.allow_downloads = True
-
- if skip_news:
- config.skip_news = True
diff --git a/autogpt/core/README.md b/autogpt/core/README.md
deleted file mode 100644
index f7bdf2d7d..000000000
--- a/autogpt/core/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Run instructions
-
-There are two client applications for Auto-GPT included.
-
-## CLI Application
-
-:star2: **This is the reference application I'm working with for now** :star2:
-
-The first app is a straight CLI application. I have not done anything yet to port all the friendly display stuff from the `logger.typewriter_log` logic.
-
-- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/cli.py)
-- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_app/main.py)
-
-Auto-GPT must be installed in your python environment to run this application. To do so, run
-
-```
-pip install -e REPOSITORY_ROOT
-```
-
-where `REPOSITORY_ROOT` is the root of the Auto-GPT repository on your machine.
-
-You'll then need a settings file. Run
-
-```
-python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py make-settings
-```
-
-This will write a file called `default_agent_settings.yaml` with all the user-modifiable configuration keys to `~/auto-gpt/default_agent_settings.yml` (and make the `auto-gpt` directory in your user directory if it doesn't exist). At a bare minimum, you'll need to set `openai.credentials.api_key` to your OpenAI API Key to run the model.
-
-You can then run Auto-GPT with
-
-```
-python REPOSITORY_ROOT/autogpt/core/runner/cli_app/cli.py run
-```
-
-to launch the interaction loop.
-
-## CLI Web App
-
-The second app is still a CLI, but it sets up a local webserver that the client application talks to rather than invoking calls to the Agent library code directly. This application is essentially a sketch at this point as the folks who were driving it have had less time (and likely not enough clarity) to proceed.
-
-- [Entry Point](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/cli.py)
-- [Client Application](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/client/client.py)
-- [Server API](https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/core/runner/cli_web_app/server/api.py)
-
-To run, you still need to generate a default configuration. You can do
-
-```
-python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py make-settings
-```
-
-It invokes the same command as the bare CLI app, so follow the instructions above about setting your API key.
-
-To run, do
-
-```
-python REPOSITORY_ROOT/autogpt/core/runner/cli_web_app/cli.py client
-```
-
-This will launch a webserver and then start the client cli application to communicate with it.
-
-:warning: I am not actively developing this application. It is a very good place to get involved if you have web application design experience and are looking to contribute to the re-arch.
\ No newline at end of file
diff --git a/autogpt/core/ability/__init__.py b/autogpt/core/ability/__init__.py
deleted file mode 100644
index 3cf310dec..000000000
--- a/autogpt/core/ability/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""The command system provides a way to extend the functionality of the AI agent."""
-from autogpt.core.ability.base import Ability, AbilityRegistry
-from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.ability.simple import AbilityRegistrySettings, SimpleAbilityRegistry
diff --git a/autogpt/core/ability/base.py b/autogpt/core/ability/base.py
deleted file mode 100644
index ac26f0267..000000000
--- a/autogpt/core/ability/base.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import abc
-from pprint import pformat
-from typing import ClassVar
-
-import inflection
-from pydantic import Field
-
-from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.configuration import SystemConfiguration
-from autogpt.core.planning.simple import LanguageModelConfiguration
-
-
-class AbilityConfiguration(SystemConfiguration):
- """Struct for model configuration."""
-
- from autogpt.core.plugin.base import PluginLocation
-
- location: PluginLocation
- packages_required: list[str] = Field(default_factory=list)
- language_model_required: LanguageModelConfiguration = None
- memory_provider_required: bool = False
- workspace_required: bool = False
-
-
-class Ability(abc.ABC):
- """A class representing an agent ability."""
-
- default_configuration: ClassVar[AbilityConfiguration]
-
- @classmethod
- def name(cls) -> str:
- """The name of the ability."""
- return inflection.underscore(cls.__name__)
-
- @classmethod
- @abc.abstractmethod
- def description(cls) -> str:
- """A detailed description of what the ability does."""
- ...
-
- @classmethod
- @abc.abstractmethod
- def arguments(cls) -> dict:
- """A dict of arguments in standard json schema format."""
- ...
-
- @classmethod
- def required_arguments(cls) -> list[str]:
- """A list of required arguments."""
- return []
-
- @abc.abstractmethod
- async def __call__(self, *args, **kwargs) -> AbilityResult:
- ...
-
- def __str__(self) -> str:
-        return pformat(self.dump())
-
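-    # The dumped format mirrors the OpenAI function-calling schema
-    # (name / description / parameters with JSON-schema properties).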
- def dump(self) -> dict:
- return {
- "name": self.name(),
- "description": self.description(),
- "parameters": {
- "type": "object",
- "properties": self.arguments(),
- "required": self.required_arguments(),
- },
- }
-
-
-class AbilityRegistry(abc.ABC):
- @abc.abstractmethod
- def register_ability(
- self, ability_name: str, ability_configuration: AbilityConfiguration
- ) -> None:
- ...
-
- @abc.abstractmethod
- def list_abilities(self) -> list[str]:
- ...
-
- @abc.abstractmethod
- def dump_abilities(self) -> list[dict]:
- ...
-
- @abc.abstractmethod
- def get_ability(self, ability_name: str) -> Ability:
- ...
-
- @abc.abstractmethod
- def perform(self, ability_name: str, **kwargs) -> AbilityResult:
- ...
diff --git a/autogpt/core/ability/builtins/__init__.py b/autogpt/core/ability/builtins/__init__.py
deleted file mode 100644
index 0572605a1..000000000
--- a/autogpt/core/ability/builtins/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from autogpt.core.ability.builtins.create_new_ability import CreateNewAbility
-from autogpt.core.ability.builtins.query_language_model import QueryLanguageModel
-
-BUILTIN_ABILITIES = {
- QueryLanguageModel.name(): QueryLanguageModel,
-}
diff --git a/autogpt/core/ability/builtins/create_new_ability.py b/autogpt/core/ability/builtins/create_new_ability.py
deleted file mode 100644
index 8c53efb3e..000000000
--- a/autogpt/core/ability/builtins/create_new_ability.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import logging
-
-from autogpt.core.ability.base import Ability, AbilityConfiguration
-from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
-
-
-class CreateNewAbility(Ability):
- default_configuration = AbilityConfiguration(
- location=PluginLocation(
- storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
- storage_route="autogpt.core.ability.builtins.CreateNewAbility",
- ),
- )
-
- def __init__(
- self,
- logger: logging.Logger,
- configuration: AbilityConfiguration,
- ):
- self._logger = logger
- self._configuration = configuration
-
- @classmethod
- def description(cls) -> str:
- return "Create a new ability by writing python code."
-
- @classmethod
- def arguments(cls) -> dict:
- return {
- "ability_name": {
- "type": "string",
- "description": "A meaningful and concise name for the new ability.",
- },
- "description": {
- "type": "string",
- "description": "A detailed description of the ability and its uses, including any limitations.",
- },
- "arguments": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "The name of the argument.",
- },
- "type": {
- "type": "string",
- "description": "The type of the argument. Must be a standard json schema type.",
- },
- "description": {
- "type": "string",
- "description": "A detailed description of the argument and its uses.",
- },
- },
- },
- "description": "A list of arguments that the ability will accept.",
- },
- "required_arguments": {
- "type": "array",
- "items": {
- "type": "string",
- "description": "The names of the arguments that are required.",
- },
- "description": "A list of the names of the arguments that are required.",
- },
- "package_requirements": {
- "type": "array",
- "items": {
- "type": "string",
- "description": "The of the Python package that is required to execute the ability.",
- },
- "description": "A list of the names of the Python packages that are required to execute the ability.",
- },
- "code": {
- "type": "string",
- "description": "The Python code that will be executed when the ability is called.",
- },
- }
-
- @classmethod
- def required_arguments(cls) -> list[str]:
- return [
- "ability_name",
- "description",
- "arguments",
- "required_arguments",
- "package_requirements",
- "code",
- ]
-
- async def __call__(
- self,
- ability_name: str,
- description: str,
- arguments: list[dict],
- required_arguments: list[str],
- package_requirements: list[str],
- code: str,
- ) -> AbilityResult:
- raise NotImplementedError
diff --git a/autogpt/core/ability/builtins/file_operations.py b/autogpt/core/ability/builtins/file_operations.py
deleted file mode 100644
index 43cd0d0cd..000000000
--- a/autogpt/core/ability/builtins/file_operations.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import logging
-import os
-
-from autogpt.core.ability.base import Ability, AbilityConfiguration
-from autogpt.core.ability.schema import AbilityResult, ContentType, Knowledge
-from autogpt.core.workspace import Workspace
-
-
-class ReadFile(Ability):
- default_configuration = AbilityConfiguration(
- packages_required=["unstructured"],
- workspace_required=True,
- )
-
- def __init__(
- self,
- logger: logging.Logger,
- workspace: Workspace,
- ):
- self._logger = logger
- self._workspace = workspace
-
- @property
- def description(self) -> str:
- return "Read and parse all text from a file."
-
- @property
- def arguments(self) -> dict:
- return {
- "filename": {
- "type": "string",
- "description": "The name of the file to read.",
- },
- }
-
- def _check_preconditions(self, filename: str) -> AbilityResult | None:
- message = ""
-        try:
-            import charset_normalizer  # noqa: F401 (needed by `unstructured`)
-        except ImportError:
-            message = "Package charset_normalizer is not installed."
-
- try:
- file_path = self._workspace.get_path(filename)
- if not file_path.exists():
- message = f"File {filename} does not exist."
- if not file_path.is_file():
- message = f"{filename} is not a file."
- except ValueError as e:
- message = str(e)
-
- if message:
- return AbilityResult(
- ability_name=self.name(),
- ability_args={"filename": filename},
- success=False,
- message=message,
- data=None,
- )
-
- def __call__(self, filename: str) -> AbilityResult:
- if result := self._check_preconditions(filename):
- return result
-
- from unstructured.partition.auto import partition
-
- file_path = self._workspace.get_path(filename)
- try:
- elements = partition(str(file_path))
- # TODO: Lots of other potentially useful information is available
- # in the partitioned file. Consider returning more of it.
- new_knowledge = Knowledge(
- content="\n\n".join([element.text for element in elements]),
- content_type=ContentType.TEXT,
- content_metadata={"filename": filename},
- )
- success = True
- message = f"File {file_path} read successfully."
- except IOError as e:
- new_knowledge = None
- success = False
- message = str(e)
-
- return AbilityResult(
- ability_name=self.name(),
- ability_args={"filename": filename},
- success=success,
- message=message,
- new_knowledge=new_knowledge,
- )
-
-
-class WriteFile(Ability):
- default_configuration = AbilityConfiguration(
- packages_required=["unstructured"],
- workspace_required=True,
- )
-
- def __init__(
- self,
- logger: logging.Logger,
- workspace: Workspace,
- ):
- self._logger = logger
- self._workspace = workspace
-
- @property
- def description(self) -> str:
- return "Write text to a file."
-
- @property
- def arguments(self) -> dict:
- return {
- "filename": {
- "type": "string",
- "description": "The name of the file to write.",
- },
- "contents": {
- "type": "string",
- "description": "The contents of the file to write.",
- },
- }
-
- def _check_preconditions(
- self, filename: str, contents: str
- ) -> AbilityResult | None:
- message = ""
- try:
- file_path = self._workspace.get_path(filename)
- if file_path.exists():
- message = f"File {filename} already exists."
- if not contents:
- message = f"File {filename} was not given any content."
- except ValueError as e:
- message = str(e)
-
- if message:
- return AbilityResult(
- ability_name=self.name(),
- ability_args={"filename": filename, "contents": contents},
- success=False,
- message=message,
- data=None,
- )
-
- def __call__(self, filename: str, contents: str) -> AbilityResult:
- if result := self._check_preconditions(filename, contents):
- return result
-
- file_path = self._workspace.get_path(filename)
- try:
- directory = os.path.dirname(file_path)
- os.makedirs(directory, exist_ok=True) # don't fail if the directory already exists
- with open(file_path, "w", encoding="utf-8") as f: # write to the resolved workspace path, not the raw filename
- f.write(contents)
- success = True
- message = f"File {file_path} written successfully."
- except IOError as e:
- success = False
- message = str(e)
-
- return AbilityResult(
- ability_name=self.name(),
- ability_args={"filename": filename},
- success=success,
- message=message,
- )
diff --git a/autogpt/core/ability/builtins/query_language_model.py b/autogpt/core/ability/builtins/query_language_model.py
deleted file mode 100644
index 95a5e0948..000000000
--- a/autogpt/core/ability/builtins/query_language_model.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import logging
-
-from autogpt.core.ability.base import Ability, AbilityConfiguration
-from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.planning.simple import LanguageModelConfiguration
-from autogpt.core.plugin.simple import PluginLocation, PluginStorageFormat
-from autogpt.core.resource.model_providers import (
- LanguageModelMessage,
- LanguageModelProvider,
- MessageRole,
- ModelProviderName,
- OpenAIModelName,
-)
-
-
-class QueryLanguageModel(Ability):
- default_configuration = AbilityConfiguration(
- location=PluginLocation(
- storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
- storage_route="autogpt.core.ability.builtins.QueryLanguageModel",
- ),
- language_model_required=LanguageModelConfiguration(
- model_name=OpenAIModelName.GPT3,
- provider_name=ModelProviderName.OPENAI,
- temperature=0.9,
- ),
- )
-
- def __init__(
- self,
- logger: logging.Logger,
- configuration: AbilityConfiguration,
- language_model_provider: LanguageModelProvider,
- ):
- self._logger = logger
- self._configuration = configuration
- self._language_model_provider = language_model_provider
-
- @classmethod
- def description(cls) -> str:
- return "Query a language model. A query should be a question and any relevant context."
-
- @classmethod
- def arguments(cls) -> dict:
- return {
- "query": {
- "type": "string",
- "description": "A query for a language model. A query should contain a question and any relevant context.",
- },
- }
-
- @classmethod
- def required_arguments(cls) -> list[str]:
- return ["query"]
-
- async def __call__(self, query: str) -> AbilityResult:
- messages = [
- LanguageModelMessage(
- content=query,
- role=MessageRole.USER,
- ),
- ]
- model_response = await self._language_model_provider.create_language_completion(
- model_prompt=messages,
- functions=[],
- model_name=self._configuration.language_model_required.model_name,
- completion_parser=self._parse_response,
- )
- return AbilityResult(
- ability_name=self.name(),
- ability_args={"query": query},
- success=True,
- message=model_response.content["content"],
- )
-
- @staticmethod
- def _parse_response(response_content: dict) -> dict:
- return {"content": response_content["content"]}
diff --git a/autogpt/core/ability/simple.py b/autogpt/core/ability/simple.py
deleted file mode 100644
index 459a8f13b..000000000
--- a/autogpt/core/ability/simple.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import logging
-
-from autogpt.core.ability.base import Ability, AbilityConfiguration, AbilityRegistry
-from autogpt.core.ability.builtins import BUILTIN_ABILITIES
-from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
-from autogpt.core.memory.base import Memory
-from autogpt.core.plugin.simple import SimplePluginService
-from autogpt.core.resource.model_providers import (
- LanguageModelProvider,
- ModelProviderName,
-)
-from autogpt.core.workspace.base import Workspace
-
-
-class AbilityRegistryConfiguration(SystemConfiguration):
- """Configuration for the AbilityRegistry subsystem."""
-
- abilities: dict[str, AbilityConfiguration]
-
-
-class AbilityRegistrySettings(SystemSettings):
- configuration: AbilityRegistryConfiguration
-
-
-class SimpleAbilityRegistry(AbilityRegistry, Configurable):
- default_settings = AbilityRegistrySettings(
- name="simple_ability_registry",
- description="A simple ability registry.",
- configuration=AbilityRegistryConfiguration(
- abilities={
- ability_name: ability.default_configuration
- for ability_name, ability in BUILTIN_ABILITIES.items()
- },
- ),
- )
-
- def __init__(
- self,
- settings: AbilityRegistrySettings,
- logger: logging.Logger,
- memory: Memory,
- workspace: Workspace,
- model_providers: dict[ModelProviderName, LanguageModelProvider],
- ):
- self._configuration = settings.configuration
- self._logger = logger
- self._memory = memory
- self._workspace = workspace
- self._model_providers = model_providers
- self._abilities = []
- for (
- ability_name,
- ability_configuration,
- ) in self._configuration.abilities.items():
- self.register_ability(ability_name, ability_configuration)
-
- def register_ability(
- self, ability_name: str, ability_configuration: AbilityConfiguration
- ) -> None:
- ability_class = SimplePluginService.get_plugin(ability_configuration.location)
- ability_args = {
- "logger": self._logger.getChild(ability_name),
- "configuration": ability_configuration,
- }
- if ability_configuration.packages_required:
- # TODO: Check packages are installed and maybe install them.
- pass
- if ability_configuration.memory_provider_required:
- ability_args["memory"] = self._memory
- if ability_configuration.workspace_required:
- ability_args["workspace"] = self._workspace
- if ability_configuration.language_model_required:
- ability_args["language_model_provider"] = self._model_providers[
- ability_configuration.language_model_required.provider_name
- ]
- ability = ability_class(**ability_args)
- self._abilities.append(ability)
-
- def list_abilities(self) -> list[str]:
- return [
- f"{ability.name()}: {ability.description()}" for ability in self._abilities
- ]
-
- def dump_abilities(self) -> list[dict]:
- return [ability.dump() for ability in self._abilities]
-
- def get_ability(self, ability_name: str) -> Ability:
- for ability in self._abilities:
- if ability.name() == ability_name:
- return ability
- raise ValueError(f"Ability '{ability_name}' not found.")
-
- async def perform(self, ability_name: str, **kwargs) -> AbilityResult:
- ability = self.get_ability(ability_name)
- return await ability(**kwargs)
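-
-# Illustrative usage sketch (the ability name and keyword argument here are
-# assumptions, not part of this module): callers dispatch by name through the
-# registry, which looks up the ability and awaits it with the given arguments:
-# result = await registry.perform("read_file", filename="notes.txt")
-# print(result.success, result.message)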
diff --git a/autogpt/core/agent/__init__.py b/autogpt/core/agent/__init__.py
deleted file mode 100644
index 9324042b1..000000000
--- a/autogpt/core/agent/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""The Agent is an autonomouos entity guided by a LLM provider."""
-from autogpt.core.agent.base import Agent
-from autogpt.core.agent.simple import AgentSettings, SimpleAgent
diff --git a/autogpt/core/agent/simple.py b/autogpt/core/agent/simple.py
deleted file mode 100644
index de99c135e..000000000
--- a/autogpt/core/agent/simple.py
+++ /dev/null
@@ -1,391 +0,0 @@
-import logging
-from datetime import datetime
-from pathlib import Path
-from typing import Any
-
-from pydantic import BaseModel
-
-from autogpt.core.ability import (
- AbilityRegistrySettings,
- AbilityResult,
- SimpleAbilityRegistry,
-)
-from autogpt.core.agent.base import Agent
-from autogpt.core.configuration import Configurable, SystemConfiguration, SystemSettings
-from autogpt.core.memory import MemorySettings, SimpleMemory
-from autogpt.core.planning import PlannerSettings, SimplePlanner, Task, TaskStatus
-from autogpt.core.plugin.simple import (
- PluginLocation,
- PluginStorageFormat,
- SimplePluginService,
-)
-from autogpt.core.resource.model_providers import OpenAIProvider, OpenAISettings
-from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
-
-
-class AgentSystems(SystemConfiguration):
- ability_registry: PluginLocation
- memory: PluginLocation
- openai_provider: PluginLocation
- planning: PluginLocation
- workspace: PluginLocation
-
-
-class AgentConfiguration(SystemConfiguration):
- cycle_count: int
- max_task_cycle_count: int
- creation_time: str
- name: str
- role: str
- goals: list[str]
- systems: AgentSystems
-
-
-class AgentSystemSettings(SystemSettings):
- configuration: AgentConfiguration
-
-
-class AgentSettings(BaseModel):
- agent: AgentSystemSettings
- ability_registry: AbilityRegistrySettings
- memory: MemorySettings
- openai_provider: OpenAISettings
- planning: PlannerSettings
- workspace: WorkspaceSettings
-
- def update_agent_name_and_goals(self, agent_goals: dict) -> None:
- self.agent.configuration.name = agent_goals["agent_name"]
- self.agent.configuration.role = agent_goals["agent_role"]
- self.agent.configuration.goals = agent_goals["agent_goals"]
-
-
-class SimpleAgent(Agent, Configurable):
- default_settings = AgentSystemSettings(
- name="simple_agent",
- description="A simple agent.",
- configuration=AgentConfiguration(
- name="Entrepreneur-GPT",
- role=(
- "An AI designed to autonomously develop and run businesses with "
- "the sole goal of increasing your net worth."
- ),
- goals=[
- "Increase net worth",
- "Grow Twitter Account",
- "Develop and manage multiple businesses autonomously",
- ],
- cycle_count=0,
- max_task_cycle_count=3,
- creation_time="",
- systems=AgentSystems(
- ability_registry=PluginLocation(
- storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
- storage_route="autogpt.core.ability.SimpleAbilityRegistry",
- ),
- memory=PluginLocation(
- storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
- storage_route="autogpt.core.memory.SimpleMemory",
- ),
- openai_provider=PluginLocation(
- storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
- storage_route="autogpt.core.resource.model_providers.OpenAIProvider",
- ),
- planning=PluginLocation(
- storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
- storage_route="autogpt.core.planning.SimplePlanner",
- ),
- workspace=PluginLocation(
- storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
- storage_route="autogpt.core.workspace.SimpleWorkspace",
- ),
- ),
- ),
- )
-
- def __init__(
- self,
- settings: AgentSystemSettings,
- logger: logging.Logger,
- ability_registry: SimpleAbilityRegistry,
- memory: SimpleMemory,
- openai_provider: OpenAIProvider,
- planning: SimplePlanner,
- workspace: SimpleWorkspace,
- ):
- self._configuration = settings.configuration
- self._logger = logger
- self._ability_registry = ability_registry
- self._memory = memory
- # FIXME: Need some work to make this work as a dict of providers
- # Getting the construction of the config to work is a bit tricky
- self._openai_provider = openai_provider
- self._planning = planning
- self._workspace = workspace
- self._task_queue = []
- self._completed_tasks = []
- self._current_task = None
- self._next_ability = None
-
- @classmethod
- def from_workspace(
- cls,
- workspace_path: Path,
- logger: logging.Logger,
- ) -> "SimpleAgent":
- agent_settings = SimpleWorkspace.load_agent_settings(workspace_path)
- agent_args = {}
-
- agent_args["settings"] = agent_settings.agent
- agent_args["logger"] = logger
- agent_args["workspace"] = cls._get_system_instance(
- "workspace",
- agent_settings,
- logger,
- )
- agent_args["openai_provider"] = cls._get_system_instance(
- "openai_provider",
- agent_settings,
- logger,
- )
- agent_args["planning"] = cls._get_system_instance(
- "planning",
- agent_settings,
- logger,
- model_providers={"openai": agent_args["openai_provider"]},
- )
- agent_args["memory"] = cls._get_system_instance(
- "memory",
- agent_settings,
- logger,
- workspace=agent_args["workspace"],
- )
-
- agent_args["ability_registry"] = cls._get_system_instance(
- "ability_registry",
- agent_settings,
- logger,
- workspace=agent_args["workspace"],
- memory=agent_args["memory"],
- model_providers={"openai": agent_args["openai_provider"]},
- )
-
- return cls(**agent_args)
-
- async def build_initial_plan(self) -> dict:
- plan = await self._planning.make_initial_plan(
- agent_name=self._configuration.name,
- agent_role=self._configuration.role,
- agent_goals=self._configuration.goals,
- abilities=self._ability_registry.list_abilities(),
- )
- tasks = [Task.parse_obj(task) for task in plan.content["task_list"]]
-
- # TODO: Should probably do a step to evaluate the quality of the generated tasks,
- # and ensure that they have actionable ready_criteria and acceptance_criteria.
-
- self._task_queue.extend(tasks)
- self._task_queue.sort(key=lambda t: t.priority, reverse=True)
- self._task_queue[-1].context.status = TaskStatus.READY
- return plan.content
-
- async def determine_next_ability(self, *args, **kwargs):
- if not self._task_queue:
- return {"response": "I don't have any tasks to work on right now."}
-
- self._configuration.cycle_count += 1
- task = self._task_queue.pop()
- self._logger.info(f"Working on task: {task}")
-
- task = await self._evaluate_task_and_add_context(task)
- next_ability = await self._choose_next_ability(
- task,
- self._ability_registry.dump_abilities(),
- )
- self._current_task = task
- self._next_ability = next_ability.content
- return self._current_task, self._next_ability
-
- async def execute_next_ability(self, user_input: str, *args, **kwargs):
- if user_input == "y":
- ability = self._ability_registry.get_ability(
- self._next_ability["next_ability"]
- )
- ability_response = await ability(**self._next_ability["ability_arguments"])
- await self._update_tasks_and_memory(ability_response)
- if self._current_task.context.status == TaskStatus.DONE:
- self._completed_tasks.append(self._current_task)
- else:
- self._task_queue.append(self._current_task)
- self._current_task = None
- self._next_ability = None
-
- return ability_response.dict()
- else:
- raise NotImplementedError
-
- async def _evaluate_task_and_add_context(self, task: Task) -> Task:
- """Evaluate the task and add context to it."""
- if task.context.status == TaskStatus.IN_PROGRESS:
- # Nothing to do here
- return task
- else:
- self._logger.debug(f"Evaluating task {task} and adding relevant context.")
- # TODO: Look up relevant memories (need working memory system)
- # TODO: Evaluate whether there is enough information to start the task (language model call).
- task.context.enough_info = True
- task.context.status = TaskStatus.IN_PROGRESS
- return task
-
- async def _choose_next_ability(self, task: Task, ability_schema: list[dict]):
- """Choose the next ability to use for the task."""
- self._logger.debug(f"Choosing next ability for task {task}.")
- if task.context.cycle_count > self._configuration.max_task_cycle_count:
- # Don't hit the LLM, just set the next action as "breakdown_task" with an appropriate reason
- raise NotImplementedError
- elif not task.context.enough_info:
- # Don't ask the LLM, just set the next action as "breakdown_task" with an appropriate reason
- raise NotImplementedError
- else:
- next_ability = await self._planning.determine_next_ability(
- task, ability_schema
- )
- return next_ability
-
- async def _update_tasks_and_memory(self, ability_result: AbilityResult):
- self._current_task.context.cycle_count += 1
- self._current_task.context.prior_actions.append(ability_result)
- # TODO: Summarize new knowledge
- # TODO: store knowledge and summaries in memory and in relevant tasks
- # TODO: evaluate whether the task is complete
-
- def __repr__(self):
- return "SimpleAgent()"
-
- ################################################################
- # Factory interface for agent bootstrapping and initialization #
- ################################################################
-
- @classmethod
- def build_user_configuration(cls) -> dict[str, Any]:
- """Build the user's configuration."""
- configuration_dict = {
- "agent": cls.get_user_config(),
- }
-
- system_locations = configuration_dict["agent"]["configuration"]["systems"]
- for system_name, system_location in system_locations.items():
- system_class = SimplePluginService.get_plugin(system_location)
- configuration_dict[system_name] = system_class.get_user_config()
- configuration_dict = _prune_empty_dicts(configuration_dict)
- return configuration_dict
-
- @classmethod
- def compile_settings(
- cls, logger: logging.Logger, user_configuration: dict
- ) -> AgentSettings:
- """Compile the user's configuration with the defaults."""
- logger.debug("Processing agent system configuration.")
- configuration_dict = {
- "agent": cls.build_agent_configuration(
- user_configuration.get("agent", {})
- ).dict(),
- }
-
- system_locations = configuration_dict["agent"]["configuration"]["systems"]
-
- # Build up default configuration
- for system_name, system_location in system_locations.items():
- logger.debug(f"Compiling configuration for system {system_name}")
- system_class = SimplePluginService.get_plugin(system_location)
- configuration_dict[system_name] = system_class.build_agent_configuration(
- user_configuration.get(system_name, {})
- ).dict()
-
- return AgentSettings.parse_obj(configuration_dict)
-
- @classmethod
- async def determine_agent_name_and_goals(
- cls,
- user_objective: str,
- agent_settings: AgentSettings,
- logger: logging.Logger,
- ) -> dict:
- logger.debug("Loading OpenAI provider.")
- provider: OpenAIProvider = cls._get_system_instance(
- "openai_provider",
- agent_settings,
- logger=logger,
- )
- logger.debug("Loading agent planner.")
- agent_planner: SimplePlanner = cls._get_system_instance(
- "planning",
- agent_settings,
- logger=logger,
- model_providers={"openai": provider},
- )
- logger.debug("determining agent name and goals.")
- model_response = await agent_planner.decide_name_and_goals(
- user_objective,
- )
-
- return model_response.content
-
- @classmethod
- def provision_agent(
- cls,
- agent_settings: AgentSettings,
- logger: logging.Logger,
- ):
- agent_settings.agent.configuration.creation_time = datetime.now().strftime(
- "%Y%m%d_%H%M%S"
- )
- workspace: SimpleWorkspace = cls._get_system_instance(
- "workspace",
- agent_settings,
- logger=logger,
- )
- return workspace.setup_workspace(agent_settings, logger)
-
- @classmethod
- def _get_system_instance(
- cls,
- system_name: str,
- agent_settings: AgentSettings,
- logger: logging.Logger,
- *args,
- **kwargs,
- ):
- system_locations = agent_settings.agent.configuration.systems.dict()
-
- system_settings = getattr(agent_settings, system_name)
- system_class = SimplePluginService.get_plugin(system_locations[system_name])
- system_instance = system_class(
- system_settings,
- *args,
- logger=logger.getChild(system_name),
- **kwargs,
- )
- return system_instance
-
-
-def _prune_empty_dicts(d: dict) -> dict:
- """
- Prune branches from a nested dictionary if the branch only contains empty dictionaries at the leaves.
-
- Args:
- d: The dictionary to prune.
-
- Returns:
- The pruned dictionary.
- """
- pruned = {}
- for key, value in d.items():
- if isinstance(value, dict):
- pruned_value = _prune_empty_dicts(value)
- if (
- pruned_value
- ): # if the pruned dictionary is not empty, add it to the result
- pruned[key] = pruned_value
- else:
- pruned[key] = value
- return pruned
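-
-# Illustrative example (assumed input, for clarity): branches whose leaves are
-# all empty dicts are dropped, while falsy non-dict values survive:
-# _prune_empty_dicts({"a": {}, "b": {"c": {}}, "d": 0}) -> {"d": 0}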
diff --git a/autogpt/core/configuration/__init__.py b/autogpt/core/configuration/__init__.py
deleted file mode 100644
index b6fee7493..000000000
--- a/autogpt/core/configuration/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""The configuration encapsulates settings for all Agent subsystems."""
-from autogpt.core.configuration.schema import (
- Configurable,
- SystemConfiguration,
- SystemSettings,
- UserConfigurable,
-)
diff --git a/autogpt/core/configuration/schema.py b/autogpt/core/configuration/schema.py
deleted file mode 100644
index 4a1a4d496..000000000
--- a/autogpt/core/configuration/schema.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import abc
-import typing
-from typing import Any, Generic, TypeVar
-
-from pydantic import BaseModel, Field
-
-
-def UserConfigurable(*args, **kwargs):
- return Field(*args, **kwargs, user_configurable=True)
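-
-# Hedged usage sketch (class and field names are illustrative): marking a
-# pydantic field as user-configurable so get_user_config() below surfaces it:
-# class MyConfig(SystemConfiguration):
-# model_name: str = UserConfigurable(default="gpt-4")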
-
-
-class SystemConfiguration(BaseModel):
- def get_user_config(self) -> dict[str, Any]:
- return _get_user_config_fields(self)
-
- class Config:
- extra = "forbid"
- use_enum_values = True
-
-
-class SystemSettings(BaseModel):
- """A base class for all system settings."""
-
- name: str
- description: str
-
- class Config:
- extra = "forbid"
- use_enum_values = True
-
-
-S = TypeVar("S", bound=SystemSettings)
-
-
-class Configurable(abc.ABC, Generic[S]):
- """A base class for all configurable objects."""
-
- prefix: str = ""
- default_settings: typing.ClassVar[S]
-
- @classmethod
- def get_user_config(cls) -> dict[str, Any]:
- return _get_user_config_fields(cls.default_settings)
-
- @classmethod
- def build_agent_configuration(cls, configuration: dict) -> S:
- """Process the configuration for this object."""
-
- defaults = cls.default_settings.dict()
- final_configuration = deep_update(defaults, configuration)
-
- return cls.default_settings.__class__.parse_obj(final_configuration)
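-
-# Illustrative sketch (names assumed): a partial override dict is deep-merged
-# over the defaults, so unspecified keys keep their default values:
-# MyConfigurable.build_agent_configuration({"description": "custom"})
-# returns default_settings with only `description` replaced.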
-
-
-def _get_user_config_fields(instance: BaseModel) -> dict[str, Any]:
- """
- Get the user config fields of a Pydantic model instance.
-
- Args:
- instance: The Pydantic model instance.
-
- Returns:
- The user config fields of the instance.
- """
- user_config_fields = {}
-
- for name, value in instance.__dict__.items():
- field_info = instance.__fields__[name]
- if "user_configurable" in field_info.field_info.extra:
- user_config_fields[name] = value
- elif isinstance(value, SystemConfiguration):
- user_config_fields[name] = value.get_user_config()
- elif isinstance(value, list) and all(
- isinstance(i, SystemConfiguration) for i in value
- ):
- user_config_fields[name] = [i.get_user_config() for i in value]
- elif isinstance(value, dict) and all(
- isinstance(i, SystemConfiguration) for i in value.values()
- ):
- user_config_fields[name] = {
- k: v.get_user_config() for k, v in value.items()
- }
-
- return user_config_fields
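-
-# Illustrative example (types assumed from the sketch near UserConfigurable):
-# only UserConfigurable fields are surfaced, and nested SystemConfiguration
-# values are recursed into, so the result mirrors the settings tree:
-# _get_user_config_fields(MyConfig(model_name="gpt-4")) -> {"model_name": "gpt-4"}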
-
-
-def deep_update(original_dict: dict, update_dict: dict) -> dict:
- """
- Recursively update a dictionary.
-
- Args:
- original_dict (dict): The dictionary to be updated.
- update_dict (dict): The dictionary to update with.
-
- Returns:
- dict: The updated dictionary.
- """
- for key, value in update_dict.items():
- if (
- key in original_dict
- and isinstance(original_dict[key], dict)
- and isinstance(value, dict)
- ):
- original_dict[key] = deep_update(original_dict[key], value)
- else:
- original_dict[key] = value
- return original_dict
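-
-# Illustrative example: nested keys merge recursively instead of being
-# overwritten wholesale:
-# deep_update({"a": {"x": 1, "y": 2}}, {"a": {"y": 3}}) -> {"a": {"x": 1, "y": 3}}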
diff --git a/autogpt/core/memory/__init__.py b/autogpt/core/memory/__init__.py
deleted file mode 100644
index eeba203a6..000000000
--- a/autogpt/core/memory/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""The memory subsystem manages the Agent's long-term memory."""
-from autogpt.core.memory.base import Memory
-from autogpt.core.memory.simple import MemorySettings, SimpleMemory
diff --git a/autogpt/core/planning/__init__.py b/autogpt/core/planning/__init__.py
deleted file mode 100644
index 9125feb1b..000000000
--- a/autogpt/core/planning/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""The planning system organizes the Agent's activities."""
-from autogpt.core.planning.schema import (
- LanguageModelClassification,
- LanguageModelPrompt,
- LanguageModelResponse,
- Task,
- TaskStatus,
- TaskType,
-)
-from autogpt.core.planning.simple import PlannerSettings, SimplePlanner
diff --git a/autogpt/core/planning/base.py b/autogpt/core/planning/base.py
deleted file mode 100644
index cfda45a30..000000000
--- a/autogpt/core/planning/base.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import abc
-
-from autogpt.core.configuration import SystemConfiguration
-from autogpt.core.planning.schema import (
- LanguageModelClassification,
- LanguageModelPrompt,
-)
-
-# class Planner(abc.ABC):
-# """Manages the agent's planning and goal-setting by constructing language model prompts."""
-#
-# @staticmethod
-# @abc.abstractmethod
-# async def decide_name_and_goals(
-# user_objective: str,
-# ) -> LanguageModelResponse:
-# """Decide the name and goals of an Agent from a user-defined objective.
-#
-# Args:
-# user_objective: The user-defined objective for the agent.
-#
-# Returns:
-# The agent name and goals as a response from the language model.
-#
-# """
-# ...
-#
-# @abc.abstractmethod
-# async def plan(self, context: PlanningContext) -> LanguageModelResponse:
-# """Plan the next ability for the Agent.
-#
-# Args:
-# context: A context object containing information about the agent's
-# progress, result, memories, and feedback.
-#
-#
-# Returns:
-# The next ability the agent should take along with thoughts and reasoning.
-#
-# """
-# ...
-#
-# @abc.abstractmethod
-# def reflect(
-# self,
-# context: ReflectionContext,
-# ) -> LanguageModelResponse:
-# """Reflect on a planned ability and provide self-criticism.
-#
-#
-# Args:
-# context: A context object containing information about the agent's
-# reasoning, plan, thoughts, and criticism.
-#
-# Returns:
-# Self-criticism about the agent's plan.
-#
-# """
-# ...
-
-
-class PromptStrategy(abc.ABC):
- default_configuration: SystemConfiguration
-
- @property
- @abc.abstractmethod
- def model_classification(self) -> LanguageModelClassification:
- ...
-
- @abc.abstractmethod
- def build_prompt(self, *_, **kwargs) -> LanguageModelPrompt:
- ...
-
- @abc.abstractmethod
- def parse_response_content(self, response_content: dict) -> dict:
- ...
diff --git a/autogpt/core/planning/schema.py b/autogpt/core/planning/schema.py
deleted file mode 100644
index 4c19ea4b6..000000000
--- a/autogpt/core/planning/schema.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import enum
-
-from pydantic import BaseModel, Field
-
-from autogpt.core.ability.schema import AbilityResult
-from autogpt.core.resource.model_providers.schema import (
- LanguageModelFunction,
- LanguageModelMessage,
- LanguageModelProviderModelResponse,
-)
-
-
-class LanguageModelClassification(str, enum.Enum):
- """The LanguageModelClassification is a functional description of the model.
-
- This is used to determine what kind of model to use for a given prompt.
- Sometimes we prefer a faster or cheaper model to accomplish a task when
- possible.
-
- """
-
- FAST_MODEL: str = "fast_model"
- SMART_MODEL: str = "smart_model"
-
-
-class LanguageModelPrompt(BaseModel):
- messages: list[LanguageModelMessage]
- functions: list[LanguageModelFunction] = Field(default_factory=list)
-
- def __str__(self):
- return "\n\n".join([f"{m.role.value}: {m.content}" for m in self.messages])
-
-
-class LanguageModelResponse(LanguageModelProviderModelResponse):
- """Standard response struct for a response from a language model."""
-
-
-class TaskType(str, enum.Enum):
- RESEARCH: str = "research"
- WRITE: str = "write"
- EDIT: str = "edit"
- CODE: str = "code"
- DESIGN: str = "design"
- TEST: str = "test"
- PLAN: str = "plan"
-
-
-class TaskStatus(str, enum.Enum):
- BACKLOG: str = "backlog"
- READY: str = "ready"
- IN_PROGRESS: str = "in_progress"
- DONE: str = "done"
-
-
-class TaskContext(BaseModel):
- cycle_count: int = 0
- status: TaskStatus = TaskStatus.BACKLOG
- parent: "Task" = None
- prior_actions: list[AbilityResult] = Field(default_factory=list)
- memories: list = Field(default_factory=list)
- user_input: list[str] = Field(default_factory=list)
- supplementary_info: list[str] = Field(default_factory=list)
- enough_info: bool = False
-
-
-class Task(BaseModel):
- objective: str
- type: str # TaskType FIXME: gpt does not obey the enum parameter in its schema
- priority: int
- ready_criteria: list[str]
- acceptance_criteria: list[str]
- context: TaskContext = Field(default_factory=TaskContext)
-
-
-# Need to resolve the circular dependency between Task and TaskContext once both models are defined.
-TaskContext.update_forward_refs()
diff --git a/autogpt/core/planning/simple.py b/autogpt/core/planning/simple.py
deleted file mode 100644
index 633511ec4..000000000
--- a/autogpt/core/planning/simple.py
+++ /dev/null
@@ -1,182 +0,0 @@
-import logging
-import platform
-import time
-
-import distro
-
-from autogpt.core.configuration import (
- Configurable,
- SystemConfiguration,
- SystemSettings,
- UserConfigurable,
-)
-from autogpt.core.planning import strategies
-from autogpt.core.planning.base import PromptStrategy
-from autogpt.core.planning.schema import (
- LanguageModelClassification,
- LanguageModelResponse,
- Task,
-)
-from autogpt.core.resource.model_providers import (
- LanguageModelProvider,
- ModelProviderName,
- OpenAIModelName,
-)
-from autogpt.core.workspace import Workspace
-
-
-class LanguageModelConfiguration(SystemConfiguration):
- """Struct for model configuration."""
-
- model_name: str = UserConfigurable()
- provider_name: ModelProviderName = UserConfigurable()
- temperature: float = UserConfigurable()
-
-
-class PromptStrategiesConfiguration(SystemConfiguration):
- name_and_goals: strategies.NameAndGoalsConfiguration
- initial_plan: strategies.InitialPlanConfiguration
- next_ability: strategies.NextAbilityConfiguration
-
-
-class PlannerConfiguration(SystemConfiguration):
- """Configuration for the Planner subsystem."""
-
- models: dict[LanguageModelClassification, LanguageModelConfiguration]
- prompt_strategies: PromptStrategiesConfiguration
-
-
-class PlannerSettings(SystemSettings):
- """Settings for the Planner subsystem."""
-
- configuration: PlannerConfiguration
-
-
-class SimplePlanner(Configurable):
- """Manages the agent's planning and goal-setting by constructing language model prompts."""
-
- default_settings = PlannerSettings(
- name="planner",
- description="Manages the agent's planning and goal-setting by constructing language model prompts.",
- configuration=PlannerConfiguration(
- models={
- LanguageModelClassification.FAST_MODEL: LanguageModelConfiguration(
- model_name=OpenAIModelName.GPT3,
- provider_name=ModelProviderName.OPENAI,
- temperature=0.9,
- ),
- LanguageModelClassification.SMART_MODEL: LanguageModelConfiguration(
- model_name=OpenAIModelName.GPT4,
- provider_name=ModelProviderName.OPENAI,
- temperature=0.9,
- ),
- },
- prompt_strategies=PromptStrategiesConfiguration(
- name_and_goals=strategies.NameAndGoals.default_configuration,
- initial_plan=strategies.InitialPlan.default_configuration,
- next_ability=strategies.NextAbility.default_configuration,
- ),
- ),
- )
-
- def __init__(
- self,
- settings: PlannerSettings,
- logger: logging.Logger,
- model_providers: dict[ModelProviderName, LanguageModelProvider],
- workspace: Workspace | None = None, # Workspace is not available during bootstrapping.
- ) -> None:
- self._configuration = settings.configuration
- self._logger = logger
- self._workspace = workspace
-
- self._providers: dict[LanguageModelClassification, LanguageModelProvider] = {}
- for model, model_config in self._configuration.models.items():
- self._providers[model] = model_providers[model_config.provider_name]
-
- self._prompt_strategies = {
- "name_and_goals": strategies.NameAndGoals(
- **self._configuration.prompt_strategies.name_and_goals.dict()
- ),
- "initial_plan": strategies.InitialPlan(
- **self._configuration.prompt_strategies.initial_plan.dict()
- ),
- "next_ability": strategies.NextAbility(
- **self._configuration.prompt_strategies.next_ability.dict()
- ),
- }
-
- async def decide_name_and_goals(self, user_objective: str) -> LanguageModelResponse:
- return await self.chat_with_model(
- self._prompt_strategies["name_and_goals"],
- user_objective=user_objective,
- )
-
- async def make_initial_plan(
- self,
- agent_name: str,
- agent_role: str,
- agent_goals: list[str],
- abilities: list[str],
- ) -> LanguageModelResponse:
- return await self.chat_with_model(
- self._prompt_strategies["initial_plan"],
- agent_name=agent_name,
- agent_role=agent_role,
- agent_goals=agent_goals,
- abilities=abilities,
- )
-
- async def determine_next_ability(
- self,
- task: Task,
- ability_schema: list[dict],
- ):
- return await self.chat_with_model(
- self._prompt_strategies["next_ability"],
- task=task,
- ability_schema=ability_schema,
- )
-
- async def chat_with_model(
- self,
- prompt_strategy: PromptStrategy,
- **kwargs,
- ) -> LanguageModelResponse:
- model_classification = prompt_strategy.model_classification
- model_configuration = self._configuration.models[model_classification].dict()
- self._logger.debug(f"Using model configuration: {model_configuration}")
- del model_configuration["provider_name"]
- provider = self._providers[model_classification]
-
- template_kwargs = self._make_template_kwargs_for_strategy(prompt_strategy)
- template_kwargs.update(kwargs)
- prompt = prompt_strategy.build_prompt(**template_kwargs)
-
- self._logger.debug(f"Using prompt:\n{prompt}\n\n")
- response = await provider.create_language_completion(
- model_prompt=prompt.messages,
- functions=prompt.functions,
- **model_configuration,
- completion_parser=prompt_strategy.parse_response_content,
- )
- return LanguageModelResponse.parse_obj(response.dict())
-
- def _make_template_kwargs_for_strategy(self, strategy: PromptStrategy):
- provider = self._providers[strategy.model_classification]
- template_kwargs = {
- "os_info": get_os_info(),
- "api_budget": provider.get_remaining_budget(),
- "current_time": time.strftime("%c"),
- }
- return template_kwargs
-
-
-def get_os_info() -> str:
- os_name = platform.system()
- os_info = (
- platform.platform(terse=True)
- if os_name != "Linux"
- else distro.name(pretty=True)
- )
- return os_info
diff --git a/autogpt/core/planning/strategies/__init__.py b/autogpt/core/planning/strategies/__init__.py
deleted file mode 100644
index 856c000e6..000000000
--- a/autogpt/core/planning/strategies/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from autogpt.core.planning.strategies.initial_plan import (
- InitialPlan,
- InitialPlanConfiguration,
-)
-from autogpt.core.planning.strategies.name_and_goals import (
- NameAndGoals,
- NameAndGoalsConfiguration,
-)
-from autogpt.core.planning.strategies.next_ability import (
- NextAbility,
- NextAbilityConfiguration,
-)
diff --git a/autogpt/core/planning/strategies/initial_plan.py b/autogpt/core/planning/strategies/initial_plan.py
deleted file mode 100644
index 2f8d99b83..000000000
--- a/autogpt/core/planning/strategies/initial_plan.py
+++ /dev/null
@@ -1,190 +0,0 @@
-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.planning.base import PromptStrategy
-from autogpt.core.planning.schema import (
- LanguageModelClassification,
- LanguageModelPrompt,
- Task,
- TaskType,
-)
-from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
-from autogpt.core.resource.model_providers import (
- LanguageModelFunction,
- LanguageModelMessage,
- MessageRole,
-)
-
-
-class InitialPlanConfiguration(SystemConfiguration):
- model_classification: LanguageModelClassification = UserConfigurable()
- system_prompt_template: str = UserConfigurable()
- system_info: list[str] = UserConfigurable()
- user_prompt_template: str = UserConfigurable()
- create_plan_function: dict = UserConfigurable()
-
-
-class InitialPlan(PromptStrategy):
- DEFAULT_SYSTEM_PROMPT_TEMPLATE = (
- "You are an expert project planner. You're responsibility is to create work plans for autonomous agents. "
- "You will be given a name, a role, set of goals for the agent to accomplish. Your job is to "
- "break down those goals into a set of tasks that the agent can accomplish to achieve those goals. "
- "Agents are resourceful, but require clear instructions. Each task you create should have clearly defined "
- "`ready_criteria` that the agent can check to see if the task is ready to be started. Each task should "
- "also have clearly defined `acceptance_criteria` that the agent can check to evaluate if the task is complete. "
- "You should create as many tasks as you think is necessary to accomplish the goals.\n\n"
- "System Info:\n{system_info}"
- )
-
- DEFAULT_SYSTEM_INFO = [
- "The OS you are running on is: {os_info}",
- "It takes money to let you run. Your API budget is ${api_budget:.3f}",
- "The current time and date is {current_time}",
- ]
-
- DEFAULT_USER_PROMPT_TEMPLATE = (
- "You are {agent_name}, {agent_role}\n" "Your goals are:\n" "{agent_goals}"
- )
-
- DEFAULT_CREATE_PLAN_FUNCTION = {
- "name": "create_initial_agent_plan",
- "description": "Creates a set of tasks that forms the initial plan for an autonomous agent.",
- "parameters": {
- "type": "object",
- "properties": {
- "task_list": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "objective": {
- "type": "string",
- "description": "An imperative verb phrase that succinctly describes the task.",
- },
- "type": {
- "type": "string",
- "description": "A categorization for the task. ",
- "enum": [t.value for t in TaskType],
- },
- "acceptance_criteria": {
- "type": "array",
- "items": {
- "type": "string",
- "description": "A list of measurable and testable criteria that must be met for the task to be considered complete.",
- },
- },
- "priority": {
- "type": "integer",
- "description": "A number between 1 and 10 indicating the priority of the task relative to other generated tasks.",
- "minimum": 1,
- "maximum": 10,
- },
- "ready_criteria": {
- "type": "array",
- "items": {
- "type": "string",
- "description": "A list of measurable and testable criteria that must be met before the task can be started.",
- },
- },
- },
- "required": [
- "objective",
- "type",
- "acceptance_criteria",
- "priority",
- "ready_criteria",
- ],
- },
- },
- },
- },
- }
-
- default_configuration = InitialPlanConfiguration(
- model_classification=LanguageModelClassification.SMART_MODEL,
- system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
- system_info=DEFAULT_SYSTEM_INFO,
- user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
- create_plan_function=DEFAULT_CREATE_PLAN_FUNCTION,
- )
-
- def __init__(
- self,
- model_classification: LanguageModelClassification,
- system_prompt_template: str,
- system_info: list[str],
- user_prompt_template: str,
- create_plan_function: dict,
- ):
- self._model_classification = model_classification
- self._system_prompt_template = system_prompt_template
- self._system_info = system_info
- self._user_prompt_template = user_prompt_template
- self._create_plan_function = create_plan_function
-
- @property
- def model_classification(self) -> LanguageModelClassification:
- return self._model_classification
-
- def build_prompt(
- self,
- agent_name: str,
- agent_role: str,
- agent_goals: list[str],
- abilities: list[str],
- os_info: str,
- api_budget: float,
- current_time: str,
- **kwargs,
- ) -> LanguageModelPrompt:
- template_kwargs = {
- "agent_name": agent_name,
- "agent_role": agent_role,
- "os_info": os_info,
- "api_budget": api_budget,
- "current_time": current_time,
- **kwargs,
- }
- template_kwargs["agent_goals"] = to_numbered_list(
- agent_goals, **template_kwargs
- )
- template_kwargs["abilities"] = to_numbered_list(abilities, **template_kwargs)
- template_kwargs["system_info"] = to_numbered_list(
- self._system_info, **template_kwargs
- )
-
- system_prompt = LanguageModelMessage(
- role=MessageRole.SYSTEM,
- content=self._system_prompt_template.format(**template_kwargs),
- )
- user_prompt = LanguageModelMessage(
- role=MessageRole.USER,
- content=self._user_prompt_template.format(**template_kwargs),
- )
- create_plan_function = LanguageModelFunction(
- json_schema=self._create_plan_function,
- )
-
- return LanguageModelPrompt(
- messages=[system_prompt, user_prompt],
- functions=[create_plan_function],
- # TODO:
- tokens_used=0,
- )
-
- def parse_response_content(
- self,
- response_content: dict,
- ) -> dict:
- """Parse the actual text response from the objective model.
-
- Args:
- response_content: The raw response content from the objective model.
-
- Returns:
- The parsed response.
-
- """
- parsed_response = json_loads(response_content["function_call"]["arguments"])
- parsed_response["task_list"] = [
- Task.parse_obj(task) for task in parsed_response["task_list"]
- ]
- return parsed_response
diff --git a/autogpt/core/planning/strategies/name_and_goals.py b/autogpt/core/planning/strategies/name_and_goals.py
deleted file mode 100644
index c4f1e764a..000000000
--- a/autogpt/core/planning/strategies/name_and_goals.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.planning.base import PromptStrategy
-from autogpt.core.planning.schema import (
- LanguageModelClassification,
- LanguageModelPrompt,
-)
-from autogpt.core.planning.strategies.utils import json_loads
-from autogpt.core.resource.model_providers import (
- LanguageModelFunction,
- LanguageModelMessage,
- MessageRole,
-)
-
-
-class NameAndGoalsConfiguration(SystemConfiguration):
- model_classification: LanguageModelClassification = UserConfigurable()
- system_prompt: str = UserConfigurable()
- user_prompt_template: str = UserConfigurable()
- create_agent_function: dict = UserConfigurable()
-
-
-class NameAndGoals(PromptStrategy):
- DEFAULT_SYSTEM_PROMPT = (
- "Your job is to respond to a user-defined task by invoking the `create_agent` function "
- "to generate an autonomous agent to complete the task. You should supply a role-based "
- "name for the agent, an informative description for what the agent does, and 1 to 5 "
- "goals that are optimally aligned with the successful completion of its assigned task.\n\n"
- "Example Input:\n"
- "Help me with marketing my business\n\n"
- "Example Function Call:\n"
- "create_agent(name='CMOGPT', "
- "description='A professional digital marketer AI that assists Solopreneurs in "
- "growing their businesses by providing world-class expertise in solving "
- "marketing problems for SaaS, content products, agencies, and more.', "
- "goals=['Engage in effective problem-solving, prioritization, planning, and "
- "supporting execution to address your marketing needs as your virtual Chief "
- "Marketing Officer.', 'Provide specific, actionable, and concise advice to "
- "help you make informed decisions without the use of platitudes or overly "
- "wordy explanations.', 'Identify and prioritize quick wins and cost-effective "
- "campaigns that maximize results with minimal time and budget investment.', "
- "'Proactively take the lead in guiding you and offering suggestions when faced "
- "with unclear information or uncertainty to ensure your marketing strategy "
- "remains on track.'])"
- )
-
- DEFAULT_USER_PROMPT_TEMPLATE = "'{user_objective}'"
-
- DEFAULT_CREATE_AGENT_FUNCTION = {
- "name": "create_agent",
- "description": ("Create a new autonomous AI agent to complete a given task."),
- "parameters": {
- "type": "object",
- "properties": {
- "agent_name": {
- "type": "string",
- "description": "A short role-based name for an autonomous agent.",
- },
- "agent_role": {
- "type": "string",
- "description": "An informative one sentence description of what the AI agent does",
- },
- "agent_goals": {
- "type": "array",
- "minItems": 1,
- "maxItems": 5,
- "items": {
- "type": "string",
- },
- "description": (
- "One to five highly effective goals that are optimally aligned with the completion of a "
- "specific task. The number and complexity of the goals should correspond to the "
- "complexity of the agent's primary objective."
- ),
- },
- },
- "required": ["agent_name", "agent_role", "agent_goals"],
- },
- }
-
- default_configuration = NameAndGoalsConfiguration(
- model_classification=LanguageModelClassification.SMART_MODEL,
- system_prompt=DEFAULT_SYSTEM_PROMPT,
- user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
- create_agent_function=DEFAULT_CREATE_AGENT_FUNCTION,
- )
-
- def __init__(
- self,
- model_classification: LanguageModelClassification,
- system_prompt: str,
- user_prompt_template: str,
- create_agent_function: dict,
- ):
- self._model_classification = model_classification
- self._system_prompt_message = system_prompt
- self._user_prompt_template = user_prompt_template
- self._create_agent_function = create_agent_function
-
- @property
- def model_classification(self) -> LanguageModelClassification:
- return self._model_classification
-
- def build_prompt(self, user_objective: str = "", **kwargs) -> LanguageModelPrompt:
- system_message = LanguageModelMessage(
- role=MessageRole.SYSTEM,
- content=self._system_prompt_message,
- )
- user_message = LanguageModelMessage(
- role=MessageRole.USER,
- content=self._user_prompt_template.format(
- user_objective=user_objective,
- ),
- )
- create_agent_function = LanguageModelFunction(
- json_schema=self._create_agent_function,
- )
- prompt = LanguageModelPrompt(
- messages=[system_message, user_message],
- functions=[create_agent_function],
- # TODO
- tokens_used=0,
- )
- return prompt
-
- def parse_response_content(
- self,
- response_content: dict,
- ) -> dict:
- """Parse the actual text response from the objective model.
-
- Args:
- response_content: The raw response content from the objective model.
-
- Returns:
- The parsed response.
-
- """
- parsed_response = json_loads(response_content["function_call"]["arguments"])
- return parsed_response
diff --git a/autogpt/core/planning/strategies/next_ability.py b/autogpt/core/planning/strategies/next_ability.py
deleted file mode 100644
index dff310c37..000000000
--- a/autogpt/core/planning/strategies/next_ability.py
+++ /dev/null
@@ -1,183 +0,0 @@
-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-from autogpt.core.planning.base import PromptStrategy
-from autogpt.core.planning.schema import (
- LanguageModelClassification,
- LanguageModelPrompt,
- Task,
-)
-from autogpt.core.planning.strategies.utils import json_loads, to_numbered_list
-from autogpt.core.resource.model_providers import (
- LanguageModelFunction,
- LanguageModelMessage,
- MessageRole,
-)
-
-
-class NextAbilityConfiguration(SystemConfiguration):
- model_classification: LanguageModelClassification = UserConfigurable()
- system_prompt_template: str = UserConfigurable()
- system_info: list[str] = UserConfigurable()
- user_prompt_template: str = UserConfigurable()
- additional_ability_arguments: dict = UserConfigurable()
-
-
-class NextAbility(PromptStrategy):
- DEFAULT_SYSTEM_PROMPT_TEMPLATE = "System Info:\n{system_info}"
-
- DEFAULT_SYSTEM_INFO = [
- "The OS you are running on is: {os_info}",
- "It takes money to let you run. Your API budget is ${api_budget:.3f}",
- "The current time and date is {current_time}",
- ]
-
- DEFAULT_USER_PROMPT_TEMPLATE = (
- "Your current task is is {task_objective}.\n"
- "You have taken {cycle_count} actions on this task already. "
- "Here is the actions you have taken and their results:\n"
- "{action_history}\n\n"
- "Here is additional information that may be useful to you:\n"
- "{additional_info}\n\n"
- "Additionally, you should consider the following:\n"
- "{user_input}\n\n"
- "Your task of {task_objective} is complete when the following acceptance criteria have been met:\n"
- "{acceptance_criteria}\n\n"
- "Please choose one of the provided functions to accomplish this task. "
- "Some tasks may require multiple functions to accomplish. If that is the case, choose the function that "
- "you think is most appropriate for the current situation given your progress so far."
- )
-
- DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS = {
- "motivation": {
- "type": "string",
- "description": "Your justification for choosing choosing this function instead of a different one.",
- },
- "self_criticism": {
- "type": "string",
- "description": "Thoughtful self-criticism that explains why this function may not be the best choice.",
- },
- "reasoning": {
- "type": "string",
- "description": "Your reasoning for choosing this function taking into account the `motivation` and weighing the `self_criticism`.",
- },
- }
-
- default_configuration = NextAbilityConfiguration(
- model_classification=LanguageModelClassification.SMART_MODEL,
- system_prompt_template=DEFAULT_SYSTEM_PROMPT_TEMPLATE,
- system_info=DEFAULT_SYSTEM_INFO,
- user_prompt_template=DEFAULT_USER_PROMPT_TEMPLATE,
- additional_ability_arguments=DEFAULT_ADDITIONAL_ABILITY_ARGUMENTS,
- )
-
- def __init__(
- self,
- model_classification: LanguageModelClassification,
- system_prompt_template: str,
- system_info: list[str],
- user_prompt_template: str,
- additional_ability_arguments: dict,
- ):
- self._model_classification = model_classification
- self._system_prompt_template = system_prompt_template
- self._system_info = system_info
- self._user_prompt_template = user_prompt_template
- self._additional_ability_arguments = additional_ability_arguments
-
- @property
- def model_classification(self) -> LanguageModelClassification:
- return self._model_classification
-
- def build_prompt(
- self,
- task: Task,
- ability_schema: list[dict],
- os_info: str,
- api_budget: float,
- current_time: str,
- **kwargs,
- ) -> LanguageModelPrompt:
- template_kwargs = {
- "os_info": os_info,
- "api_budget": api_budget,
- "current_time": current_time,
- **kwargs,
- }
-
- for ability in ability_schema:
- ability["parameters"]["properties"].update(
- self._additional_ability_arguments
- )
- ability["parameters"]["required"] += list(
- self._additional_ability_arguments.keys()
- )
-
- template_kwargs["task_objective"] = task.objective
- template_kwargs["cycle_count"] = task.context.cycle_count
- template_kwargs["action_history"] = to_numbered_list(
- [action.summary() for action in task.context.prior_actions],
- no_items_response="You have not taken any actions yet.",
- **template_kwargs,
- )
- template_kwargs["additional_info"] = to_numbered_list(
- [memory.summary() for memory in task.context.memories]
- + [info for info in task.context.supplementary_info],
- no_items_response="There is no additional information available at this time.",
- **template_kwargs,
- )
- template_kwargs["user_input"] = to_numbered_list(
- [user_input for user_input in task.context.user_input],
- no_items_response="There are no additional considerations at this time.",
- **template_kwargs,
- )
- template_kwargs["acceptance_criteria"] = to_numbered_list(
- [acceptance_criteria for acceptance_criteria in task.acceptance_criteria],
- **template_kwargs,
- )
-
- template_kwargs["system_info"] = to_numbered_list(
- self._system_info,
- **template_kwargs,
- )
-
- system_prompt = LanguageModelMessage(
- role=MessageRole.SYSTEM,
- content=self._system_prompt_template.format(**template_kwargs),
- )
- user_prompt = LanguageModelMessage(
- role=MessageRole.USER,
- content=self._user_prompt_template.format(**template_kwargs),
- )
- functions = [
- LanguageModelFunction(json_schema=ability) for ability in ability_schema
- ]
-
- return LanguageModelPrompt(
- messages=[system_prompt, user_prompt],
- functions=functions,
- # TODO:
- tokens_used=0,
- )
-
- def parse_response_content(
- self,
- response_content: dict,
- ) -> dict:
- """Parse the actual text response from the objective model.
-
- Args:
- response_content: The raw response content from the objective model.
-
- Returns:
- The parsed response.
-
- """
- function_name = response_content["function_call"]["name"]
- function_arguments = json_loads(response_content["function_call"]["arguments"])
- parsed_response = {
- "motivation": function_arguments.pop("motivation"),
- "self_criticism": function_arguments.pop("self_criticism"),
- "reasoning": function_arguments.pop("reasoning"),
- "next_ability": function_name,
- "ability_arguments": function_arguments,
- }
- return parsed_response
diff --git a/autogpt/core/planning/strategies/utils.py b/autogpt/core/planning/strategies/utils.py
deleted file mode 100644
index 5a725903f..000000000
--- a/autogpt/core/planning/strategies/utils.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import ast
-import json
-
-
-def to_numbered_list(
- items: list[str], no_items_response: str = "", **template_args
-) -> str:
- if items:
- return "\n".join(
- f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items)
- )
- else:
- return no_items_response
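-
-# Illustrative example: items are numbered and formatted with the template
-# args; an empty list yields the fallback string instead:
-# to_numbered_list(["use {tool}", "report back"], tool="a search engine")
-# -> "1. use a search engine\n2. report back"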
-
-
-def json_loads(json_str: str):
- # TODO: this is a hack function for now. Trying to see what errors show up in testing.
- # The function API still sometimes returns JSON strings with minor issues like
- # trailing commas, which json.loads rejects but ast.literal_eval accepts.
- try:
- # Parse strict JSON first; fall back to a Python-literal parse on failure.
- return json.loads(json_str)
- except json.decoder.JSONDecodeError as e:
- try:
- print(f"json decode error {e}. trying literal eval")
- return ast.literal_eval(json_str)
- except Exception:
- breakpoint()
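-
-# Illustrative example of the fallback (assumed input): a trailing comma is
-# rejected by json.loads but parses cleanly as a Python literal:
-# json_loads('{"a": 1,}') -> {"a": 1}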
diff --git a/autogpt/core/planning/templates.py b/autogpt/core/planning/templates.py
deleted file mode 100644
index e28f2ed75..000000000
--- a/autogpt/core/planning/templates.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Rules of thumb:
-# - Templates don't add new lines at the end of the string. This is the
-# responsibility of the consuming template.
-
-####################
-# Planner defaults #
-####################
-
-
-USER_OBJECTIVE = (
- "Write a wikipedia style article about the project: "
- "https://github.com/significant-gravitas/Auto-GPT"
-)
-
-
-ABILITIES = (
- 'analyze_code: Analyze Code, args: "code": "<full_code_string>"',
- 'execute_python_file: Execute Python File, args: "filename": "<filename>"',
- 'append_to_file: Append to file, args: "filename": "<filename>", "text": "<text>"',
- 'delete_file: Delete file, args: "filename": "<filename>"',
- 'list_files: List Files in Directory, args: "directory": "<directory>"',
- 'read_file: Read a file, args: "filename": "<filename>"',
- 'write_to_file: Write to file, args: "filename": "<filename>", "text": "<text>"',
- 'google: Google Search, args: "query": "<query>"',
- 'improve_code: Get Improved Code, args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
- 'browse_website: Browse Website, args: "url": "<url>", "question": "<what_you_want_to_find_on_website>"',
- 'write_tests: Write Tests, args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
- 'get_hyperlinks: Get hyperlinks, args: "url": "<url>"',
- 'get_text_summary: Get text summary, args: "url": "<url>", "question": "<question>"',
- 'task_complete: Task Complete (Shutdown), args: "reason": "<reason>"',
-)
-
-
-# Plan Prompt
-# -----------
-
-
-PLAN_PROMPT_CONSTRAINTS = (
- "~4000 word limit for short term memory. Your short term memory is short, so "
- "immediately save important information to files.",
- "If you are unsure how you previously did something or want to recall past "
- "events, thinking about similar events will help you remember.",
- "No user assistance",
- "Exclusively use the commands listed below e.g. command_name",
-)
-
-PLAN_PROMPT_RESOURCES = (
- "Internet access for searches and information gathering.",
- "Long-term memory management.",
- "File output.",
-)
-
-PLAN_PROMPT_PERFORMANCE_EVALUATIONS = (
- "Continuously review and analyze your actions to ensure you are performing to"
- " the best of your abilities.",
- "Constructively self-criticize your big-picture behavior constantly.",
- "Reflect on past decisions and strategies to refine your approach.",
- "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
- " the least number of steps.",
- "Write all code to a file",
-)
-
-
-PLAN_PROMPT_RESPONSE_DICT = {
- "thoughts": {
- "text": "thought",
- "reasoning": "reasoning",
- "plan": "- short bulleted\n- list that conveys\n- long-term plan",
- "criticism": "constructive self-criticism",
- "speak": "thoughts summary to say to user",
- },
- "command": {"name": "command name", "args": {"arg name": "value"}},
-}
-
-PLAN_PROMPT_RESPONSE_FORMAT = (
- "You should only respond in JSON format as described below\n"
- "Response Format:\n"
- "{response_json_structure}\n"
- "Ensure the response can be parsed by Python json.loads"
-)
-
-PLAN_TRIGGERING_PROMPT = (
- "Determine which next command to use, and respond using the format specified above:"
-)
-
-PLAN_PROMPT_MAIN = (
- "{header}\n\n"
- "GOALS:\n\n{goals}\n\n"
- "Info:\n{info}\n\n"
- "Constraints:\n{constraints}\n\n"
- "Commands:\n{commands}\n\n"
- "Resources:\n{resources}\n\n"
- "Performance Evaluations:\n{performance_evaluations}\n\n"
- "You should only respond in JSON format as described below\n"
- "Response Format:\n{response_json_structure}\n"
- "Ensure the response can be parsed by Python json.loads"
-)
-
-
-###########################
-# Parameterized templates #
-###########################
diff --git a/autogpt/core/plugin/__init__.py b/autogpt/core/plugin/__init__.py
deleted file mode 100644
index dfa192591..000000000
--- a/autogpt/core/plugin/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-"""The plugin system allows the Agent to be extended with new functionality."""
-from autogpt.core.plugin.base import PluginService
diff --git a/autogpt/core/plugin/base.py b/autogpt/core/plugin/base.py
deleted file mode 100644
index bbd99ad8e..000000000
--- a/autogpt/core/plugin/base.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import abc
-import enum
-from typing import TYPE_CHECKING, Type
-
-from pydantic import BaseModel
-
-from autogpt.core.configuration import SystemConfiguration, UserConfigurable
-
-if TYPE_CHECKING:
- from autogpt.core.ability import Ability, AbilityRegistry
- from autogpt.core.memory import Memory
- from autogpt.core.resource.model_providers import (
- EmbeddingModelProvider,
- LanguageModelProvider,
- )
-
- # Expand to other types as needed
- PluginType = (
- Type[Ability] # Swappable now
- | Type[AbilityRegistry] # Swappable maybe never
- | Type[LanguageModelProvider] # Swappable soon
- | Type[EmbeddingModelProvider] # Swappable soon
- | Type[Memory] # Swappable now
- # | Type[Planner] # Swappable soon
- )
-
-
-class PluginStorageFormat(str, enum.Enum):
- """Supported plugin storage formats.
-
- Plugins can be stored at one of these supported locations.
-
- """
-
- INSTALLED_PACKAGE = "installed_package" # Required now, loads system defaults
- WORKSPACE = "workspace" # Required now
- # OPENAPI_URL = "open_api_url" # Soon (requires some tooling we don't have yet).
- # OTHER_FILE_PATH = "other_file_path" # Maybe later (maybe now)
- # GIT = "git" # Maybe later (or soon)
- # PYPI = "pypi" # Maybe later
- # AUTOGPT_PLUGIN_SERVICE = "autogpt_plugin_service" # Long term solution, requires design
- # AUTO = "auto" # Feature for later maybe, automatically find plugin.
-
-
-# Installed package example
-# PluginLocation(
-# storage_format='installed_package',
-# storage_route='autogpt_plugins.twitter.SendTwitterMessage'
-# )
-# Workspace example
-# PluginLocation(
-# storage_format='workspace',
-# storage_route='relative/path/to/plugin.pkl'
-# OR
-# storage_route='relative/path/to/plugin.py'
-# )
-# Git
-# PluginLocation(
-# storage_format='git',
-# Exact format TBD.
-# storage_route='https://github.com/gravelBridge/AutoGPT-WolframAlpha/blob/main/autogpt-wolframalpha/wolfram_alpha.py'
-# )
-# PyPI
-# PluginLocation(
-# storage_format='pypi',
-# storage_route='package_name'
-# )
-
-
-
-# A plugin storage route.
-#
-# This is a string that specifies where to load a plugin from
-# (e.g. an import path or file path).
-PluginStorageRoute = str
-
-
-class PluginLocation(SystemConfiguration):
- """A plugin location.
-
- This is a combination of a plugin storage format and a plugin storage route.
- It is used by the PluginService to load plugins.
-
- """
-
- storage_format: PluginStorageFormat = UserConfigurable()
- storage_route: PluginStorageRoute = UserConfigurable()
-
-
-class PluginMetadata(BaseModel):
- """Metadata about a plugin."""
-
- name: str
- description: str
- location: PluginLocation
-
-
-class PluginService(abc.ABC):
- """Base class for plugin service.
-
- The plugin service should be stateless. This defines the interface for
- loading plugins from various storage formats.
-
- """
-
- @staticmethod
- @abc.abstractmethod
- def get_plugin(plugin_location: PluginLocation) -> "PluginType":
- """Get a plugin from a plugin location."""
- ...
-
- ####################################
- # Low-level storage format loaders #
- ####################################
- @staticmethod
- @abc.abstractmethod
- def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
- """Load a plugin from a file path."""
-
- ...
-
- @staticmethod
- @abc.abstractmethod
- def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
- """Load a plugin from an import path."""
- ...
-
- @staticmethod
- @abc.abstractmethod
- def resolve_name_to_path(
- plugin_route: PluginStorageRoute, path_type: str
- ) -> PluginStorageRoute:
- """Resolve a plugin name to a plugin path."""
- ...
-
- #####################################
- # High-level storage format loaders #
- #####################################
-
- @staticmethod
- @abc.abstractmethod
- def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
- """Load a plugin from the workspace."""
- ...
-
- @staticmethod
- @abc.abstractmethod
- def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
- """Load a plugin from an installed package."""
- ...
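
The commented examples above translate directly into the configuration classes this module defined. A minimal sketch, assuming the module as it existed before this commit:

from autogpt.core.plugin.base import PluginLocation, PluginStorageFormat

location = PluginLocation(
    storage_format=PluginStorageFormat.INSTALLED_PACKAGE,
    storage_route="autogpt_plugins.twitter.SendTwitterMessage",
)

# PluginLocation is a SystemConfiguration (a pydantic model), so the same
# location also round-trips from a plain dict:
same_location = PluginLocation.parse_obj(
    {
        "storage_format": "installed_package",
        "storage_route": "autogpt_plugins.twitter.SendTwitterMessage",
    }
)
assert location == same_location
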
diff --git a/autogpt/core/plugin/simple.py b/autogpt/core/plugin/simple.py
deleted file mode 100644
index aacf84aff..000000000
--- a/autogpt/core/plugin/simple.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from importlib import import_module
-from typing import TYPE_CHECKING
-
-from autogpt.core.plugin.base import (
- PluginLocation,
- PluginService,
- PluginStorageFormat,
- PluginStorageRoute,
-)
-
-if TYPE_CHECKING:
- from autogpt.core.plugin.base import PluginType
-
-
-class SimplePluginService(PluginService):
- @staticmethod
- def get_plugin(plugin_location: dict | PluginLocation) -> "PluginType":
- """Get a plugin from a plugin location."""
- if isinstance(plugin_location, dict):
- plugin_location = PluginLocation.parse_obj(plugin_location)
- if plugin_location.storage_format == PluginStorageFormat.WORKSPACE:
- return SimplePluginService.load_from_workspace(
- plugin_location.storage_route
- )
- elif plugin_location.storage_format == PluginStorageFormat.INSTALLED_PACKAGE:
- return SimplePluginService.load_from_installed_package(
- plugin_location.storage_route
- )
- else:
- raise NotImplementedError(
- f"Plugin storage format {plugin_location.storage_format} is not implemented."
- )
-
- ####################################
- # Low-level storage format loaders #
- ####################################
- @staticmethod
- def load_from_file_path(plugin_route: PluginStorageRoute) -> "PluginType":
- """Load a plugin from a file path."""
- # TODO: Define an on disk storage format and implement this.
- # Can pull from existing zip file loading implementation
- raise NotImplemented("Loading from file path is not implemented.")
-
- @staticmethod
- def load_from_import_path(plugin_route: PluginStorageRoute) -> "PluginType":
- """Load a plugin from an import path."""
- module_path, _, class_name = plugin_route.rpartition(".")
- return getattr(import_module(module_path), class_name)
-
- @staticmethod
- def resolve_name_to_path(
- plugin_route: PluginStorageRoute, path_type: str
- ) -> PluginStorageRoute:
- """Resolve a plugin name to a plugin path."""
- # TODO: Implement a discovery system for finding plugins by name from known
- # storage locations. E.g. if we know that path_type is a file path, we can
- # search the workspace for it. If it's an import path, we can check the core
- # system and the auto_gpt_plugins package.
- raise NotImplemented("Resolving plugin name to path is not implemented.")
-
- #####################################
- # High-level storage format loaders #
- #####################################
-
- @staticmethod
- def load_from_workspace(plugin_route: PluginStorageRoute) -> "PluginType":
- """Load a plugin from the workspace."""
- plugin = SimplePluginService.load_from_file_path(plugin_route)
- return plugin
-
- @staticmethod
- def load_from_installed_package(plugin_route: PluginStorageRoute) -> "PluginType":
- plugin = SimplePluginService.load_from_import_path(plugin_route)
- return plugin
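
A minimal sketch of the import-path loader above, using a stdlib class as a stand-in for a real plugin class:

import json

from autogpt.core.plugin.simple import SimplePluginService

# "installed_package" routes go through load_from_import_path, which splits the
# route into a module path and an attribute name.
plugin_cls = SimplePluginService.get_plugin(
    {"storage_format": "installed_package", "storage_route": "json.JSONDecoder"}
)
assert plugin_cls is json.JSONDecoder
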
diff --git a/autogpt/core/resource/__init__.py b/autogpt/core/resource/__init__.py
deleted file mode 100644
index 12576c203..000000000
--- a/autogpt/core/resource/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from autogpt.core.resource.schema import (
- ProviderBudget,
- ProviderCredentials,
- ProviderSettings,
- ProviderUsage,
- ResourceType,
-)
diff --git a/autogpt/core/resource/model_providers/__init__.py b/autogpt/core/resource/model_providers/__init__.py
deleted file mode 100644
index ac9545d61..000000000
--- a/autogpt/core/resource/model_providers/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from autogpt.core.resource.model_providers.openai import (
- OPEN_AI_MODELS,
- OpenAIModelName,
- OpenAIProvider,
- OpenAISettings,
-)
-from autogpt.core.resource.model_providers.schema import (
- Embedding,
- EmbeddingModelProvider,
- EmbeddingModelProviderModelInfo,
- EmbeddingModelProviderModelResponse,
- LanguageModelFunction,
- LanguageModelMessage,
- LanguageModelProvider,
- LanguageModelProviderModelInfo,
- LanguageModelProviderModelResponse,
- MessageRole,
- ModelProvider,
- ModelProviderBudget,
- ModelProviderCredentials,
- ModelProviderModelInfo,
- ModelProviderModelResponse,
- ModelProviderName,
- ModelProviderService,
- ModelProviderSettings,
- ModelProviderUsage,
-)
-
-__all__ = [
- "ModelProvider",
- "ModelProviderName",
- "ModelProviderSettings",
- "EmbeddingModelProvider",
- "EmbeddingModelProviderModelResponse",
- "LanguageModelProvider",
- "LanguageModelProviderModelResponse",
- "LanguageModelFunction",
- "LanguageModelMessage",
- "MessageRole",
- "OpenAIModelName",
- "OPEN_AI_MODELS",
- "OpenAIProvider",
- "OpenAISettings",
-]
diff --git a/autogpt/core/resource/model_providers/openai.py b/autogpt/core/resource/model_providers/openai.py
deleted file mode 100644
index 3707796a1..000000000
--- a/autogpt/core/resource/model_providers/openai.py
+++ /dev/null
@@ -1,373 +0,0 @@
-import enum
-import functools
-import logging
-import math
-import time
-from typing import Callable, ParamSpec, TypeVar
-
-import openai
-from openai.error import APIError, RateLimitError
-
-from autogpt.core.configuration import (
- Configurable,
- SystemConfiguration,
- UserConfigurable,
-)
-from autogpt.core.resource.model_providers.schema import (
- Embedding,
- EmbeddingModelProvider,
- EmbeddingModelProviderModelInfo,
- EmbeddingModelProviderModelResponse,
- LanguageModelFunction,
- LanguageModelMessage,
- LanguageModelProvider,
- LanguageModelProviderModelInfo,
- LanguageModelProviderModelResponse,
- ModelProviderBudget,
- ModelProviderCredentials,
- ModelProviderName,
- ModelProviderService,
- ModelProviderSettings,
- ModelProviderUsage,
-)
-
-OpenAIEmbeddingParser = Callable[[Embedding], Embedding]
-OpenAIChatParser = Callable[[str], dict]
-
-
-class OpenAIModelName(str, enum.Enum):
- ADA = "text-embedding-ada-002"
- GPT3 = "gpt-3.5-turbo-0613"
- GPT3_16K = "gpt-3.5-turbo-16k-0613"
- GPT4 = "gpt-4-0613"
- GPT4_32K = "gpt-4-32k-0613"
-
-
-OPEN_AI_EMBEDDING_MODELS = {
- OpenAIModelName.ADA: EmbeddingModelProviderModelInfo(
- name=OpenAIModelName.ADA,
- service=ModelProviderService.EMBEDDING,
- provider_name=ModelProviderName.OPENAI,
- prompt_token_cost=0.0004,
- completion_token_cost=0.0,
- max_tokens=8191,
- embedding_dimensions=1536,
- ),
-}
-
-
-OPEN_AI_LANGUAGE_MODELS = {
- OpenAIModelName.GPT3: LanguageModelProviderModelInfo(
- name=OpenAIModelName.GPT3,
- service=ModelProviderService.LANGUAGE,
- provider_name=ModelProviderName.OPENAI,
- prompt_token_cost=0.0015,
- completion_token_cost=0.002,
- max_tokens=4096,
- ),
- OpenAIModelName.GPT3_16K: LanguageModelProviderModelInfo(
- name=OpenAIModelName.GPT3_16K,
- service=ModelProviderService.LANGUAGE,
- provider_name=ModelProviderName.OPENAI,
- prompt_token_cost=0.003,
- completion_token_cost=0.002,
- max_tokens=16384,
- ),
- OpenAIModelName.GPT4: LanguageModelProviderModelInfo(
- name=OpenAIModelName.GPT4,
- service=ModelProviderService.LANGUAGE,
- provider_name=ModelProviderName.OPENAI,
- prompt_token_cost=0.03,
- completion_token_cost=0.06,
- max_tokens=8192,
- ),
- OpenAIModelName.GPT4_32K: LanguageModelProviderModelInfo(
- name=OpenAIModelName.GPT4_32K,
- service=ModelProviderService.LANGUAGE,
- provider_name=ModelProviderName.OPENAI,
- prompt_token_cost=0.06,
- completion_token_cost=0.12,
- max_tokens=32768,
- ),
-}
-
-
-OPEN_AI_MODELS = {
- **OPEN_AI_LANGUAGE_MODELS,
- **OPEN_AI_EMBEDDING_MODELS,
-}
-
-
-class OpenAIConfiguration(SystemConfiguration):
- retries_per_request: int = UserConfigurable()
-
-
-class OpenAIModelProviderBudget(ModelProviderBudget):
- graceful_shutdown_threshold: float = UserConfigurable()
- warning_threshold: float = UserConfigurable()
-
-
-class OpenAISettings(ModelProviderSettings):
- configuration: OpenAIConfiguration
- credentials: ModelProviderCredentials
- budget: OpenAIModelProviderBudget
-
-
-class OpenAIProvider(
- Configurable,
- LanguageModelProvider,
- EmbeddingModelProvider,
-):
- default_settings = OpenAISettings(
- name="openai_provider",
- description="Provides access to OpenAI's API.",
- configuration=OpenAIConfiguration(
- retries_per_request=10,
- ),
- credentials=ModelProviderCredentials(),
- budget=OpenAIModelProviderBudget(
- total_budget=math.inf,
- total_cost=0.0,
- remaining_budget=math.inf,
- usage=ModelProviderUsage(
- prompt_tokens=0,
- completion_tokens=0,
- total_tokens=0,
- ),
- graceful_shutdown_threshold=0.005,
- warning_threshold=0.01,
- ),
- )
-
- def __init__(
- self,
- settings: OpenAISettings,
- logger: logging.Logger,
- ):
- self._configuration = settings.configuration
- self._credentials = settings.credentials
- self._budget = settings.budget
-
- self._logger = logger
-
- retry_handler = _OpenAIRetryHandler(
- logger=self._logger,
- num_retries=self._configuration.retries_per_request,
- )
-
- self._create_completion = retry_handler(_create_completion)
- self._create_embedding = retry_handler(_create_embedding)
-
- def get_token_limit(self, model_name: str) -> int:
- """Get the token limit for a given model."""
- return OPEN_AI_MODELS[model_name].max_tokens
-
- def get_remaining_budget(self) -> float:
- """Get the remaining budget."""
- return self._budget.remaining_budget
-
- async def create_language_completion(
- self,
- model_prompt: list[LanguageModelMessage],
- functions: list[LanguageModelFunction],
- model_name: OpenAIModelName,
- completion_parser: Callable[[dict], dict],
- **kwargs,
- ) -> LanguageModelProviderModelResponse:
- """Create a completion using the OpenAI API."""
- completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs)
- response = await self._create_completion(
- messages=model_prompt,
- **completion_kwargs,
- )
- response_args = {
- "model_info": OPEN_AI_LANGUAGE_MODELS[model_name],
- "prompt_tokens_used": response.usage.prompt_tokens,
- "completion_tokens_used": response.usage.completion_tokens,
- }
-
- parsed_response = completion_parser(
- response.choices[0].message.to_dict_recursive()
- )
- response = LanguageModelProviderModelResponse(
- content=parsed_response, **response_args
- )
- self._budget.update_usage_and_cost(response)
- return response
-
- async def create_embedding(
- self,
- text: str,
- model_name: OpenAIModelName,
- embedding_parser: Callable[[Embedding], Embedding],
- **kwargs,
- ) -> EmbeddingModelProviderModelResponse:
- """Create an embedding using the OpenAI API."""
- embedding_kwargs = self._get_embedding_kwargs(model_name, **kwargs)
- response = await self._create_embedding(text=text, **embedding_kwargs)
-
- response_args = {
- "model_info": OPEN_AI_EMBEDDING_MODELS[model_name],
- "prompt_tokens_used": response.usage.prompt_tokens,
- "completion_tokens_used": response.usage.completion_tokens,
- }
- response = EmbeddingModelProviderModelResponse(
- **response_args,
- # openai's embedding responses carry the vectors under .data
- embedding=embedding_parser(response.data[0].embedding),
- )
- self._budget.update_usage_and_cost(response)
- return response
-
- def _get_completion_kwargs(
- self,
- model_name: OpenAIModelName,
- functions: list[LanguageModelFunction],
- **kwargs,
- ) -> dict:
- """Get kwargs for completion API call.
-
- Args:
- model_name: The model to use.
- kwargs: Keyword arguments to override the default values.
-
- Returns:
- The kwargs for the chat API call.
-
- """
- completion_kwargs = {
- "model": model_name,
- **kwargs,
- **self._credentials.unmasked(),
- }
- if functions:
- completion_kwargs["functions"] = functions
-
- return completion_kwargs
-
- def _get_embedding_kwargs(
- self,
- model_name: OpenAIModelName,
- **kwargs,
- ) -> dict:
- """Get kwargs for embedding API call.
-
- Args:
- model_name: The model to use.
- kwargs: Keyword arguments to override the default values.
-
- Returns:
- The kwargs for the embedding API call.
-
- """
- embedding_kwargs = {
- "model": model_name,
- **kwargs,
- **self._credentials.unmasked(),
- }
-
- return embedding_kwargs
-
- def __repr__(self):
- return "OpenAIProvider()"
-
-
-async def _create_embedding(text: str, *_, **kwargs) -> openai.Embedding:
- """Embed text using the OpenAI API.
-
- Args:
- text: The text to embed.
- kwargs: Keyword arguments for the API call (model, credentials, etc.).
-
- Returns:
- The raw API response containing the embedding.
- """
- return await openai.Embedding.acreate(
- input=[text],
- **kwargs,
- )
-
-
-async def _create_completion(
- messages: list[LanguageModelMessage], *_, **kwargs
-) -> openai.Completion:
- """Create a chat completion using the OpenAI API.
-
- Args:
- messages: The chat messages to send as the prompt.
-
- Returns:
- The completion.
-
- """
- messages = [message.dict() for message in messages]
- if "functions" in kwargs:
- kwargs["functions"] = [function.json_schema for function in kwargs["functions"]]
- return await openai.ChatCompletion.acreate(
- messages=messages,
- **kwargs,
- )
-
-
-_T = TypeVar("_T")
-_P = ParamSpec("_P")
-
-
-class _OpenAIRetryHandler:
- """Retry Handler for OpenAI API call.
-
- Args:
- num_retries int: Number of retries. Defaults to 10.
- backoff_base float: Base for exponential backoff. Defaults to 2.
- warn_user bool: Whether to warn the user. Defaults to True.
- """
-
- _retry_limit_msg = "Error: Reached rate limit, passing..."
- _api_key_error_msg = (
- "Please double check that you have setup a PAID OpenAI API Account. You can "
- "read more here: https://docs.agpt.co/setup/#getting-an-api-key"
- )
- _backoff_msg = "Error: API Bad gateway. Waiting {backoff} seconds..."
-
- def __init__(
- self,
- logger: logging.Logger,
- num_retries: int = 10,
- backoff_base: float = 2.0,
- warn_user: bool = True,
- ):
- self._logger = logger
- self._num_retries = num_retries
- self._backoff_base = backoff_base
- self._warn_user = warn_user
-
- def _log_rate_limit_error(self) -> None:
- self._logger.debug(self._retry_limit_msg)
- if self._warn_user:
- self._logger.warning(self._api_key_error_msg)
- self._warn_user = False
-
- def _backoff(self, attempt: int) -> None:
- backoff = self._backoff_base ** (attempt + 2)
- self._logger.debug(self._backoff_msg.format(backoff=backoff))
- time.sleep(backoff)
-
- def __call__(self, func: Callable[_P, _T]) -> Callable[_P, _T]:
- @functools.wraps(func)
- async def _wrapped(*args: _P.args, **kwargs: _P.kwargs) -> _T:
- num_attempts = self._num_retries + 1 # +1 for the first attempt
- for attempt in range(1, num_attempts + 1):
- try:
- return await func(*args, **kwargs)
-
- except RateLimitError:
- if attempt == num_attempts:
- raise
- self._log_rate_limit_error()
-
- except APIError as e:
- if (e.http_status != 502) or (attempt == num_attempts):
- raise
-
- self._backoff(attempt)
-
- return _wrapped
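
Two behaviors above are easy to check in isolation: budget accounting uses per-1,000-token pricing, and the retry handler sleeps backoff_base ** (attempt + 2) seconds between attempts. A small sketch against the deleted pricing table:

from autogpt.core.resource.model_providers.openai import (
    OPEN_AI_LANGUAGE_MODELS,
    OpenAIModelName,
)

info = OPEN_AI_LANGUAGE_MODELS[OpenAIModelName.GPT4]
# 1,000 prompt tokens plus 500 completion tokens on GPT-4:
cost = (1000 * info.prompt_token_cost + 500 * info.completion_token_cost) / 1000.0
assert abs(cost - 0.06) < 1e-9  # $0.03 prompt + $0.03 completion

# Backoff schedule for the default backoff_base=2.0:
assert [2.0 ** (attempt + 2) for attempt in (1, 2, 3)] == [8.0, 16.0, 32.0]
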
diff --git a/autogpt/core/resource/model_providers/schema.py b/autogpt/core/resource/model_providers/schema.py
deleted file mode 100644
index 266b4c81f..000000000
--- a/autogpt/core/resource/model_providers/schema.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import abc
-import enum
-from typing import Callable, ClassVar
-
-from pydantic import BaseModel, Field, SecretStr, validator
-
-from autogpt.core.configuration import UserConfigurable
-from autogpt.core.resource.schema import (
- Embedding,
- ProviderBudget,
- ProviderCredentials,
- ProviderSettings,
- ProviderUsage,
- ResourceType,
-)
-
-
-class ModelProviderService(str, enum.Enum):
- """A ModelService describes what kind of service the model provides."""
-
- EMBEDDING: str = "embedding"
- LANGUAGE: str = "language"
- TEXT: str = "text"
-
-
-class ModelProviderName(str, enum.Enum):
- OPENAI: str = "openai"
-
-
-class MessageRole(str, enum.Enum):
- USER = "user"
- SYSTEM = "system"
- ASSISTANT = "assistant"
-
-
-class LanguageModelMessage(BaseModel):
- role: MessageRole
- content: str
-
-
-class LanguageModelFunction(BaseModel):
- json_schema: dict
-
-
-class ModelProviderModelInfo(BaseModel):
- """Struct for model information.
-
- Would be lovely to eventually get this directly from APIs, but needs to be
- scraped from websites for now.
-
- """
-
- name: str
- service: ModelProviderService
- provider_name: ModelProviderName
- prompt_token_cost: float = 0.0
- completion_token_cost: float = 0.0
-
-
-class ModelProviderModelResponse(BaseModel):
- """Standard response struct for a response from a model."""
-
- prompt_tokens_used: int
- completion_tokens_used: int
- model_info: ModelProviderModelInfo
-
-
-class ModelProviderCredentials(ProviderCredentials):
- """Credentials for a model provider."""
-
- api_key: SecretStr | None = UserConfigurable(default=None)
- api_type: SecretStr | None = UserConfigurable(default=None)
- api_base: SecretStr | None = UserConfigurable(default=None)
- api_version: SecretStr | None = UserConfigurable(default=None)
- deployment_id: SecretStr | None = UserConfigurable(default=None)
-
- def unmasked(self) -> dict:
- return unmask(self)
-
- class Config:
- extra = "ignore"
-
-
-def unmask(model: BaseModel):
- unmasked_fields = {}
- for field_name, field in model.__fields__.items():
- value = getattr(model, field_name)
- if isinstance(value, SecretStr):
- unmasked_fields[field_name] = value.get_secret_value()
- else:
- unmasked_fields[field_name] = value
- return unmasked_fields
-
-
-class ModelProviderUsage(ProviderUsage):
- """Usage for a particular model from a model provider."""
-
- completion_tokens: int = 0
- prompt_tokens: int = 0
- total_tokens: int = 0
-
- def update_usage(
- self,
- model_response: ModelProviderModelResponse,
- ) -> None:
- self.completion_tokens += model_response.completion_tokens_used
- self.prompt_tokens += model_response.prompt_tokens_used
- self.total_tokens += (
- model_response.completion_tokens_used + model_response.prompt_tokens_used
- )
-
-
-class ModelProviderBudget(ProviderBudget):
- total_budget: float = UserConfigurable()
- total_cost: float
- remaining_budget: float
- usage: ModelProviderUsage
-
- def update_usage_and_cost(
- self,
- model_response: ModelProviderModelResponse,
- ) -> None:
- """Update the usage and cost of the provider."""
- model_info = model_response.model_info
- self.usage.update_usage(model_response)
- incremental_cost = (
- model_response.completion_tokens_used * model_info.completion_token_cost
- + model_response.prompt_tokens_used * model_info.prompt_token_cost
- ) / 1000.0
- self.total_cost += incremental_cost
- self.remaining_budget -= incremental_cost
-
-
-class ModelProviderSettings(ProviderSettings):
- resource_type = ResourceType.MODEL
- credentials: ModelProviderCredentials
- budget: ModelProviderBudget
-
-
-class ModelProvider(abc.ABC):
- """A ModelProvider abstracts the details of a particular provider of models."""
-
- defaults: ClassVar[ModelProviderSettings]
-
- @abc.abstractmethod
- def get_token_limit(self, model_name: str) -> int:
- ...
-
- @abc.abstractmethod
- def get_remaining_budget(self) -> float:
- ...
-
-
-####################
-# Embedding Models #
-####################
-
-
-class EmbeddingModelProviderModelInfo(ModelProviderModelInfo):
- """Struct for embedding model information."""
-
- service = ModelProviderService.EMBEDDING
- embedding_dimensions: int
-
-
-class EmbeddingModelProviderModelResponse(ModelProviderModelResponse):
- """Standard response struct for a response from an embedding model."""
-
- embedding: Embedding = Field(default_factory=list)
-
- @validator("completion_tokens_used")
- @classmethod
- def _verify_no_completion_tokens_used(cls, v):
- if v > 0:
- raise ValueError("Embeddings should not have completion tokens used.")
- return v
-
-
-class EmbeddingModelProvider(ModelProvider):
- @abc.abstractmethod
- async def create_embedding(
- self,
- text: str,
- model_name: str,
- embedding_parser: Callable[[Embedding], Embedding],
- **kwargs,
- ) -> EmbeddingModelProviderModelResponse:
- ...
-
-
-###################
-# Language Models #
-###################
-
-
-class LanguageModelProviderModelInfo(ModelProviderModelInfo):
- """Struct for language model information."""
-
- service = ModelProviderService.LANGUAGE
- max_tokens: int
-
-
-class LanguageModelProviderModelResponse(ModelProviderModelResponse):
- """Standard response struct for a response from a language model."""
-
- content: dict | None = None
-
-
-class LanguageModelProvider(ModelProvider):
- @abc.abstractmethod
- async def create_language_completion(
- self,
- model_prompt: list[LanguageModelMessage],
- functions: list[LanguageModelFunction],
- model_name: str,
- completion_parser: Callable[[dict], dict],
- **kwargs,
- ) -> LanguageModelProviderModelResponse:
- ...
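
unmask() is what let the provider splice credentials into API kwargs while keeping them masked in logs and reprs. A minimal sketch with a stand-in model rather than the real ModelProviderCredentials:

from pydantic import BaseModel, SecretStr

class DemoCredentials(BaseModel):  # stand-in for ModelProviderCredentials
    api_key: SecretStr | None = None

creds = DemoCredentials(api_key="sk-demo-123")
print(creds.api_key)  # SecretStr('**********'), masked
print(unmask(creds))  # {'api_key': 'sk-demo-123'}, usable as API kwargs
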
diff --git a/autogpt/core/resource/schema.py b/autogpt/core/resource/schema.py
deleted file mode 100644
index 0f97aedb0..000000000
--- a/autogpt/core/resource/schema.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import abc
-import enum
-
-from pydantic import SecretBytes, SecretField, SecretStr
-
-from autogpt.core.configuration import (
- SystemConfiguration,
- SystemSettings,
- UserConfigurable,
-)
-
-
-class ResourceType(str, enum.Enum):
- """An enumeration of resource types."""
-
- MODEL = "model"
- MEMORY = "memory"
-
-
-class ProviderUsage(SystemConfiguration, abc.ABC):
- @abc.abstractmethod
- def update_usage(self, *args, **kwargs) -> None:
- """Update the usage of the resource."""
- ...
-
-
-class ProviderBudget(SystemConfiguration):
- total_budget: float = UserConfigurable()
- total_cost: float
- remaining_budget: float
- usage: ProviderUsage
-
- @abc.abstractmethod
- def update_usage_and_cost(self, *args, **kwargs) -> None:
- """Update the usage and cost of the resource."""
- ...
-
-
-class ProviderCredentials(SystemConfiguration):
- """Struct for credentials."""
-
- class Config:
- json_encoders = {
- SecretStr: lambda v: v.get_secret_value() if v else None,
- SecretBytes: lambda v: v.get_secret_value() if v else None,
- SecretField: lambda v: v.get_secret_value() if v else None,
- }
-
-
-class ProviderSettings(SystemSettings):
- resource_type: ResourceType
- credentials: ProviderCredentials | None = None
- budget: ProviderBudget | None = None
-
-
-# Used both by model providers and memory providers
-Embedding = list[float]
diff --git a/autogpt/core/runner/cli_app/cli.py b/autogpt/core/runner/cli_app/cli.py
deleted file mode 100644
index 56fca9759..000000000
--- a/autogpt/core/runner/cli_app/cli.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from pathlib import Path
-
-import click
-import yaml
-
-from autogpt.core.runner.cli_app.main import run_auto_gpt
-from autogpt.core.runner.client_lib.shared_click_commands import (
- DEFAULT_SETTINGS_FILE,
- make_settings,
-)
-from autogpt.core.runner.client_lib.utils import coroutine, handle_exceptions
-
-
-@click.group()
-def autogpt():
- """Temporary command group for v2 commands."""
- pass
-
-
-autogpt.add_command(make_settings)
-
-
-@autogpt.command()
-@click.option(
- "--settings-file",
- type=click.Path(),
- default=DEFAULT_SETTINGS_FILE,
-)
-@click.option(
- "--pdb",
- is_flag=True,
- help="Drop into a debugger if an error is raised.",
-)
-@coroutine
-async def run(settings_file: str, pdb: bool) -> None:
- """Run the Auto-GPT agent."""
- click.echo("Running Auto-GPT agent...")
- settings_file = Path(settings_file)
- settings = {}
- if settings_file.exists():
- settings = yaml.safe_load(settings_file.read_text())
- main = handle_exceptions(run_auto_gpt, with_debugger=pdb)
- await main(settings)
-
-
-if __name__ == "__main__":
- autogpt()
diff --git a/autogpt/core/runner/cli_app/main.py b/autogpt/core/runner/cli_app/main.py
deleted file mode 100644
index 60af24bec..000000000
--- a/autogpt/core/runner/cli_app/main.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import click
-
-from autogpt.core.agent import AgentSettings, SimpleAgent
-from autogpt.core.runner.client_lib.logging import get_client_logger
-
-
-async def run_auto_gpt(user_configuration: dict):
- """Run the Auto-GPT CLI client."""
-
- client_logger = get_client_logger()
- client_logger.debug("Getting agent settings")
-
- agent_workspace = (
- user_configuration.get("workspace", {}).get("configuration", {}).get("root", "")
- )
-
- if not agent_workspace: # We don't have an agent yet.
- #################
- # Bootstrapping #
- #################
- # Step 1. Collate the user's settings with the default system settings.
- agent_settings: AgentSettings = SimpleAgent.compile_settings(
- client_logger,
- user_configuration,
- )
-
- # Step 2. Get a name and goals for the agent.
- # First we need to figure out what the user wants to do with the agent.
- # We'll do this by asking the user for a prompt.
- user_objective = click.prompt("What do you want Auto-GPT to do?")
- # Ask a language model to determine a name and goals for a suitable agent.
- name_and_goals = await SimpleAgent.determine_agent_name_and_goals(
- user_objective,
- agent_settings,
- client_logger,
- )
- print(parse_agent_name_and_goals(name_and_goals))
- # Finally, update the agent settings with the name and goals.
- agent_settings.update_agent_name_and_goals(name_and_goals)
-
- # Step 3. Provision the agent.
- agent_workspace = SimpleAgent.provision_agent(agent_settings, client_logger)
- print("agent is provisioned")
-
- # launch agent interaction loop
- agent = SimpleAgent.from_workspace(
- agent_workspace,
- client_logger,
- )
- print("agent is loaded")
-
- plan = await agent.build_initial_plan()
- print(parse_agent_plan(plan))
-
- while True:
- current_task, next_ability = await agent.determine_next_ability(plan)
- print(parse_next_ability(current_task, next_ability))
- user_input = click.prompt(
- "Should the agent proceed with this ability?",
- default="y",
- )
- ability_result = await agent.execute_next_ability(user_input)
- print(parse_ability_result(ability_result))
-
-
-def parse_agent_name_and_goals(name_and_goals: dict) -> str:
- parsed_response = f"Agent Name: {name_and_goals['agent_name']}\n"
- parsed_response += f"Agent Role: {name_and_goals['agent_role']}\n"
- parsed_response += "Agent Goals:\n"
- for i, goal in enumerate(name_and_goals["agent_goals"]):
- parsed_response += f"{i+1}. {goal}\n"
- return parsed_response
-
-
-def parse_agent_plan(plan: dict) -> str:
- parsed_response = f"Agent Plan:\n"
- for i, task in enumerate(plan["task_list"]):
- parsed_response += f"{i+1}. {task['objective']}\n"
- parsed_response += f"Task type: {task['type']} "
- parsed_response += f"Priority: {task['priority']}\n"
- parsed_response += f"Ready Criteria:\n"
- for j, criteria in enumerate(task["ready_criteria"]):
- parsed_response += f" {j+1}. {criteria}\n"
- parsed_response += f"Acceptance Criteria:\n"
- for j, criteria in enumerate(task["acceptance_criteria"]):
- parsed_response += f" {j+1}. {criteria}\n"
- parsed_response += "\n"
-
- return parsed_response
-
-
-def parse_next_ability(current_task, next_ability: dict) -> str:
- parsed_response = f"Current Task: {current_task.objective}\n"
- ability_args = ", ".join(
- f"{k}={v}" for k, v in next_ability["ability_arguments"].items()
- )
- parsed_response += f"Next Ability: {next_ability['next_ability']}({ability_args})\n"
- parsed_response += f"Motivation: {next_ability['motivation']}\n"
- parsed_response += f"Self-criticism: {next_ability['self_criticism']}\n"
- parsed_response += f"Reasoning: {next_ability['reasoning']}\n"
- return parsed_response
-
-
-def parse_ability_result(ability_result) -> str:
- parsed_response = f"Ability: {ability_result['ability_name']}\n"
- parsed_response += f"Ability Arguments: {ability_result['ability_args']}\n"
- parsed_response = f"Ability Result: {ability_result['success']}\n"
- parsed_response += f"Message: {ability_result['message']}\n"
- parsed_response += f"Data: {ability_result['new_knowledge']}\n"
- return parsed_response
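
A minimal sketch of the output these parse helpers produced (all values illustrative):

print(
    parse_agent_name_and_goals(
        {
            "agent_name": "PlanBot",
            "agent_role": "a demo agent",
            "agent_goals": ["Show the output format"],
        }
    )
)
# Agent Name: PlanBot
# Agent Role: a demo agent
# Agent Goals:
# 1. Show the output format
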
diff --git a/autogpt/core/runner/cli_web_app/cli.py b/autogpt/core/runner/cli_web_app/cli.py
deleted file mode 100644
index 6600b8e1d..000000000
--- a/autogpt/core/runner/cli_web_app/cli.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import contextlib
-import pathlib
-import shlex
-import subprocess
-import sys
-import time
-
-import click
-import requests
-import uvicorn
-import yaml
-
-from autogpt.core.runner.client_lib.shared_click_commands import (
- DEFAULT_SETTINGS_FILE,
- make_settings,
- status,
-)
-from autogpt.core.runner.client_lib.utils import coroutine
-
-
-@click.group()
-def autogpt():
- """Temporary command group for v2 commands."""
- pass
-
-
-autogpt.add_command(make_settings)
-autogpt.add_command(status)
-
-
-@autogpt.command()
-@click.option(
- "host",
- "--host",
- default="localhost",
- help="The host for the webserver.",
- type=click.STRING,
-)
-@click.option(
- "port",
- "--port",
- default=8080,
- help="The port of the webserver.",
- type=click.INT,
-)
-def server(host: str, port: int) -> None:
- """Run the Auto-GPT runner httpserver."""
- click.echo("Running Auto-GPT runner httpserver...")
- uvicorn.run(
- "autogpt.core.runner.cli_web_app.server.api:app",
- workers=1,
- host=host,
- port=port,
- reload=True,
- )
-
-
-@autogpt.command()
-@click.option(
- "--settings-file",
- type=click.Path(),
- default=DEFAULT_SETTINGS_FILE,
-)
-@coroutine
-async def client(settings_file) -> None:
- """Run the Auto-GPT runner client."""
- settings_file = pathlib.Path(settings_file)
- settings = {}
- if settings_file.exists():
- settings = yaml.safe_load(settings_file.read_text())
-
- from autogpt.core.runner.cli_web_app.client.client import run
-
- with autogpt_server():
- run()
-
-
-@contextlib.contextmanager
-def autogpt_server():
- host = "localhost"
- port = 8080
- cmd = shlex.split(
- f"{sys.executable} autogpt/core/runner/cli_web_app/cli.py server --host {host} --port {port}"
- )
- server_process = subprocess.Popen(
- args=cmd,
- )
- started = False
-
- while not started:
- try:
- requests.get(f"http://{host}:{port}")
- started = True
- except requests.exceptions.ConnectionError:
- time.sleep(0.2)
- yield server_process
- server_process.terminate()
-
-
-if __name__ == "__main__":
- autogpt()
diff --git a/autogpt/core/runner/cli_web_app/client/client.py b/autogpt/core/runner/cli_web_app/client/client.py
deleted file mode 100644
index 346203f7c..000000000
--- a/autogpt/core/runner/cli_web_app/client/client.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import json
-
-import requests
-
-
-def run():
- body = json.dumps(
- {"ai_name": "HelloBot", "ai_role": "test", "ai_goals": ["goal1", "goal2"]}
- )
-
- header = {"Content-Type": "application/json", "openai_api_key": "asdf"}
- print("Sending: ", header, body)
- response = requests.post(
- "http://localhost:8080/api/v1/agents", data=body, headers=header
- )
- print(response.content.decode("utf-8"))
diff --git a/autogpt/core/runner/cli_web_app/server/api.py b/autogpt/core/runner/cli_web_app/server/api.py
deleted file mode 100644
index 01c50b06d..000000000
--- a/autogpt/core/runner/cli_web_app/server/api.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import uuid
-
-from fastapi import APIRouter, FastAPI, Request
-
-from autogpt.core.runner.cli_web_app.server.schema import InteractRequestBody
-
-router = APIRouter()
-
-
-@router.post("/agents")
-async def create_agent(request: Request):
- """Create a new agent."""
- agent_id = uuid.uuid4().hex
- return {"agent_id": agent_id}
-
-
-@router.post("/agents/{agent_id}")
-async def interact(request: Request, agent_id: str, body: InteractRequestBody):
- """Interact with an agent."""
-
- # check headers
-
- # check if agent_id exists
-
- # get agent object from somewhere, e.g. a database/disk/global dict
-
- # continue agent interaction with user input
-
- return {
- "thoughts": {
- "thoughts": {
- "text": "text",
- "reasoning": "reasoning",
- "plan": "plan",
- "criticism": "criticism",
- "speak": "speak",
- },
- "commands": {
- "name": "name",
- "args": {"arg_1": "value_1", "arg_2": "value_2"},
- },
- },
- "messages": ["message1", agent_id],
- }
-
-
-app = FastAPI()
-app.include_router(router, prefix="/api/v1")
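
Because app is a plain FastAPI instance, the stub endpoints can be exercised with FastAPI's test client. A minimal sketch:

from fastapi.testclient import TestClient

from autogpt.core.runner.cli_web_app.server.api import app

client = TestClient(app)
created = client.post("/api/v1/agents")
agent_id = created.json()["agent_id"]

reply = client.post(f"/api/v1/agents/{agent_id}", json={"user_input": "hi"})
assert reply.json()["messages"][1] == agent_id  # the stub echoes the agent id
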
diff --git a/autogpt/core/runner/cli_web_app/server/schema.py b/autogpt/core/runner/cli_web_app/server/schema.py
deleted file mode 100644
index 272fbc78e..000000000
--- a/autogpt/core/runner/cli_web_app/server/schema.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from uuid import UUID
-
-from pydantic import BaseModel, validator
-
-
-class AgentInfo(BaseModel):
- id: UUID | None = None
- objective: str = ""
- name: str = ""
- role: str = ""
- goals: list[str] = []
-
-
-class AgentConfiguration(BaseModel):
- """Configuration for creation of a new agent."""
-
- # We'll want to get this schema from the configuration, so it needs to be dynamic.
- user_configuration: dict
- agent_goals: AgentInfo
-
- @validator("agent_goals")
- def only_objective_or_name_role_goals(cls, agent_goals):
- goals_specification = [agent_goals.name, agent_goals.role, agent_goals.goals]
- if agent_goals.objective and any(goals_specification):
- raise ValueError("Cannot specify both objective and name, role, or goals")
- if not agent_goals.objective and not all(goals_specification):
- raise ValueError("Must specify either objective or name, role, and goals")
-
-
-class InteractRequestBody(BaseModel):
- user_input: str = ""
-
-
-class InteractResponseBody(BaseModel):
- thoughts: dict[str, str] # TBD
- messages: list[str] # for example
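
With the validator above returning its value (as pydantic requires), AgentConfiguration rejects over- and under-specified goal sets. A minimal sketch:

import pydantic

try:
    AgentConfiguration(
        user_configuration={},
        agent_goals=AgentInfo(objective="write a poem", name="PoemBot"),
    )
except pydantic.ValidationError as e:
    print(e)  # Cannot specify both objective and name, role, or goals
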
diff --git a/autogpt/core/runner/cli_web_app/server/services/users.py b/autogpt/core/runner/cli_web_app/server/services/users.py
deleted file mode 100644
index 5192dcdb2..000000000
--- a/autogpt/core/runner/cli_web_app/server/services/users.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import uuid
-
-from fastapi import Request
-
-
-class UserService:
- def __init__(self):
- self.users = {}
-
- def get_user_id(self, request: Request) -> uuid.UUID:
- # TODO: something real. I don't know how this works.
- hostname = request.client.host
- port = request.client.port
- user = f"{hostname}:{port}"
- if user not in self.users:
- self.users[user] = uuid.uuid4()
- return self.users[user]
-
-
-USER_SERVICE = UserService()
diff --git a/autogpt/core/runner/client_lib/logging.py b/autogpt/core/runner/client_lib/logging.py
deleted file mode 100644
index 04f0a9c4b..000000000
--- a/autogpt/core/runner/client_lib/logging.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import logging
-
-
-def get_client_logger():
- # Configure logging before we do anything else.
- # Application logs need a place to live.
- client_logger = logging.getLogger("autogpt_client_application")
- client_logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
- )
-
- ch = logging.StreamHandler()
- ch.setLevel(logging.DEBUG)
- ch.setFormatter(formatter)
-
- client_logger.addHandler(ch)
-
- return client_logger
diff --git a/autogpt/core/runner/client_lib/utils.py b/autogpt/core/runner/client_lib/utils.py
deleted file mode 100644
index 39b5135f0..000000000
--- a/autogpt/core/runner/client_lib/utils.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import asyncio
-import functools
-from bdb import BdbQuit
-from typing import Callable, ParamSpec, TypeVar
-
-import click
-
-P = ParamSpec("P")
-T = TypeVar("T")
-
-
-def handle_exceptions(
- application_main: Callable[P, T],
- with_debugger: bool,
-) -> Callable[P, T]:
- """Wraps a function so that it drops a user into a debugger if it raises an error.
-
- This is intended to be used as a wrapper for the main function of a CLI application.
- It re-raises user-initiated aborts (KeyboardInterrupt, BdbQuit, click.Abort)
- unchanged. Any other exception is re-raised when with_debugger is False, and
- printed before dropping the user into a post-mortem debugger when it is True.
-
- Parameters
- ----------
- application_main
- The function to wrap.
- with_debugger
- Whether to drop a user into a debugger if an error is raised.
-
- Returns
- -------
- Callable
- The wrapped function.
-
- """
-
- @functools.wraps(application_main)
- async def wrapped(*args: P.args, **kwargs: P.kwargs) -> T:
- try:
- return await application_main(*args, **kwargs)
- except (BdbQuit, KeyboardInterrupt, click.Abort):
- raise
- except Exception as e:
- if with_debugger:
- print(f"Uncaught exception {e}")
- import pdb
-
- pdb.post_mortem()
- else:
- raise
-
- return wrapped
-
-
-def coroutine(f):
- @functools.wraps(f)
- def wrapper(*args, **kwargs):
- return asyncio.run(f(*args, **kwargs))
-
- return wrapper
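
The coroutine decorator is what let click invoke async commands synchronously. A minimal sketch:

@coroutine
async def add(a: int, b: int) -> int:
    return a + b

# The wrapper is synchronous; asyncio.run drives the coroutine to completion.
assert add(1, 2) == 3
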
diff --git a/autogpt/core/workspace/__init__.py b/autogpt/core/workspace/__init__.py
deleted file mode 100644
index f474d4faa..000000000
--- a/autogpt/core/workspace/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""The workspace is the central hub for the Agent's on disk resources."""
-from autogpt.core.workspace.base import Workspace
-from autogpt.core.workspace.simple import SimpleWorkspace, WorkspaceSettings
diff --git a/autogpt/core/workspace/simple.py b/autogpt/core/workspace/simple.py
deleted file mode 100644
index 45e1f9dd6..000000000
--- a/autogpt/core/workspace/simple.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import json
-import logging
-import typing
-from pathlib import Path
-
-from pydantic import SecretField
-
-from autogpt.core.configuration import (
- Configurable,
- SystemConfiguration,
- SystemSettings,
- UserConfigurable,
-)
-from autogpt.core.workspace.base import Workspace
-
-if typing.TYPE_CHECKING:
- # Cyclic import
- from autogpt.core.agent.simple import AgentSettings
-
-
-class WorkspaceConfiguration(SystemConfiguration):
- root: str
- parent: str = UserConfigurable()
- restrict_to_workspace: bool = UserConfigurable()
-
-
-class WorkspaceSettings(SystemSettings):
- configuration: WorkspaceConfiguration
-
-
-class SimpleWorkspace(Configurable, Workspace):
- default_settings = WorkspaceSettings(
- name="workspace",
- description="The workspace is the root directory for all agent activity.",
- configuration=WorkspaceConfiguration(
- root="",
- parent="~/auto-gpt/agents",
- restrict_to_workspace=True,
- ),
- )
-
- NULL_BYTES = ["\0", "\000", "\x00", "\u0000", "%00"]
-
- def __init__(
- self,
- settings: WorkspaceSettings,
- logger: logging.Logger,
- ):
- self._configuration = settings.configuration
- self._logger = logger.getChild("workspace")
-
- @property
- def root(self) -> Path:
- return Path(self._configuration.root)
-
- @property
- def debug_log_path(self) -> Path:
- return self.root / "logs" / "debug.log"
-
- @property
- def cycle_log_path(self) -> Path:
- return self.root / "logs" / "cycle.log"
-
- @property
- def configuration_path(self) -> Path:
- return self.root / "configuration.yml"
-
- @property
- def restrict_to_workspace(self) -> bool:
- return self._configuration.restrict_to_workspace
-
- def get_path(self, relative_path: str | Path) -> Path:
- """Get the full path for an item in the workspace.
-
- Parameters
- ----------
- relative_path
- The relative path to resolve in the workspace.
-
- Returns
- -------
- Path
- The resolved path relative to the workspace.
-
- """
- return self._sanitize_path(
- relative_path,
- root=self.root,
- restrict_to_root=self.restrict_to_workspace,
- )
-
- def _sanitize_path(
- self,
- relative_path: str | Path,
- root: str | Path = None,
- restrict_to_root: bool = True,
- ) -> Path:
- """Resolve the relative path within the given root if possible.
-
- Parameters
- ----------
- relative_path
- The relative path to resolve.
- root
- The root path to resolve the relative path within.
- restrict_to_root
- Whether to restrict the path to the root.
-
- Returns
- -------
- Path
- The resolved path.
-
- Raises
- ------
- ValueError
- If the path is absolute and a root is provided.
- ValueError
- If the path is outside the root and the root is restricted.
-
- """
-
- # Posix systems disallow null bytes in paths. Windows is agnostic about it.
- # Do an explicit check here for all sorts of null byte representations.
-
- for null_byte in self.NULL_BYTES:
- if null_byte in str(relative_path) or null_byte in str(root):
- raise ValueError("embedded null byte")
-
- if root is None:
- return Path(relative_path).resolve()
-
- self._logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")
- root, relative_path = Path(root).resolve(), Path(relative_path)
- self._logger.debug(f"Resolved root as '{root}'")
-
- if relative_path.is_absolute():
- raise ValueError(
- f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
- )
- full_path = root.joinpath(relative_path).resolve()
-
- self._logger.debug(f"Joined paths as '{full_path}'")
-
- if restrict_to_root and not full_path.is_relative_to(root):
- raise ValueError(
- f"Attempted to access path '{full_path}' outside of workspace '{root}'."
- )
-
- return full_path
-
- ###################################
- # Factory methods for agent setup #
- ###################################
-
- @staticmethod
- def setup_workspace(settings: "AgentSettings", logger: logging.Logger) -> Path:
- workspace_parent = settings.workspace.configuration.parent
- workspace_parent = Path(workspace_parent).expanduser().resolve()
- workspace_parent.mkdir(parents=True, exist_ok=True)
-
- agent_name = settings.agent.name
-
- workspace_root = workspace_parent / agent_name
- workspace_root.mkdir(parents=True, exist_ok=True)
-
- settings.workspace.configuration.root = str(workspace_root)
-
- with (workspace_root / "agent_settings.json").open("w") as f:
- settings_json = settings.json(
- encoder=lambda x: x.get_secret_value()
- if isinstance(x, SecretField)
- else x,
- )
- f.write(settings_json)
-
- # TODO: What are all the kinds of logs we want here?
- log_path = workspace_root / "logs"
- log_path.mkdir(parents=True, exist_ok=True)
- (log_path / "debug.log").touch()
- (log_path / "cycle.log").touch()
-
- return workspace_root
-
- @staticmethod
- def load_agent_settings(workspace_root: Path) -> "AgentSettings":
- # Cyclic import
- from autogpt.core.agent.simple import AgentSettings
-
- with (workspace_root / "agent_settings.json").open("r") as f:
- agent_settings = json.load(f)
-
- return AgentSettings.parse_obj(agent_settings)
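
A minimal sketch of the path sanitization above; the workspace root and settings values are illustrative:

import logging

from autogpt.core.workspace.simple import (
    SimpleWorkspace,
    WorkspaceConfiguration,
    WorkspaceSettings,
)

settings = WorkspaceSettings(
    name="workspace",
    description="demo workspace",
    configuration=WorkspaceConfiguration(
        root="/tmp/demo-agent",
        parent="/tmp",
        restrict_to_workspace=True,
    ),
)
ws = SimpleWorkspace(settings, logging.getLogger("demo"))

ws.get_path("logs/debug.log")  # ok: /tmp/demo-agent/logs/debug.log

# Absolute paths, traversal out of the root, and null bytes are all rejected:
for bad in ("/etc/passwd", "../escape.txt", "bad\0name"):
    try:
        ws.get_path(bad)
    except ValueError as e:
        print(f"rejected {bad!r}: {e}")
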
diff --git a/autogpt/js/overlay.js b/autogpt/js/overlay.js
deleted file mode 100644
index 1c99c7267..000000000
--- a/autogpt/js/overlay.js
+++ /dev/null
@@ -1,29 +0,0 @@
-const overlay = document.createElement('div');
-Object.assign(overlay.style, {
- position: 'fixed',
- zIndex: 999999,
- top: 0,
- left: 0,
- width: '100%',
- height: '100%',
- background: 'rgba(0, 0, 0, 0.7)',
- color: '#fff',
- fontSize: '24px',
- fontWeight: 'bold',
- display: 'flex',
- justifyContent: 'center',
- alignItems: 'center',
-});
-const textContent = document.createElement('div');
-Object.assign(textContent.style, {
- textAlign: 'center',
-});
-textContent.textContent = 'AutoGPT Analyzing Page';
-overlay.appendChild(textContent);
-document.body.append(overlay);
-document.body.style.overflow = 'hidden';
-let dotCount = 0;
-setInterval(() => {
- textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
- dotCount = (dotCount + 1) % 4;
-}, 1000);
diff --git a/autogpt/json_utils/llm_response_format_1.json b/autogpt/json_utils/llm_response_format_1.json
deleted file mode 100644
index 17101dda1..000000000
--- a/autogpt/json_utils/llm_response_format_1.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "type": "object",
- "properties": {
- "thoughts": {
- "type": "object",
- "properties": {
- "text": {
- "type": "string",
- "description": "thoughts"
- },
- "reasoning": {
- "type": "string"
- },
- "plan": {
- "type": "string",
- "description": "- short bulleted\n- list that conveys\n- long-term plan"
- },
- "criticism": {
- "type": "string",
- "description": "constructive self-criticism"
- },
- "speak": {
- "type": "string",
- "description": "thoughts summary to say to user"
- }
- },
- "required": ["text", "reasoning", "plan", "criticism", "speak"],
- "additionalProperties": false
- },
- "command": {
- "type": "object",
- "properties": {
- "name": {"type": "string"},
- "args": {
- "type": "object"
- }
- },
- "required": ["name", "args"],
- "additionalProperties": false
- }
- },
- "required": ["thoughts", "command"],
- "additionalProperties": false
-}
diff --git a/autogpt/json_utils/utilities.py b/autogpt/json_utils/utilities.py
deleted file mode 100644
index 7162abc58..000000000
--- a/autogpt/json_utils/utilities.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Utilities for the json_fixes package."""
-import ast
-import json
-import os.path
-from typing import Any
-
-from jsonschema import Draft7Validator
-
-from autogpt.config import Config
-from autogpt.logs import logger
-
-LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
-
-
-def extract_json_from_response(response_content: str) -> dict:
- # Sometimes the response includes the JSON in a code block with ```
- if response_content.startswith("```") and response_content.endswith("```"):
- # Discard the first and last ```, then re-join in case the response naturally included ```
- response_content = "```".join(response_content.split("```")[1:-1])
-
- # response content comes from OpenAI as a Python `str(content_dict)`, literal_eval reverses this
- try:
- return ast.literal_eval(response_content)
- except BaseException as e:
- logger.info(f"Error parsing JSON response with literal_eval {e}")
- logger.debug(f"Invalid JSON received in response: {response_content}")
- # TODO: How to raise an error here without causing the program to exit?
- return {}
-
-
-def llm_response_schema(
- config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
-) -> dict[str, Any]:
- filename = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
- with open(filename, "r") as f:
- json_schema = json.load(f)
- if config.openai_functions:
- del json_schema["properties"]["command"]
- json_schema["required"].remove("command")
- return json_schema
-
-
-def validate_json(
- json_object: object, config: Config, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
-) -> bool:
- """
- :type schema_name: object
- :param schema_name: str
- :type json_object: object
-
- Returns:
- bool: Whether the json_object is valid or not
- """
- schema = llm_response_schema(config, schema_name)
- validator = Draft7Validator(schema)
-
- if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
- for error in errors:
- logger.debug(f"JSON Validation Error: {error}")
-
- if config.debug_mode:
- logger.error(json.dumps(json_object, indent=4))
- logger.error("The following issues were found:")
-
- for error in errors:
- logger.error(f"Error: {error.message}")
- return False
-
- logger.debug("The JSON object is valid.")
-
- return True
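
A minimal sketch of extract_json_from_response on a fenced response. A bare ``` fence is handled; a ```json language tag would leave a stray "json" prefix that literal_eval cannot parse:

fenced = "```{'command': {'name': 'task_complete', 'args': {'reason': 'done'}}}```"
result = extract_json_from_response(fenced)
assert result == {"command": {"name": "task_complete", "args": {"reason": "done"}}}
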
diff --git a/autogpt/llm/__init__.py b/autogpt/llm/__init__.py
deleted file mode 100644
index 22a743c06..000000000
--- a/autogpt/llm/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from autogpt.llm.base import (
- ChatModelInfo,
- ChatModelResponse,
- EmbeddingModelInfo,
- EmbeddingModelResponse,
- LLMResponse,
- Message,
- ModelInfo,
-)
-
-__all__ = [
- "Message",
- "ModelInfo",
- "ChatModelInfo",
- "EmbeddingModelInfo",
- "LLMResponse",
- "ChatModelResponse",
- "EmbeddingModelResponse",
-]
diff --git a/autogpt/llm/api_manager.py b/autogpt/llm/api_manager.py
deleted file mode 100644
index 04e67db64..000000000
--- a/autogpt/llm/api_manager.py
+++ /dev/null
@@ -1,110 +0,0 @@
-from __future__ import annotations
-
-from typing import List, Optional
-
-import openai
-from openai import Model
-
-from autogpt.llm.base import CompletionModelInfo
-from autogpt.logs import logger
-from autogpt.singleton import Singleton
-
-
-class ApiManager(metaclass=Singleton):
- def __init__(self):
- self.total_prompt_tokens = 0
- self.total_completion_tokens = 0
- self.total_cost = 0
- self.total_budget = 0
- self.models: Optional[list[Model]] = None
-
- def reset(self):
- self.total_prompt_tokens = 0
- self.total_completion_tokens = 0
- self.total_cost = 0
- self.total_budget = 0.0
- self.models = None
-
- def update_cost(self, prompt_tokens, completion_tokens, model):
- """
- Update the total cost, prompt tokens, and completion tokens.
-
- Args:
- prompt_tokens (int): The number of tokens used in the prompt.
- completion_tokens (int): The number of tokens used in the completion.
- model (str): The model used for the API call.
- """
- # the .model property in API responses can contain version suffixes like -v2
- from autogpt.llm.providers.openai import OPEN_AI_MODELS
-
- model = model[:-3] if model.endswith("-v2") else model
- model_info = OPEN_AI_MODELS[model]
-
- self.total_prompt_tokens += prompt_tokens
- self.total_completion_tokens += completion_tokens
- self.total_cost += prompt_tokens * model_info.prompt_token_cost / 1000
- if isinstance(model_info, CompletionModelInfo):
- self.total_cost += (
- completion_tokens * model_info.completion_token_cost / 1000
- )
-
- logger.debug(f"Total running cost: ${self.total_cost:.3f}")
-
- def set_total_budget(self, total_budget):
- """
- Sets the total user-defined budget for API calls.
-
- Args:
- total_budget (float): The total budget for API calls.
- """
- self.total_budget = total_budget
-
- def get_total_prompt_tokens(self):
- """
- Get the total number of prompt tokens.
-
- Returns:
- int: The total number of prompt tokens.
- """
- return self.total_prompt_tokens
-
- def get_total_completion_tokens(self):
- """
- Get the total number of completion tokens.
-
- Returns:
- int: The total number of completion tokens.
- """
- return self.total_completion_tokens
-
- def get_total_cost(self):
- """
- Get the total cost of API calls.
-
- Returns:
- float: The total cost of API calls.
- """
- return self.total_cost
-
- def get_total_budget(self):
- """
- Get the total user-defined budget for API calls.
-
- Returns:
- float: The total budget for API calls.
- """
- return self.total_budget
-
- def get_models(self, **openai_credentials) -> List[Model]:
- """
- Get list of available GPT models.
-
- Returns:
- list: List of available GPT models.
-
- """
- if self.models is None:
- all_models = openai.Model.list(**openai_credentials)["data"]
- self.models = [model for model in all_models if "gpt" in model["id"]]
-
- return self.models
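
A minimal sketch of the accounting above, assuming OPEN_AI_MODELS in autogpt.llm.providers.openai priced gpt-4-0613 at $0.03/$0.06 per 1,000 prompt/completion tokens, matching the table deleted earlier in this diff:

api_manager = ApiManager()
api_manager.update_cost(prompt_tokens=1000, completion_tokens=500, model="gpt-4-0613")
print(api_manager.get_total_cost())  # 0.06 = 1000 * 0.03/1000 + 500 * 0.06/1000
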
diff --git a/autogpt/llm/base.py b/autogpt/llm/base.py
deleted file mode 100644
index 14a146b3c..000000000
--- a/autogpt/llm/base.py
+++ /dev/null
@@ -1,195 +0,0 @@
-from __future__ import annotations
-
-from copy import deepcopy
-from dataclasses import dataclass, field
-from math import ceil, floor
-from typing import TYPE_CHECKING, Literal, Optional, Type, TypedDict, TypeVar, overload
-
-if TYPE_CHECKING:
- from autogpt.llm.providers.openai import OpenAIFunctionCall
-
-MessageRole = Literal["system", "user", "assistant", "function"]
-MessageType = Literal["ai_response", "action_result"]
-
-TText = list[int]
-"""Token array representing tokenized text"""
-
-
-class MessageDict(TypedDict):
- role: MessageRole
- content: str
-
-
-class ResponseMessageDict(TypedDict):
- role: Literal["assistant"]
- content: Optional[str]
- function_call: Optional[FunctionCallDict]
-
-
-class FunctionCallDict(TypedDict):
- name: str
- arguments: str
-
-
-@dataclass
-class Message:
- """OpenAI Message object containing a role and the message content"""
-
- role: MessageRole
- content: str
- type: MessageType | None = None
-
- def raw(self) -> MessageDict:
- return {"role": self.role, "content": self.content}
-
-
-@dataclass
-class ModelInfo:
- """Struct for model information.
-
- Would be lovely to eventually get this directly from APIs, but needs to be scraped from
- websites for now.
- """
-
- name: str
- max_tokens: int
- prompt_token_cost: float
-
-
-@dataclass
-class CompletionModelInfo(ModelInfo):
- """Struct for generic completion model information."""
-
- completion_token_cost: float
-
-
-@dataclass
-class ChatModelInfo(CompletionModelInfo):
- """Struct for chat model information."""
-
-
-@dataclass
-class TextModelInfo(CompletionModelInfo):
- """Struct for text completion model information."""
-
-
-@dataclass
-class EmbeddingModelInfo(ModelInfo):
- """Struct for embedding model information."""
-
- embedding_dimensions: int
-
-
-# Can be replaced by Self in Python 3.11
-TChatSequence = TypeVar("TChatSequence", bound="ChatSequence")
-
-
-@dataclass
-class ChatSequence:
- """Utility container for a chat sequence"""
-
- model: ChatModelInfo
- messages: list[Message] = field(default_factory=list[Message])
-
- @overload
- def __getitem__(self, key: int) -> Message:
- ...
-
- @overload
- def __getitem__(self: TChatSequence, key: slice) -> TChatSequence:
- ...
-
- def __getitem__(self: TChatSequence, key: int | slice) -> Message | TChatSequence:
- if isinstance(key, slice):
- copy = deepcopy(self)
- copy.messages = self.messages[key]
- return copy
- return self.messages[key]
-
- def __iter__(self):
- return iter(self.messages)
-
- def __len__(self):
- return len(self.messages)
-
- def add(
- self,
- message_role: MessageRole,
- content: str,
- type: MessageType | None = None,
- ) -> None:
- self.append(Message(message_role, content, type))
-
- def append(self, message: Message):
- return self.messages.append(message)
-
- def extend(self, messages: list[Message] | ChatSequence):
- return self.messages.extend(messages)
-
- def insert(self, index: int, *messages: Message):
- for message in reversed(messages):
- self.messages.insert(index, message)
-
- @classmethod
- def for_model(
- cls: Type[TChatSequence],
- model_name: str,
- messages: list[Message] | ChatSequence = [],
- **kwargs,
- ) -> TChatSequence:
- from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
-
- if model_name not in OPEN_AI_CHAT_MODELS:
- raise ValueError(f"Unknown chat model '{model_name}'")
-
- return cls(
- model=OPEN_AI_CHAT_MODELS[model_name], messages=list(messages), **kwargs
- )
-
- @property
- def token_length(self) -> int:
- from autogpt.llm.utils import count_message_tokens
-
- return count_message_tokens(self.messages, self.model.name)
-
- def raw(self) -> list[MessageDict]:
- return [m.raw() for m in self.messages]
-
- def dump(self) -> str:
- SEPARATOR_LENGTH = 42
-
- def separator(text: str):
- half_sep_len = (SEPARATOR_LENGTH - 2 - len(text)) / 2
- return f"{floor(half_sep_len)*'-'} {text.upper()} {ceil(half_sep_len)*'-'}"
-
- formatted_messages = "\n".join(
- [f"{separator(m.role)}\n{m.content}" for m in self.messages]
- )
- return f"""
-============== {__class__.__name__} ==============
-Length: {self.token_length} tokens; {len(self.messages)} messages
-{formatted_messages}
-==========================================
-"""
-
-
-@dataclass
-class LLMResponse:
- """Standard response struct for a response from an LLM model."""
-
- model_info: ModelInfo
-
-
-@dataclass
-class EmbeddingModelResponse(LLMResponse):
- """Standard response struct for a response from an embedding model."""
-
- embedding: list[float] = field(default_factory=list)
-
-
-@dataclass
-class ChatModelResponse(LLMResponse):
- """Standard response struct for a response from a chat LLM."""
-
- content: Optional[str]
- function_call: Optional[OpenAIFunctionCall]
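A short usage sketch for the containers deleted above, assuming the module is importable at this revision (`token_length` additionally needs `tiktoken`):

```py
from autogpt.llm.base import ChatSequence, Message

seq = ChatSequence.for_model("gpt-3.5-turbo")
seq.add("system", "You are a helpful assistant.")
seq.add("user", "Say hello.")

print(len(seq))          # 2 messages
print(seq.raw())         # [{'role': 'system', ...}, {'role': 'user', ...}]
print(seq.token_length)  # counted via count_message_tokens for this model
print(seq.dump())        # human-readable transcript with role separators
```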
diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py
deleted file mode 100644
index 4364cb1d8..000000000
--- a/autogpt/llm/chat.py
+++ /dev/null
@@ -1,203 +0,0 @@
-from __future__ import annotations
-
-import time
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
- from autogpt.agent.agent import Agent
-
-from autogpt.config import Config
-from autogpt.llm.api_manager import ApiManager
-from autogpt.llm.base import ChatSequence, Message
-from autogpt.llm.providers.openai import (
- count_openai_functions_tokens,
- get_openai_command_specs,
-)
-from autogpt.llm.utils import count_message_tokens, create_chat_completion
-from autogpt.logs import CURRENT_CONTEXT_FILE_NAME, logger
-
-
-# TODO: Change debug from hardcode to argument
-def chat_with_ai(
- config: Config,
- agent: Agent,
- system_prompt: str,
- triggering_prompt: str,
- token_limit: int,
- model: str | None = None,
-):
- """
- Interact with the OpenAI API, sending the prompt, user input,
- message history, and permanent memory.
-
- Args:
- config (Config): The config to use.
- agent (Agent): The agent to use.
- system_prompt (str): The prompt explaining the rules to the AI.
- triggering_prompt (str): The input from the user.
- token_limit (int): The maximum number of tokens allowed in the API call.
- model (str, optional): The model to use. By default, the config.smart_llm will be used.
-
- Returns:
- str: The AI's response.
- """
- if model is None:
- model = config.smart_llm
-
- # Reserve 1000 tokens for the response
- logger.debug(f"Token limit: {token_limit}")
- send_token_limit = token_limit - 1000
-
- # if len(agent.history) == 0:
- # relevant_memory = ""
- # else:
- # recent_history = agent.history[-5:]
- # shuffle(recent_history)
- # relevant_memories = agent.memory.get_relevant(
- # str(recent_history), 5
- # )
- # if relevant_memories:
- # shuffle(relevant_memories)
- # relevant_memory = str(relevant_memories)
- # logger.debug(f"Memory Stats: {agent.memory.get_stats()}")
- relevant_memory = []
-
- message_sequence = ChatSequence.for_model(
- model,
- [
- Message("system", system_prompt),
- Message("system", f"The current time and date is {time.strftime('%c')}"),
- # Message(
- # "system",
- # f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
- # ),
- ],
- )
-
- # Count the currently used tokens
- current_tokens_used = message_sequence.token_length
- insertion_index = len(message_sequence)
-
- # Account for tokens used by OpenAI functions
- openai_functions = None
- if agent.config.openai_functions:
- openai_functions = get_openai_command_specs(agent.command_registry)
- functions_tlength = count_openai_functions_tokens(openai_functions, model)
- current_tokens_used += functions_tlength
- logger.debug(f"OpenAI Functions take up {functions_tlength} tokens in API call")
-
- # Account for user input (appended later)
- user_input_msg = Message("user", triggering_prompt)
- current_tokens_used += count_message_tokens(user_input_msg, model)
-
- current_tokens_used += agent.history.max_summary_tlength # Reserve space
- current_tokens_used += 500  # Reserve space for the OpenAI functions (TODO: improve)
-
- # Add historical Messages until the token limit is reached
- # or there are no more messages to add.
- for cycle in reversed(list(agent.history.per_cycle())):
- messages_to_add = [msg for msg in cycle if msg is not None]
- tokens_to_add = count_message_tokens(messages_to_add, model)
- if current_tokens_used + tokens_to_add > send_token_limit:
- break
-
- # Add the most recent message to the start of the chain,
- # after the system prompts.
- message_sequence.insert(insertion_index, *messages_to_add)
- current_tokens_used += tokens_to_add
-
- # Update & add summary of trimmed messages
- if len(agent.history) > 0:
- new_summary_message, trimmed_messages = agent.history.trim_messages(
- current_message_chain=list(message_sequence), config=agent.config
- )
- tokens_to_add = count_message_tokens(new_summary_message, model)
- message_sequence.insert(insertion_index, new_summary_message)
- current_tokens_used += tokens_to_add - agent.history.max_summary_tlength
-
- # FIXME: uncomment when memory is back in use
- # memory_store = get_memory(config)
- # for _, ai_msg, result_msg in agent.history.per_cycle(trimmed_messages):
- # memory_to_add = MemoryItem.from_ai_action(ai_msg, result_msg)
- # logger.debug(f"Storing the following memory:\n{memory_to_add.dump()}")
- # memory_store.add(memory_to_add)
-
- api_manager = ApiManager()
- # inform the AI about its remaining budget (if it has one)
- if api_manager.get_total_budget() > 0.0:
- remaining_budget = api_manager.get_total_budget() - api_manager.get_total_cost()
- if remaining_budget < 0:
- remaining_budget = 0
- budget_message = f"Your remaining API budget is ${remaining_budget:.3f}" + (
- " BUDGET EXCEEDED! SHUT DOWN!\n\n"
- if remaining_budget == 0
- else " Budget very nearly exceeded! Shut down gracefully!\n\n"
- if remaining_budget < 0.005
- else " Budget nearly exceeded. Finish up.\n\n"
- if remaining_budget < 0.01
- else "\n\n"
- )
- logger.debug(budget_message)
- message_sequence.add("system", budget_message)
- current_tokens_used += count_message_tokens(message_sequence[-1], model)
-
- # Append user input, the length of this is accounted for above
- message_sequence.append(user_input_msg)
-
- plugin_count = len(config.plugins)
- for i, plugin in enumerate(config.plugins):
- if not plugin.can_handle_on_planning():
- continue
- plugin_response = plugin.on_planning(
- agent.ai_config.prompt_generator, message_sequence.raw()
- )
- if not plugin_response or plugin_response == "":
- continue
- tokens_to_add = count_message_tokens(Message("system", plugin_response), model)
- if current_tokens_used + tokens_to_add > send_token_limit:
- logger.debug(f"Plugin response too long, skipping: {plugin_response}")
- logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
- break
- message_sequence.add("system", plugin_response)
- current_tokens_used += tokens_to_add
-
- # Calculate remaining tokens
- tokens_remaining = token_limit - current_tokens_used
- # assert tokens_remaining >= 0, "Tokens remaining is negative.
- # This should never happen, please submit a bug report at
- # https://www.github.com/Torantulino/Auto-GPT"
-
- # Debug print the current context
- logger.debug(f"Token limit: {token_limit}")
- logger.debug(f"Send Token Count: {current_tokens_used}")
- logger.debug(f"Tokens remaining for response: {tokens_remaining}")
- logger.debug("------------ CONTEXT SENT TO AI ---------------")
- for message in message_sequence:
- # Skip printing the prompt
- if message.role == "system" and message.content == system_prompt:
- continue
- logger.debug(f"{message.role.capitalize()}: {message.content}")
- logger.debug("")
- logger.debug("----------- END OF CONTEXT ----------------")
- agent.log_cycle_handler.log_cycle(
- agent.ai_name,
- agent.created_at,
- agent.cycle_count,
- message_sequence.raw(),
- CURRENT_CONTEXT_FILE_NAME,
- )
-
- # TODO: use a model defined elsewhere, so that model can contain
- # temperature and other settings we care about
- assistant_reply = create_chat_completion(
- prompt=message_sequence,
- config=agent.config,
- functions=openai_functions,
- max_tokens=tokens_remaining,
- )
-
- # Update full message history
- agent.history.append(user_input_msg)
- agent.history.add("assistant", assistant_reply.content, "ai_response")
-
- return assistant_reply
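The budgeting pattern at the heart of `chat_with_ai` can be shown in isolation. A self-contained sketch, using a whitespace word count as a stand-in for the token-accurate `count_message_tokens`:

```py
def pack_history(cycles: list[list[str]], token_limit: int) -> tuple[list[str], int]:
    """Pack message cycles newest-first until the send budget is exhausted."""

    def count_tokens(msgs: list[str]) -> int:
        return sum(len(m.split()) for m in msgs)  # stand-in token counter

    send_token_limit = token_limit - 1000  # reserve 1000 tokens for the reply
    packed: list[str] = []
    used = 0
    for cycle in reversed(cycles):  # newest cycles get priority
        t = count_tokens(cycle)
        if used + t > send_token_limit:
            break  # older history gets summarized instead of sent verbatim
        packed[:0] = cycle  # prepend to keep chronological order after the prompts
        used += t
    return packed, used


messages, used = pack_history([["old " * 900], ["recent question"]], token_limit=1100)
# -> (["recent question"], 2): the oversized older cycle is left for the summary
```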
diff --git a/autogpt/llm/providers/openai.py b/autogpt/llm/providers/openai.py
deleted file mode 100644
index f00a1f28b..000000000
--- a/autogpt/llm/providers/openai.py
+++ /dev/null
@@ -1,429 +0,0 @@
-from __future__ import annotations
-
-import functools
-import time
-from dataclasses import dataclass
-from typing import Callable, List, Optional
-from unittest.mock import patch
-
-import openai
-import openai.api_resources.abstract.engine_api_resource as engine_api_resource
-from colorama import Fore, Style
-from openai.error import APIError, RateLimitError, ServiceUnavailableError, Timeout
-from openai.openai_object import OpenAIObject
-
-from autogpt.llm.base import (
- ChatModelInfo,
- EmbeddingModelInfo,
- MessageDict,
- TextModelInfo,
- TText,
-)
-from autogpt.logs import logger
-from autogpt.models.command_registry import CommandRegistry
-
-OPEN_AI_CHAT_MODELS = {
- info.name: info
- for info in [
- ChatModelInfo(
- name="gpt-3.5-turbo-0301",
- prompt_token_cost=0.0015,
- completion_token_cost=0.002,
- max_tokens=4096,
- ),
- ChatModelInfo(
- name="gpt-3.5-turbo-0613",
- prompt_token_cost=0.0015,
- completion_token_cost=0.002,
- max_tokens=4096,
- ),
- ChatModelInfo(
- name="gpt-3.5-turbo-16k-0613",
- prompt_token_cost=0.003,
- completion_token_cost=0.004,
- max_tokens=16384,
- ),
- ChatModelInfo(
- name="gpt-4-0314",
- prompt_token_cost=0.03,
- completion_token_cost=0.06,
- max_tokens=8192,
- ),
- ChatModelInfo(
- name="gpt-4-0613",
- prompt_token_cost=0.03,
- completion_token_cost=0.06,
- max_tokens=8192,
- ),
- ChatModelInfo(
- name="gpt-4-32k-0314",
- prompt_token_cost=0.06,
- completion_token_cost=0.12,
- max_tokens=32768,
- ),
- ChatModelInfo(
- name="gpt-4-32k-0613",
- prompt_token_cost=0.06,
- completion_token_cost=0.12,
- max_tokens=32768,
- ),
- ]
-}
-# Set aliases for rolling model IDs
-chat_model_mapping = {
- "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
- "gpt-4": "gpt-4-0613",
- "gpt-4-32k": "gpt-4-32k-0613",
-}
-for alias, target in chat_model_mapping.items():
- alias_info = ChatModelInfo(**OPEN_AI_CHAT_MODELS[target].__dict__)
- alias_info.name = alias
- OPEN_AI_CHAT_MODELS[alias] = alias_info
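A quick sanity check on the alias table: a rolling ID resolves to the same limits as its dated snapshot (a sketch, run from outside this module):

```py
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS

assert OPEN_AI_CHAT_MODELS["gpt-4"].name == "gpt-4"
assert (
    OPEN_AI_CHAT_MODELS["gpt-4"].max_tokens
    == OPEN_AI_CHAT_MODELS["gpt-4-0613"].max_tokens
    == 8192
)
```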
-
-OPEN_AI_TEXT_MODELS = {
- info.name: info
- for info in [
- TextModelInfo(
- name="text-davinci-003",
- prompt_token_cost=0.02,
- completion_token_cost=0.02,
- max_tokens=4097,
- ),
- ]
-}
-
-OPEN_AI_EMBEDDING_MODELS = {
- info.name: info
- for info in [
- EmbeddingModelInfo(
- name="text-embedding-ada-002",
- prompt_token_cost=0.0001,
- max_tokens=8191,
- embedding_dimensions=1536,
- ),
- ]
-}
-
-OPEN_AI_MODELS: dict[str, ChatModelInfo | EmbeddingModelInfo | TextModelInfo] = {
- **OPEN_AI_CHAT_MODELS,
- **OPEN_AI_TEXT_MODELS,
- **OPEN_AI_EMBEDDING_MODELS,
-}
-
-
-def meter_api(func: Callable):
- """Adds ApiManager metering to functions which make OpenAI API calls"""
- from autogpt.llm.api_manager import ApiManager
-
- api_manager = ApiManager()
-
- openai_obj_processor = openai.util.convert_to_openai_object
-
- def update_usage_with_response(response: OpenAIObject):
- try:
- usage = response.usage
- logger.debug(f"Reported usage from call to model {response.model}: {usage}")
- api_manager.update_cost(
- response.usage.prompt_tokens,
- response.usage.completion_tokens if "completion_tokens" in usage else 0,
- response.model,
- )
- except Exception as err:
- logger.warn(f"Failed to update API costs: {err.__class__.__name__}: {err}")
-
- def metering_wrapper(*args, **kwargs):
- openai_obj = openai_obj_processor(*args, **kwargs)
- if isinstance(openai_obj, OpenAIObject) and "usage" in openai_obj:
- update_usage_with_response(openai_obj)
- return openai_obj
-
- def metered_func(*args, **kwargs):
- with patch.object(
- engine_api_resource.util,
- "convert_to_openai_object",
- side_effect=metering_wrapper,
- ):
- return func(*args, **kwargs)
-
- return metered_func
-
-
-def retry_api(
- max_retries: int = 10,
- backoff_base: float = 2.0,
- warn_user: bool = True,
-):
- """Retry an OpenAI API call.
-
- Args:
- max_retries (int): Number of retries. Defaults to 10.
- backoff_base (float): Base for exponential backoff. Defaults to 2.
- warn_user (bool): Whether to warn the user. Defaults to True.
- """
- error_messages = {
- ServiceUnavailableError: f"{Fore.RED}Error: The OpenAI API engine is currently overloaded{Fore.RESET}",
- RateLimitError: f"{Fore.RED}Error: Reached rate limit{Fore.RESET}",
- }
- api_key_error_msg = (
- f"Please double check that you have setup a "
- f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
- f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
- )
- backoff_msg = f"{Fore.RED}Waiting {{backoff}} seconds...{Fore.RESET}"
-
- def _wrapper(func: Callable):
- @functools.wraps(func)
- def _wrapped(*args, **kwargs):
- user_warned = not warn_user
- max_attempts = max_retries + 1 # +1 for the first attempt
- for attempt in range(1, max_attempts + 1):
- try:
- return func(*args, **kwargs)
-
- except (RateLimitError, ServiceUnavailableError) as e:
- if attempt >= max_attempts or (
- # User's API quota exceeded
- isinstance(e, RateLimitError)
- and (err := getattr(e, "error", {}))
- and err.get("code") == "insufficient_quota"
- ):
- raise
-
- error_msg = error_messages[type(e)]
- logger.warn(error_msg)
- if not user_warned:
- logger.double_check(api_key_error_msg)
- logger.debug(f"Status: {e.http_status}")
- logger.debug(f"Response body: {e.json_body}")
- logger.debug(f"Response headers: {e.headers}")
- user_warned = True
-
- except (APIError, Timeout) as e:
- if (e.http_status not in [429, 502]) or (attempt == max_attempts):
- raise
-
- backoff = backoff_base ** (attempt + 2)
- logger.warn(backoff_msg.format(backoff=backoff))
- time.sleep(backoff)
-
- return _wrapped
-
- return _wrapper
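A usage sketch for the decorator above. Note the wait between failed attempts is `backoff_base ** (attempt + 2)`, so the default schedule starts at 8 seconds, not 2:

```py
import openai

from autogpt.llm.providers.openai import retry_api


@retry_api(max_retries=3)
def flaky_completion(**kwargs):
    return openai.ChatCompletion.create(**kwargs)


# backoff schedule with the default backoff_base of 2.0:
for attempt in range(1, 4):
    print(f"attempt {attempt}: wait {2.0 ** (attempt + 2):.0f}s")  # 8, 16, 32
```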
-
-
-@meter_api
-@retry_api()
-def create_chat_completion(
- messages: List[MessageDict],
- *_,
- **kwargs,
-) -> OpenAIObject:
- """Create a chat completion using the OpenAI API
-
- Args:
- messages: A list of messages to feed to the chatbot.
- kwargs: Other arguments to pass to the OpenAI API chat completion call.
- Returns:
- OpenAIObject: The ChatCompletion response from OpenAI
-
- """
- completion: OpenAIObject = openai.ChatCompletion.create(
- messages=messages,
- **kwargs,
- )
- if not hasattr(completion, "error"):
- logger.debug(f"Response: {completion}")
- return completion
-
-
-@meter_api
-@retry_api()
-def create_text_completion(
- prompt: str,
- *_,
- **kwargs,
-) -> OpenAIObject:
- """Create a text completion using the OpenAI API
-
- Args:
- prompt: A text prompt to feed to the LLM
- kwargs: Other arguments to pass to the OpenAI API text completion call.
- Returns:
- OpenAIObject: The Completion response from OpenAI
-
- """
- return openai.Completion.create(
- prompt=prompt,
- **kwargs,
- )
-
-
-@meter_api
-@retry_api()
-def create_embedding(
- input: str | TText | List[str] | List[TText],
- *_,
- **kwargs,
-) -> OpenAIObject:
- """Create an embedding using the OpenAI API
-
- Args:
- input: The text to embed.
- kwargs: Other arguments to pass to the OpenAI API embedding call.
- Returns:
- OpenAIObject: The Embedding response from OpenAI
-
- """
- return openai.Embedding.create(
- input=input,
- **kwargs,
- )
-
-
-@dataclass
-class OpenAIFunctionCall:
- """Represents a function call as generated by an OpenAI model
-
- Attributes:
- name: the name of the function that the LLM wants to call
- arguments: a stringified JSON object (unverified) containing `arg: value` pairs
- """
-
- name: str
- arguments: str
-
-
-@dataclass
-class OpenAIFunctionSpec:
- """Represents a "function" in OpenAI, which is mapped to a Command in Auto-GPT"""
-
- name: str
- description: str
- parameters: dict[str, ParameterSpec]
-
- @dataclass
- class ParameterSpec:
- name: str
- type: str # TODO: add enum support
- description: Optional[str]
- required: bool = False
-
- @property
- def schema(self) -> dict[str, str | dict | list]:
- """Returns an OpenAI-consumable function specification"""
- return {
- "name": self.name,
- "description": self.description,
- "parameters": {
- "type": "object",
- "properties": {
- param.name: {
- "type": param.type,
- "description": param.description,
- }
- for param in self.parameters.values()
- },
- "required": [
- param.name for param in self.parameters.values() if param.required
- ],
- },
- }
-
- @property
- def prompt_format(self) -> str:
- """Returns the function formatted similarly to the way OpenAI does it internally:
- https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18
-
- Example:
- ```ts
- // Get the current weather in a given location
- type get_current_weather = (_: {
- // The city and state, e.g. San Francisco, CA
- location: string,
- unit?: "celsius" | "fahrenheit",
- }) => any;
- ```
- """
-
- def param_signature(p_spec: OpenAIFunctionSpec.ParameterSpec) -> str:
- # TODO: enum type support
- return (
- f"// {p_spec.description}\n" if p_spec.description else ""
- ) + f"{p_spec.name}{'' if p_spec.required else '?'}: {p_spec.type},"
-
- return "\n".join(
- [
- f"// {self.description}",
- f"type {self.name} = (_ :{{",
- *[param_signature(p) for p in self.parameters.values()],
- "}) => any;",
- ]
- )
-
-
-def get_openai_command_specs(
- command_registry: CommandRegistry,
-) -> list[OpenAIFunctionSpec]:
- """Get OpenAI-consumable function specs for the agent's available commands.
- see https://platform.openai.com/docs/guides/gpt/function-calling
- """
- return [
- OpenAIFunctionSpec(
- name=command.name,
- description=command.description,
- parameters={
- param.name: OpenAIFunctionSpec.ParameterSpec(
- name=param.name,
- type=param.type,
- required=param.required,
- description=param.description,
- )
- for param in command.parameters
- },
- )
- for command in command_registry.commands.values()
- ]
-
-
-def count_openai_functions_tokens(
- functions: list[OpenAIFunctionSpec], for_model: str
-) -> int:
- """Returns the number of tokens taken up by a set of function definitions
-
- Reference: https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18
- """
- from autogpt.llm.utils import count_string_tokens
-
- return count_string_tokens(
- f"# Tools\n\n## functions\n\n{format_function_specs_as_typescript_ns(functions)}",
- for_model,
- )
-
-
-def format_function_specs_as_typescript_ns(functions: list[OpenAIFunctionSpec]) -> str:
- """Returns a function signature block in the format used by OpenAI internally:
- https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18
-
- For use with `count_string_tokens` to determine token usage of provided functions.
-
- Example:
- ```ts
- namespace functions {
-
- // Get the current weather in a given location
- type get_current_weather = (_: {
- // The city and state, e.g. San Francisco, CA
- location: string,
- unit?: "celsius" | "fahrenheit",
- }) => any;
-
- } // namespace functions
- ```
- """
-
- return (
- "namespace functions {\n\n"
- + "\n\n".join(f.prompt_format for f in functions)
- + "\n\n} // namespace functions"
- )
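Tying this file together, a sketch that builds the weather example from the docstrings and renders both the API schema and the token-counting form (assuming the module is importable at this revision):

```py
from autogpt.llm.providers.openai import (
    OpenAIFunctionSpec,
    format_function_specs_as_typescript_ns,
)

spec = OpenAIFunctionSpec(
    name="get_current_weather",
    description="Get the current weather in a given location",
    parameters={
        "location": OpenAIFunctionSpec.ParameterSpec(
            name="location",
            type="string",
            description="The city and state, e.g. San Francisco, CA",
            required=True,
        ),
        "unit": OpenAIFunctionSpec.ParameterSpec(
            name="unit", type="string", description=None
        ),
    },
)

print(spec.schema)  # dict for the API's `functions` parameter
print(format_function_specs_as_typescript_ns([spec]))  # form used for token counting
```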
diff --git a/autogpt/llm/utils/__init__.py b/autogpt/llm/utils/__init__.py
deleted file mode 100644
index 74e88dc67..000000000
--- a/autogpt/llm/utils/__init__.py
+++ /dev/null
@@ -1,220 +0,0 @@
-from __future__ import annotations
-
-from typing import List, Literal, Optional
-
-from colorama import Fore
-
-from autogpt.config import Config
-
-from ..api_manager import ApiManager
-from ..base import (
- ChatModelResponse,
- ChatSequence,
- FunctionCallDict,
- Message,
- ResponseMessageDict,
-)
-from ..providers import openai as iopenai
-from ..providers.openai import (
- OPEN_AI_CHAT_MODELS,
- OpenAIFunctionCall,
- OpenAIFunctionSpec,
- count_openai_functions_tokens,
-)
-from .token_counter import *
-
-
-def call_ai_function(
- function: str,
- args: list,
- description: str,
- config: Config,
- model: Optional[str] = None,
-) -> str:
- """Call an AI function
-
- This is a magic function that can do anything with no code. See
- https://github.com/Torantulino/AI-Functions for more info.
-
- Args:
- function (str): The function to call
- args (list): The arguments to pass to the function
- description (str): The description of the function
- config (Config): The config to use
- model (str, optional): The model to use. Defaults to config.smart_llm.
-
- Returns:
- str: The response from the function
- """
- if model is None:
- model = config.smart_llm
- # For each arg, if any are None, convert to "None":
- args = [str(arg) if arg is not None else "None" for arg in args]
- # parse args to comma separated string
- arg_str: str = ", ".join(args)
-
- prompt = ChatSequence.for_model(
- model,
- [
- Message(
- "system",
- f"You are now the following python function: ```# {description}"
- f"\n{function}```\n\nOnly respond with your `return` value.",
- ),
- Message("user", arg_str),
- ],
- )
- return create_chat_completion(prompt=prompt, temperature=0, config=config).content
-
-
-def create_text_completion(
- prompt: str,
- config: Config,
- model: Optional[str],
- temperature: Optional[float],
- max_output_tokens: Optional[int],
-) -> str:
- if model is None:
- model = config.fast_llm
- if temperature is None:
- temperature = config.temperature
-
- if config.use_azure:
- kwargs = config.get_azure_kwargs(model)
- else:
- kwargs = {"model": model}
-
- response = iopenai.create_text_completion(
- prompt=prompt,
- **kwargs,
- temperature=temperature,
- max_tokens=max_output_tokens,
- api_key=config.openai_api_key,
- )
- logger.debug(f"Response: {response}")
-
- return response.choices[0].text
-
-
-# Overly simple abstraction until we create something better
-def create_chat_completion(
- prompt: ChatSequence,
- config: Config,
- functions: Optional[List[OpenAIFunctionSpec]] = None,
- model: Optional[str] = None,
- temperature: Optional[float] = None,
- max_tokens: Optional[int] = None,
-) -> ChatModelResponse:
- """Create a chat completion using the OpenAI API
-
- Args:
- prompt (ChatSequence): The prompt/messages to send to the chat completion
- config (Config): The config to use
- functions (List[OpenAIFunctionSpec], optional): OpenAI functions to expose
- model (str, optional): The model to use. Defaults to the prompt's model.
- temperature (float, optional): The temperature to use. Defaults to config.temperature.
- max_tokens (int, optional): The max tokens to use. Defaults to the model's remaining context budget.
-
- Returns:
- ChatModelResponse: The response from the chat completion
- """
-
- if model is None:
- model = prompt.model.name
- if temperature is None:
- temperature = config.temperature
- if max_tokens is None:
- prompt_tlength = prompt.token_length
- max_tokens = OPEN_AI_CHAT_MODELS[model].max_tokens - prompt_tlength
- logger.debug(f"Prompt length: {prompt_tlength} tokens")
- if functions:
- functions_tlength = count_openai_functions_tokens(functions, model)
- max_tokens -= functions_tlength
- logger.debug(f"Functions take up {functions_tlength} tokens in API call")
-
- logger.debug(
- f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
- )
- chat_completion_kwargs = {
- "model": model,
- "temperature": temperature,
- "max_tokens": max_tokens,
- }
-
- for plugin in config.plugins:
- if plugin.can_handle_chat_completion(
- messages=prompt.raw(),
- **chat_completion_kwargs,
- ):
- message = plugin.handle_chat_completion(
- messages=prompt.raw(),
- **chat_completion_kwargs,
- )
- if message is not None:
- return message
-
- chat_completion_kwargs["api_key"] = config.openai_api_key
- if config.use_azure:
- chat_completion_kwargs.update(config.get_azure_kwargs(model))
-
- if functions:
- chat_completion_kwargs["functions"] = [
- function.schema for function in functions
- ]
-
- response = iopenai.create_chat_completion(
- messages=prompt.raw(),
- **chat_completion_kwargs,
- )
- logger.debug(f"Response: {response}")
-
- if hasattr(response, "error"):
- logger.error(response.error)
- raise RuntimeError(response.error)
-
- first_message: ResponseMessageDict = response.choices[0].message
- content: str | None = first_message.get("content")
- function_call: FunctionCallDict | None = first_message.get("function_call")
-
- for plugin in config.plugins:
- if not plugin.can_handle_on_response():
- continue
- # TODO: function call support in plugin.on_response()
- content = plugin.on_response(content)
-
- return ChatModelResponse(
- model_info=OPEN_AI_CHAT_MODELS[model],
- content=content,
- function_call=OpenAIFunctionCall(
- name=function_call["name"], arguments=function_call["arguments"]
- )
- if function_call
- else None,
- )
-
-
-def check_model(
- model_name: str,
- model_type: Literal["smart_llm", "fast_llm"],
- config: Config,
-) -> str:
- """Check if model is available for use. If not, return gpt-3.5-turbo."""
- openai_credentials = {
- "api_key": config.openai_api_key,
- }
- if config.use_azure:
- openai_credentials.update(config.get_azure_kwargs(model_name))
-
- api_manager = ApiManager()
- models = api_manager.get_models(**openai_credentials)
-
- if any(model_name in m["id"] for m in models):
- return model_name
-
- logger.typewriter_log(
- "WARNING: ",
- Fore.YELLOW,
- f"You do not have access to {model_name}. Setting {model_type} to "
- f"gpt-3.5-turbo.",
- )
- return "gpt-3.5-turbo"
diff --git a/autogpt/llm/utils/token_counter.py b/autogpt/llm/utils/token_counter.py
deleted file mode 100644
index b0791e651..000000000
--- a/autogpt/llm/utils/token_counter.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""Functions for counting the number of tokens in a message or string."""
-from __future__ import annotations
-
-from typing import List, overload
-
-import tiktoken
-
-from autogpt.llm.base import Message
-from autogpt.logs import logger
-
-
-@overload
-def count_message_tokens(messages: Message, model: str = "gpt-3.5-turbo") -> int:
- ...
-
-
-@overload
-def count_message_tokens(messages: List[Message], model: str = "gpt-3.5-turbo") -> int:
- ...
-
-
-def count_message_tokens(
- messages: Message | List[Message], model: str = "gpt-3.5-turbo"
-) -> int:
- """
- Returns the number of tokens used by a list of messages.
-
- Args:
- messages (list): A list of messages, each of which is a dictionary
- containing the role and content of the message.
- model (str): The name of the model to use for tokenization.
- Defaults to "gpt-3.5-turbo-0301".
-
- Returns:
- int: The number of tokens used by the list of messages.
- """
- if isinstance(messages, Message):
- messages = [messages]
-
- if model.startswith("gpt-3.5-turbo"):
- tokens_per_message = (
- 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
- )
- tokens_per_name = -1 # if there's a name, the role is omitted
- encoding_model = "gpt-3.5-turbo"
- elif model.startswith("gpt-4"):
- tokens_per_message = 3
- tokens_per_name = 1
- encoding_model = "gpt-4"
- else:
- raise NotImplementedError(
- f"count_message_tokens() is not implemented for model {model}.\n"
- " See https://github.com/openai/openai-python/blob/main/chatml.md for"
- " information on how messages are converted to tokens."
- )
- try:
- encoding = tiktoken.encoding_for_model(encoding_model)
- except KeyError:
- logger.warn("Warning: model not found. Using cl100k_base encoding.")
- encoding = tiktoken.get_encoding("cl100k_base")
-
- num_tokens = 0
- for message in messages:
- num_tokens += tokens_per_message
- for key, value in message.raw().items():
- num_tokens += len(encoding.encode(value))
- if key == "name":
- num_tokens += tokens_per_name
- num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
- return num_tokens
-
-
-def count_string_tokens(string: str, model_name: str) -> int:
- """
- Returns the number of tokens in a text string.
-
- Args:
- string (str): The text string.
- model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
-
- Returns:
- int: The number of tokens in the text string.
- """
- encoding = tiktoken.encoding_for_model(model_name)
- return len(encoding.encode(string))
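A sketch exercising both counters (requires `tiktoken`; exact counts may vary between tiktoken versions):

```py
from autogpt.llm.base import Message
from autogpt.llm.utils.token_counter import count_message_tokens, count_string_tokens

messages = [Message("system", "You are helpful."), Message("user", "Hi!")]
print(count_message_tokens(messages, model="gpt-3.5-turbo"))  # includes per-message overhead
print(count_string_tokens("Hello world", model_name="gpt-3.5-turbo"))  # raw token count
```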
diff --git a/autogpt/logs/__init__.py b/autogpt/logs/__init__.py
deleted file mode 100644
index 40df21cb2..000000000
--- a/autogpt/logs/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from .formatters import AutoGptFormatter, JsonFormatter, remove_color_codes
-from .handlers import ConsoleHandler, JsonFileHandler, TypingConsoleHandler
-from .log_cycle import (
- CURRENT_CONTEXT_FILE_NAME,
- FULL_MESSAGE_HISTORY_FILE_NAME,
- NEXT_ACTION_FILE_NAME,
- PROMPT_SUMMARY_FILE_NAME,
- PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
- SUMMARY_FILE_NAME,
- SUPERVISOR_FEEDBACK_FILE_NAME,
- USER_INPUT_FILE_NAME,
- LogCycleHandler,
-)
-from .logger import Logger, logger
-from .utils import print_assistant_thoughts, remove_ansi_escape
diff --git a/autogpt/logs/formatters.py b/autogpt/logs/formatters.py
deleted file mode 100644
index 50e7c3333..000000000
--- a/autogpt/logs/formatters.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import logging
-import re
-
-from colorama import Style
-
-
-class AutoGptFormatter(logging.Formatter):
- """
- Handles the custom placeholders 'title_color' and 'message_no_color'.
- To use this formatter, make sure to pass 'color' and 'title' as log extras.
- """
-
- def format(self, record: logging.LogRecord) -> str:
- if hasattr(record, "color"):
- record.title_color = (
- getattr(record, "color")
- + getattr(record, "title", "")
- + " "
- + Style.RESET_ALL
- )
- else:
- record.title_color = getattr(record, "title", "")
-
- # Ensure 'title' exists so the format string does not fail
- record.title = getattr(record, "title", "")
-
- if hasattr(record, "msg"):
- record.message_no_color = remove_color_codes(getattr(record, "msg"))
- else:
- record.message_no_color = ""
- return super().format(record)
-
-
-def remove_color_codes(s: str) -> str:
- ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
- return ansi_escape.sub("", s)
-
-
-class JsonFormatter(logging.Formatter):
- def format(self, record: logging.LogRecord):
- return record.msg
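A quick check of `remove_color_codes`, which strips ANSI SGR sequences such as those colorama emits:

```py
from autogpt.logs.formatters import remove_color_codes

colored = "\x1b[31mError\x1b[0m: something failed"
assert remove_color_codes(colored) == "Error: something failed"
```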
diff --git a/autogpt/logs/handlers.py b/autogpt/logs/handlers.py
deleted file mode 100644
index c60b05752..000000000
--- a/autogpt/logs/handlers.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import json
-import logging
-import random
-import time
-
-
-class ConsoleHandler(logging.StreamHandler):
- def emit(self, record: logging.LogRecord) -> None:
- msg = self.format(record)
- try:
- print(msg)
- except Exception:
- self.handleError(record)
-
-
-class TypingConsoleHandler(logging.StreamHandler):
- """Output stream to console using simulated typing"""
-
- def emit(self, record: logging.LogRecord):
- min_typing_speed = 0.01
- max_typing_speed = 0.05
-
- msg = self.format(record)
- try:
- words = msg.split()
- for i, word in enumerate(words):
- print(word, end="", flush=True)
- if i < len(words) - 1:
- print(" ", end="", flush=True)
- typing_speed = random.uniform(min_typing_speed, max_typing_speed)
- time.sleep(typing_speed)
- # type faster after each word
- min_typing_speed = min_typing_speed * 0.95
- max_typing_speed = max_typing_speed * 0.95
- print()
- except Exception:
- self.handleError(record)
-
-
-class JsonFileHandler(logging.FileHandler):
- def __init__(self, filename: str, mode="a", encoding=None, delay=False):
- super().__init__(filename, mode, encoding, delay)
-
- def emit(self, record: logging.LogRecord):
- json_data = json.loads(self.format(record))
- with open(self.baseFilename, "w", encoding="utf-8") as f:
- json.dump(json_data, f, ensure_ascii=False, indent=4)
diff --git a/autogpt/logs/log_cycle.py b/autogpt/logs/log_cycle.py
deleted file mode 100644
index f3cbf166e..000000000
--- a/autogpt/logs/log_cycle.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import json
-import os
-from typing import Any, Dict, Union
-
-from .logger import logger
-
-DEFAULT_PREFIX = "agent"
-FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"
-CURRENT_CONTEXT_FILE_NAME = "current_context.json"
-NEXT_ACTION_FILE_NAME = "next_action.json"
-PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json"
-SUMMARY_FILE_NAME = "summary.txt"
-SUPERVISOR_FEEDBACK_FILE_NAME = "supervisor_feedback.txt"
-PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME = "prompt_supervisor_feedback.json"
-USER_INPUT_FILE_NAME = "user_input.txt"
-
-
-class LogCycleHandler:
- """
- A class for logging cycle data.
- """
-
- def __init__(self):
- self.log_count_within_cycle = 0
-
- @staticmethod
- def create_directory_if_not_exists(directory_path: str) -> None:
- if not os.path.exists(directory_path):
- os.makedirs(directory_path, exist_ok=True)
-
- def create_outer_directory(self, ai_name: str, created_at: str) -> str:
- log_directory = logger.get_log_directory()
-
- if os.environ.get("OVERWRITE_DEBUG") == "1":
- outer_folder_name = "auto_gpt"
- else:
- ai_name_short = self.get_agent_short_name(ai_name)
- outer_folder_name = f"{created_at}_{ai_name_short}"
-
- outer_folder_path = os.path.join(log_directory, "DEBUG", outer_folder_name)
- self.create_directory_if_not_exists(outer_folder_path)
-
- return outer_folder_path
-
- def get_agent_short_name(self, ai_name: str) -> str:
- return ai_name[:15].rstrip() if ai_name else DEFAULT_PREFIX
-
- def create_inner_directory(self, outer_folder_path: str, cycle_count: int) -> str:
- nested_folder_name = str(cycle_count).zfill(3)
- nested_folder_path = os.path.join(outer_folder_path, nested_folder_name)
- self.create_directory_if_not_exists(nested_folder_path)
-
- return nested_folder_path
-
- def create_nested_directory(
- self, ai_name: str, created_at: str, cycle_count: int
- ) -> str:
- outer_folder_path = self.create_outer_directory(ai_name, created_at)
- nested_folder_path = self.create_inner_directory(outer_folder_path, cycle_count)
-
- return nested_folder_path
-
- def log_cycle(
- self,
- ai_name: str,
- created_at: str,
- cycle_count: int,
- data: Union[Dict[str, Any], Any],
- file_name: str,
- ) -> None:
- """
- Log cycle data to a JSON file.
-
- Args:
- data (Any): The data to be logged.
- file_name (str): The name of the file to save the logged data.
- """
- nested_folder_path = self.create_nested_directory(
- ai_name, created_at, cycle_count
- )
-
- json_data = json.dumps(data, ensure_ascii=False, indent=4)
- log_file_path = os.path.join(
- nested_folder_path, f"{self.log_count_within_cycle}_{file_name}"
- )
-
- logger.log_json(json_data, log_file_path)
- self.log_count_within_cycle += 1
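A sketch of `LogCycleHandler` in use; the exact output path depends on `logger.get_log_directory()` and the `OVERWRITE_DEBUG` environment variable:

```py
from autogpt.logs.log_cycle import CURRENT_CONTEXT_FILE_NAME, LogCycleHandler

handler = LogCycleHandler()
handler.log_cycle(
    ai_name="Entrepreneur-GPT",  # hypothetical agent name
    created_at="20230601_120000",
    cycle_count=1,
    data={"role": "system", "content": "..."},
    file_name=CURRENT_CONTEXT_FILE_NAME,
)
# writes roughly: <log_dir>/DEBUG/20230601_120000_Entrepreneur-GPT/001/0_current_context.json
```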
diff --git a/autogpt/logs/logger.py b/autogpt/logs/logger.py
deleted file mode 100644
index e4cedc366..000000000
--- a/autogpt/logs/logger.py
+++ /dev/null
@@ -1,203 +0,0 @@
-"""Logging module for Auto-GPT."""
-from __future__ import annotations
-
-import logging
-import os
-from typing import TYPE_CHECKING, Any, Optional
-
-from colorama import Fore
-
-if TYPE_CHECKING:
- from autogpt.config import Config
-
-from autogpt.singleton import Singleton
-
-from .formatters import AutoGptFormatter, JsonFormatter
-from .handlers import ConsoleHandler, JsonFileHandler, TypingConsoleHandler
-
-
-class Logger(metaclass=Singleton):
- """
- Logger that handles titles in different colors.
- Outputs logs to the console, activity.log, and error.log
- For console handler: simulates typing
- """
-
- def __init__(self):
- # create log directory if it doesn't exist
- this_files_dir_path = os.path.dirname(__file__)
- log_dir = os.path.join(this_files_dir_path, "../logs")
- if not os.path.exists(log_dir):
- os.makedirs(log_dir)
-
- log_file = "activity.log"
- error_file = "error.log"
-
- console_formatter = AutoGptFormatter("%(title_color)s %(message)s")
-
- # Create a handler for console which simulate typing
- self.typing_console_handler = TypingConsoleHandler()
- self.typing_console_handler.setLevel(logging.INFO)
- self.typing_console_handler.setFormatter(console_formatter)
-
- # Create a handler for console without typing simulation
- self.console_handler = ConsoleHandler()
- self.console_handler.setLevel(logging.DEBUG)
- self.console_handler.setFormatter(console_formatter)
-
- # Info handler in activity.log
- self.file_handler = logging.FileHandler(
- os.path.join(log_dir, log_file), "a", "utf-8"
- )
- self.file_handler.setLevel(logging.DEBUG)
- info_formatter = AutoGptFormatter(
- "%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
- )
- self.file_handler.setFormatter(info_formatter)
-
- # Error handler error.log
- error_handler = logging.FileHandler(
- os.path.join(log_dir, error_file), "a", "utf-8"
- )
- error_handler.setLevel(logging.ERROR)
- error_formatter = AutoGptFormatter(
- "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
- " %(message_no_color)s"
- )
- error_handler.setFormatter(error_formatter)
-
- self.typing_logger = logging.getLogger("TYPER")
- self.typing_logger.addHandler(self.typing_console_handler)
- self.typing_logger.addHandler(self.file_handler)
- self.typing_logger.addHandler(error_handler)
- self.typing_logger.setLevel(logging.DEBUG)
-
- self.logger = logging.getLogger("LOGGER")
- self.logger.addHandler(self.console_handler)
- self.logger.addHandler(self.file_handler)
- self.logger.addHandler(error_handler)
- self.logger.setLevel(logging.DEBUG)
-
- self.json_logger = logging.getLogger("JSON_LOGGER")
- self.json_logger.addHandler(self.file_handler)
- self.json_logger.addHandler(error_handler)
- self.json_logger.setLevel(logging.DEBUG)
-
- self._config: Optional[Config] = None
- self.chat_plugins = []
-
- @property
- def config(self) -> Config | None:
- return self._config
-
- @config.setter
- def config(self, config: Config):
- self._config = config
- if config.plain_output:
- self.typing_logger.removeHandler(self.typing_console_handler)
- self.typing_logger.addHandler(self.console_handler)
-
- def typewriter_log(
- self,
- title: str = "",
- title_color: str = "",
- content: str = "",
- speak_text: bool = False,
- level: int = logging.INFO,
- ) -> None:
- from autogpt.speech import say_text
-
- if speak_text and self.config and self.config.speak_mode:
- say_text(f"{title}. {content}", self.config)
-
- for plugin in self.chat_plugins:
- plugin.report(f"{title}. {content}")
-
- if content:
- if isinstance(content, list):
- content = " ".join(content)
- else:
- content = ""
-
- self.typing_logger.log(
- level, content, extra={"title": title, "color": title_color}
- )
-
- def debug(
- self,
- message: str,
- title: str = "",
- title_color: str = "",
- ) -> None:
- self._log(title, title_color, message, logging.DEBUG)
-
- def info(
- self,
- message: str,
- title: str = "",
- title_color: str = "",
- ) -> None:
- self._log(title, title_color, message, logging.INFO)
-
- def warn(
- self,
- message: str,
- title: str = "",
- title_color: str = "",
- ) -> None:
- self._log(title, title_color, message, logging.WARN)
-
- def error(self, title: str, message: str = "") -> None:
- self._log(title, Fore.RED, message, logging.ERROR)
-
- def _log(
- self,
- title: str = "",
- title_color: str = "",
- message: str = "",
- level: int = logging.INFO,
- ) -> None:
- if message:
- if isinstance(message, list):
- message = " ".join(message)
- self.logger.log(
- level, message, extra={"title": str(title), "color": str(title_color)}
- )
-
- def set_level(self, level: int | str) -> None:
- self.logger.setLevel(level)
- self.typing_logger.setLevel(level)
-
- def double_check(self, additionalText: Optional[str] = None) -> None:
- if not additionalText:
- additionalText = (
- "Please ensure you've setup and configured everything"
- " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
- "double check. You can also create a github issue or join the discord"
- " and ask there!"
- )
-
- self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)
-
- def log_json(self, data: Any, file_name: str) -> None:
- # Define log directory
- this_files_dir_path = os.path.dirname(__file__)
- log_dir = os.path.join(this_files_dir_path, "../logs")
-
- # Create a handler for JSON files
- json_file_path = os.path.join(log_dir, file_name)
- json_data_handler = JsonFileHandler(json_file_path)
- json_data_handler.setFormatter(JsonFormatter())
-
- # Log the JSON data using the custom file handler
- self.json_logger.addHandler(json_data_handler)
- self.json_logger.debug(data)
- self.json_logger.removeHandler(json_data_handler)
-
- def get_log_directory(self) -> str:
- this_files_dir_path = os.path.dirname(__file__)
- log_dir = os.path.join(this_files_dir_path, "../../logs")
- return os.path.abspath(log_dir)
-
-
-logger = Logger()
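Typical call sites for the singleton, matching the signatures above:

```py
from colorama import Fore

from autogpt.logs import logger

logger.typewriter_log("THOUGHTS:", Fore.YELLOW, "I should list the files first.")
logger.warn("Rate limited, backing off")         # WARN level, title optional
logger.error("CONFIG ERROR", "Missing API key")  # also lands in error.log
```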
diff --git a/autogpt/logs/utils.py b/autogpt/logs/utils.py
deleted file mode 100644
index 637c917f8..000000000
--- a/autogpt/logs/utils.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-from colorama import Fore
-
-if TYPE_CHECKING:
- from autogpt.config import Config
-
-from .logger import logger
-
-
-def print_assistant_thoughts(
- ai_name: str,
- assistant_reply_json_valid: dict,
- config: Config,
-) -> None:
- from autogpt.speech import say_text
-
- assistant_thoughts_reasoning = None
- assistant_thoughts_plan = None
- assistant_thoughts_speak = None
- assistant_thoughts_criticism = None
-
- assistant_thoughts = assistant_reply_json_valid.get("thoughts", {})
- assistant_thoughts_text = remove_ansi_escape(assistant_thoughts.get("text", ""))
- if assistant_thoughts:
- assistant_thoughts_reasoning = remove_ansi_escape(
- assistant_thoughts.get("reasoning", "")
- )
- assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", ""))
- assistant_thoughts_criticism = remove_ansi_escape(
- assistant_thoughts.get("criticism", "")
- )
- assistant_thoughts_speak = remove_ansi_escape(
- assistant_thoughts.get("speak", "")
- )
- logger.typewriter_log(
- f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, assistant_thoughts_text
- )
- logger.typewriter_log("REASONING:", Fore.YELLOW, str(assistant_thoughts_reasoning))
- if assistant_thoughts_plan:
- logger.typewriter_log("PLAN:", Fore.YELLOW, "")
- # If it's a list, join it into a string
- if isinstance(assistant_thoughts_plan, list):
- assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
- elif isinstance(assistant_thoughts_plan, dict):
- assistant_thoughts_plan = str(assistant_thoughts_plan)
-
- # Split the input_string using the newline character and dashes
- lines = assistant_thoughts_plan.split("\n")
- for line in lines:
- line = line.lstrip("- ")
- logger.typewriter_log("- ", Fore.GREEN, line.strip())
- logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
- # Speak the assistant's thoughts
- if assistant_thoughts_speak:
- if config.speak_mode:
- say_text(assistant_thoughts_speak, config)
- else:
- logger.typewriter_log("SPEAK:", Fore.YELLOW, f"{assistant_thoughts_speak}")
-
-
-def remove_ansi_escape(s: str) -> str:
- return s.replace("\x1B", "")
diff --git a/autogpt/main.py b/autogpt/main.py
deleted file mode 100644
index 08ac4b400..000000000
--- a/autogpt/main.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""The application entry point. Can be invoked by a CLI or any other front end application."""
-import logging
-import sys
-from pathlib import Path
-from typing import Optional
-
-from colorama import Fore, Style
-
-from autogpt.agent import Agent
-from autogpt.config.config import ConfigBuilder, check_openai_api_key
-from autogpt.configurator import create_config
-from autogpt.logs import logger
-from autogpt.memory.vector import get_memory
-from autogpt.models.command_registry import CommandRegistry
-from autogpt.plugins import scan_plugins
-from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config
-from autogpt.utils import (
- get_current_git_branch,
- get_latest_bulletin,
- get_legal_warning,
- markdown_to_ansi_style,
-)
-from autogpt.workspace import Workspace
-from scripts.install_plugin_deps import install_plugin_dependencies
-
-COMMAND_CATEGORIES = [
- "autogpt.commands.execute_code",
- "autogpt.commands.file_operations",
- "autogpt.commands.web_search",
- "autogpt.commands.web_selenium",
- "autogpt.app",
- "autogpt.commands.task_statuses",
-]
-
-
-def run_auto_gpt(
- continuous: bool,
- continuous_limit: int,
- ai_settings: str,
- prompt_settings: str,
- skip_reprompt: bool,
- speak: bool,
- debug: bool,
- gpt3only: bool,
- gpt4only: bool,
- memory_type: str,
- browser_name: str,
- allow_downloads: bool,
- skip_news: bool,
- workspace_directory: str | Path,
- install_plugin_deps: bool,
- ai_name: Optional[str] = None,
- ai_role: Optional[str] = None,
- ai_goals: tuple[str] = tuple(),
-):
- # Configure logging before we do anything else.
- logger.set_level(logging.DEBUG if debug else logging.INFO)
-
- config = ConfigBuilder.build_config_from_env()
- # HACK: This is a hack to allow the config into the logger without having to pass it around everywhere
- # or import it directly.
- logger.config = config
-
- # TODO: fill in llm values here
- check_openai_api_key(config)
-
- create_config(
- config,
- continuous,
- continuous_limit,
- ai_settings,
- prompt_settings,
- skip_reprompt,
- speak,
- debug,
- gpt3only,
- gpt4only,
- memory_type,
- browser_name,
- allow_downloads,
- skip_news,
- )
-
- if config.continuous_mode:
- for line in get_legal_warning().split("\n"):
- logger.warn(markdown_to_ansi_style(line), "LEGAL:", Fore.RED)
-
- if not config.skip_news:
- motd, is_new_motd = get_latest_bulletin()
- if motd:
- motd = markdown_to_ansi_style(motd)
- for motd_line in motd.split("\n"):
- logger.info(motd_line, "NEWS:", Fore.GREEN)
- if is_new_motd and not config.chat_messages_enabled:
- input(
- Fore.MAGENTA
- + Style.BRIGHT
- + "NEWS: Bulletin was updated! Press Enter to continue..."
- + Style.RESET_ALL
- )
-
- git_branch = get_current_git_branch()
- if git_branch and git_branch != "stable":
- logger.typewriter_log(
- "WARNING: ",
- Fore.RED,
- f"You are running on `{git_branch}` branch "
- "- this is not a supported branch.",
- )
- if sys.version_info < (3, 10):
- logger.typewriter_log(
- "WARNING: ",
- Fore.RED,
- "You are running on an older version of Python. "
- "Some people have observed problems with certain "
- "parts of Auto-GPT with this version. "
- "Please consider upgrading to Python 3.10 or higher.",
- )
-
- if install_plugin_deps:
- install_plugin_dependencies()
-
- # TODO: have this directory live outside the repository (e.g. in a user's
- # home directory) and have it come in as a command line argument or part of
- # the env file.
- workspace_directory = Workspace.get_workspace_directory(config, workspace_directory)
-
- # HACK: doing this here to collect some globals that depend on the workspace.
- Workspace.build_file_logger_path(config, workspace_directory)
-
- config.plugins = scan_plugins(config, config.debug_mode)
- # Create a CommandRegistry instance and scan default folder
- command_registry = CommandRegistry()
-
- logger.debug(
- f"The following command categories are disabled: {config.disabled_command_categories}"
- )
- enabled_command_categories = [
- x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
- ]
-
- logger.debug(
- f"The following command categories are enabled: {enabled_command_categories}"
- )
-
- for command_category in enabled_command_categories:
- command_registry.import_commands(command_category)
-
- # Unregister commands that are incompatible with the current config
- incompatible_commands = []
- for command in command_registry.commands.values():
- if callable(command.enabled) and not command.enabled(config):
- command.enabled = False
- incompatible_commands.append(command)
-
- for command in incompatible_commands:
- command_registry.unregister(command)
- logger.debug(
- f"Unregistering incompatible command: {command.name}, "
- f"reason - {command.disabled_reason or 'Disabled by current config.'}"
- )
-
- ai_config = construct_main_ai_config(
- config,
- name=ai_name,
- role=ai_role,
- goals=ai_goals,
- )
- ai_config.command_registry = command_registry
- ai_name = ai_config.ai_name
- # print(prompt)
- # Initialize variables
- next_action_count = 0
-
- # add chat plugins capable of report to logger
- if config.chat_messages_enabled:
- for plugin in config.plugins:
- if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
- logger.info(f"Loaded plugin into logger: {plugin.__class__.__name__}")
- logger.chat_plugins.append(plugin)
-
- # Initialize memory and make sure it is empty.
- # this is particularly important for indexing and referencing pinecone memory
- memory = get_memory(config)
- memory.clear()
- logger.typewriter_log(
- "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
- )
- logger.typewriter_log("Using Browser:", Fore.GREEN, config.selenium_web_browser)
- system_prompt = ai_config.construct_full_prompt(config)
- if config.debug_mode:
- logger.typewriter_log("Prompt:", Fore.GREEN, system_prompt)
-
- agent = Agent(
- ai_name=ai_name,
- memory=memory,
- next_action_count=next_action_count,
- command_registry=command_registry,
- system_prompt=system_prompt,
- triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
- workspace_directory=workspace_directory,
- ai_config=ai_config,
- config=config,
- )
- agent.start_interaction_loop()
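For reference, a direct invocation sketch with placeholder values; in practice the CLI front end collects and passes these arguments:

```py
from pathlib import Path

from autogpt.main import run_auto_gpt

run_auto_gpt(
    continuous=False,
    continuous_limit=0,
    ai_settings="ai_settings.yaml",
    prompt_settings="prompt_settings.yaml",
    skip_reprompt=False,
    speak=False,
    debug=True,
    gpt3only=True,
    gpt4only=False,
    memory_type="json_file",
    browser_name="chrome",
    allow_downloads=False,
    skip_news=True,
    workspace_directory=Path("auto_gpt_workspace"),
    install_plugin_deps=False,
)
```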
diff --git a/autogpt/memory/message_history.py b/autogpt/memory/message_history.py
deleted file mode 100644
index 30dbbb809..000000000
--- a/autogpt/memory/message_history.py
+++ /dev/null
@@ -1,237 +0,0 @@
-from __future__ import annotations
-
-import copy
-import json
-from dataclasses import dataclass
-from typing import TYPE_CHECKING, Optional
-
-if TYPE_CHECKING:
- from autogpt.agent import Agent
-
-from autogpt.config import Config
-from autogpt.json_utils.utilities import extract_json_from_response
-from autogpt.llm.base import ChatSequence, Message
-from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
-from autogpt.llm.utils import (
- count_message_tokens,
- count_string_tokens,
- create_chat_completion,
-)
-from autogpt.logs import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME, logger
-
-
-@dataclass
-class MessageHistory(ChatSequence):
- max_summary_tlength: int = 500
- agent: Optional[Agent] = None
- summary: str = "I was created"
- last_trimmed_index: int = 0
-
- SUMMARIZATION_PROMPT = '''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
-
-You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
-
-Summary So Far:
-"""
-{summary}
-"""
-
-Latest Development:
-"""
-{new_events}
-"""
-'''
-
- def trim_messages(
- self, current_message_chain: list[Message], config: Config
- ) -> tuple[Message, list[Message]]:
- """
- Returns a list of trimmed messages: messages which are in the message history
- but not in current_message_chain.
-
- Args:
- current_message_chain (list[Message]): The messages currently in the context.
- config (Config): The config to use.
-
- Returns:
- Message: A message with the new running summary after adding the trimmed messages.
- list[Message]: A list of messages that are in full_message_history with an index higher than last_trimmed_index and absent from current_message_chain.
- """
- # Select messages in full_message_history with an index higher than last_trimmed_index
- new_messages = [
- msg for i, msg in enumerate(self) if i > self.last_trimmed_index
- ]
-
- # Remove messages that are already present in current_message_chain
- new_messages_not_in_chain = [
- msg for msg in new_messages if msg not in current_message_chain
- ]
-
- if not new_messages_not_in_chain:
- return self.summary_message(), []
-
- new_summary_message = self.update_running_summary(
- new_events=new_messages_not_in_chain, config=config
- )
-
- # Find the index of the last message processed
- last_message = new_messages_not_in_chain[-1]
- self.last_trimmed_index = self.messages.index(last_message)
-
- return new_summary_message, new_messages_not_in_chain
-
- def per_cycle(self, messages: list[Message] | None = None):
- """
- Yields:
- Message: a message containing user input
- Message: a message from the AI containing a proposed action
- Message: the message containing the result of the AI's proposed action
- """
- messages = messages or self.messages
- for i in range(0, len(messages) - 1):
- ai_message = messages[i]
- if ai_message.type != "ai_response":
- continue
- user_message = (
- messages[i - 1] if i > 0 and messages[i - 1].role == "user" else None
- )
- result_message = messages[i + 1]
- try:
- assert (
- extract_json_from_response(ai_message.content) != {}
- ), "AI response is not a valid JSON object"
- assert result_message.type == "action_result"
-
- yield user_message, ai_message, result_message
- except AssertionError as err:
- logger.debug(
- f"Invalid item in message history: {err}; Messages: {messages[i-1:i+2]}"
- )
-
- def summary_message(self) -> Message:
- return Message(
- "system",
- f"This reminds you of these events from your past: \n{self.summary}",
- )
-
- def update_running_summary(
- self,
- new_events: list[Message],
- config: Config,
- max_summary_length: Optional[int] = None,
- ) -> Message:
- """
- This function takes a list of Message objects and updates the running summary
- to include the events they describe. The updated summary is returned
- in a Message formatted in the 1st person past tense.
-
- Args:
- new_events: A list of Messages containing the latest events to be added to the summary.
-
- Returns:
- Message: a Message containing the updated running summary.
-
- Example:
- ```py
- new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}]
- update_running_summary(new_events)
- # Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
- ```
- """
- if not new_events:
- return self.summary_message()
- if not max_summary_length:
- max_summary_length = self.max_summary_tlength
-
- # Create a copy of the new_events list to prevent modifying the original list
- new_events = copy.deepcopy(new_events)
-
- # Replace "assistant" with "you". This produces much better first person past tense results.
- # Iterate over a copy: the loop removes user messages from new_events
- for event in list(new_events):
- if event.role.lower() == "assistant":
- event.role = "you"
-
- # Remove "thoughts" dictionary from "content"
- try:
- content_dict = extract_json_from_response(event.content)
- if "thoughts" in content_dict:
- del content_dict["thoughts"]
- event.content = json.dumps(content_dict)
- except json.JSONDecodeError as e:
- logger.error(f"Error: Invalid JSON: {e}")
- if config.debug_mode:
- logger.error(f"{event.content}")
-
- elif event.role.lower() == "system":
- event.role = "your computer"
-
- # Delete all user messages
- elif event.role == "user":
- new_events.remove(event)
-
- summ_model = OPEN_AI_CHAT_MODELS[config.fast_llm]
-
- # Determine token lengths for use in batching
- prompt_template_length = count_string_tokens(
- MessageHistory.SUMMARIZATION_PROMPT.format(summary="", new_events=""),
- summ_model.name,
- )
- max_input_tokens = summ_model.max_tokens - max_summary_length
- summary_tlength = count_string_tokens(self.summary, summ_model.name)
- batch = []
- batch_tlength = 0
-
- # TODO: Put a cap on length of total new events and drop some previous events to
- # save API cost. Need to think thru more how to do it without losing the context.
- for event in new_events:
- event_tlength = count_message_tokens(event, summ_model.name)
-
- if (
- batch_tlength + event_tlength
- > max_input_tokens - prompt_template_length - summary_tlength
- ):
- # The batch is full. Summarize it and start a new one.
- self.summarize_batch(batch, config, max_summary_length)
- summary_tlength = count_string_tokens(self.summary, summ_model.name)
- batch = [event]
- batch_tlength = event_tlength
- else:
- batch.append(event)
- batch_tlength += event_tlength
-
- if batch:
- # There's an unprocessed batch. Summarize it.
- self.summarize_batch(batch, config, max_summary_length)
-
- return self.summary_message()
-
- def summarize_batch(
- self, new_events_batch: list[Message], config: Config, max_output_length: int
- ):
- prompt = MessageHistory.SUMMARIZATION_PROMPT.format(
- summary=self.summary, new_events=new_events_batch
- )
-
- prompt = ChatSequence.for_model(config.fast_llm, [Message("user", prompt)])
- if self.agent:
- self.agent.log_cycle_handler.log_cycle(
- self.agent.ai_config.ai_name,
- self.agent.created_at,
- self.agent.cycle_count,
- prompt.raw(),
- PROMPT_SUMMARY_FILE_NAME,
- )
-
- self.summary = create_chat_completion(
- prompt, config, max_tokens=max_output_length
- ).content
-
- if self.agent:
- self.agent.log_cycle_handler.log_cycle(
- self.agent.ai_config.ai_name,
- self.agent.created_at,
- self.agent.cycle_count,
- self.summary,
- SUMMARY_FILE_NAME,
- )
diff --git a/autogpt/memory/vector/__init__.py b/autogpt/memory/vector/__init__.py
deleted file mode 100644
index 414a28006..000000000
--- a/autogpt/memory/vector/__init__.py
+++ /dev/null
@@ -1,153 +0,0 @@
-from autogpt.config import Config
-from autogpt.logs import logger
-
-from .memory_item import MemoryItem, MemoryItemRelevance
-from .providers.base import VectorMemoryProvider as VectorMemory
-from .providers.json_file import JSONFileMemory
-from .providers.no_memory import NoMemory
-
-# List of supported memory backends
-# Add a backend to this list if the import attempt is successful
-supported_memory = ["json_file", "no_memory"]
-
-# try:
-# from .providers.redis import RedisMemory
-
-# supported_memory.append("redis")
-# except ImportError:
-# RedisMemory = None
-
-# try:
-# from .providers.pinecone import PineconeMemory
-
-# supported_memory.append("pinecone")
-# except ImportError:
-# PineconeMemory = None
-
-# try:
-# from .providers.weaviate import WeaviateMemory
-
-# supported_memory.append("weaviate")
-# except ImportError:
-# WeaviateMemory = None
-
-# try:
-# from .providers.milvus import MilvusMemory
-
-# supported_memory.append("milvus")
-# except ImportError:
-# MilvusMemory = None
-
-
-def get_memory(config: Config) -> VectorMemory:
- """Returns a memory object corresponding to the memory backend specified in the config.
-
- The type of memory object returned depends on the value of the `memory_backend`
- attribute in the configuration. E.g. if `memory_backend` is set to "pinecone", a
- `PineconeMemory` object is returned. If it is set to "redis", a `RedisMemory`
- object is returned.
- By default, a `JSONFileMemory` object is returned.
-
- Params:
- config: A configuration object that contains information about the memory backend
- to be used and other relevant parameters.
-
- Returns:
- VectorMemory: an instance of a memory object based on the configuration provided.
- """
- memory = None
-
- match config.memory_backend:
- case "json_file":
- memory = JSONFileMemory(config)
-
- case "pinecone":
- raise NotImplementedError(
- "The Pinecone memory backend has been rendered incompatible by work on "
- "the memory system, and was removed. Whether support will be added back "
- "in the future is subject to discussion, feel free to pitch in: "
- "https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
- )
- # if not PineconeMemory:
- # logger.warn(
- # "Error: Pinecone is not installed. Please install pinecone"
- # " to use Pinecone as a memory backend."
- # )
- # else:
- # memory = PineconeMemory(config)
- # if clear:
- # memory.clear()
-
- case "redis":
- raise NotImplementedError(
- "The Redis memory backend has been rendered incompatible by work on "
- "the memory system, and has been removed temporarily."
- )
- # if not RedisMemory:
- # logger.warn(
- # "Error: Redis is not installed. Please install redis-py to"
- # " use Redis as a memory backend."
- # )
- # else:
- # memory = RedisMemory(config)
-
- case "weaviate":
- raise NotImplementedError(
- "The Weaviate memory backend has been rendered incompatible by work on "
- "the memory system, and was removed. Whether support will be added back "
- "in the future is subject to discussion, feel free to pitch in: "
- "https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
- )
- # if not WeaviateMemory:
- # logger.warn(
- # "Error: Weaviate is not installed. Please install weaviate-client to"
- # " use Weaviate as a memory backend."
- # )
- # else:
- # memory = WeaviateMemory(config)
-
- case "milvus":
- raise NotImplementedError(
- "The Milvus memory backend has been rendered incompatible by work on "
- "the memory system, and was removed. Whether support will be added back "
- "in the future is subject to discussion, feel free to pitch in: "
- "https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
- )
- # if not MilvusMemory:
- # logger.warn(
- # "Error: pymilvus sdk is not installed."
- # "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
- # )
- # else:
- # memory = MilvusMemory(config)
-
- case "no_memory":
- memory = NoMemory()
-
- case _:
- raise ValueError(
- f"Unknown memory backend '{config.memory_backend}'. Please check your config."
- )
-
- if memory is None:
- memory = JSONFileMemory(config)
-
- return memory
-
-
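-# A hypothetical usage sketch (assumes a populated Config instance; "json_file"
-# is the default backend, and `item` stands for any MemoryItem):
-#
-#     memory = get_memory(config)
-#     memory.add(item)
-
-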
-def get_supported_memory_backends():
- return supported_memory
-
-
-__all__ = [
- "get_memory",
- "MemoryItem",
- "MemoryItemRelevance",
- "JSONFileMemory",
- "NoMemory",
- "VectorMemory",
- # "RedisMemory",
- # "PineconeMemory",
- # "MilvusMemory",
- # "WeaviateMemory",
-]
diff --git a/autogpt/memory/vector/memory_item.py b/autogpt/memory/vector/memory_item.py
deleted file mode 100644
index 587a915b4..000000000
--- a/autogpt/memory/vector/memory_item.py
+++ /dev/null
@@ -1,256 +0,0 @@
-from __future__ import annotations
-
-import dataclasses
-import json
-from typing import Literal
-
-import numpy as np
-
-from autogpt.config import Config
-from autogpt.llm import Message
-from autogpt.llm.utils import count_string_tokens
-from autogpt.logs import logger
-from autogpt.processing.text import chunk_content, split_text, summarize_text
-
-from .utils import Embedding, get_embedding
-
-MemoryDocType = Literal["webpage", "text_file", "code_file", "agent_history"]
-
-
-@dataclasses.dataclass
-class MemoryItem:
- """Memory object containing raw content as well as embeddings"""
-
- raw_content: str
- summary: str
- chunks: list[str]
- chunk_summaries: list[str]
- e_summary: Embedding
- e_chunks: list[Embedding]
- metadata: dict
-
- def relevance_for(self, query: str, e_query: Embedding | None = None):
- return MemoryItemRelevance.of(self, query, e_query)
-
- @staticmethod
- def from_text(
- text: str,
- source_type: MemoryDocType,
- config: Config,
-        metadata: dict | None = None,
-        how_to_summarize: str | None = None,
-        question_for_summary: str | None = None,
-    ):
-        # Avoid a mutable default argument: metadata is mutated below, so a
-        # shared default dict would leak state between calls
-        metadata = metadata if metadata is not None else {}
-
-        logger.debug(f"Memorizing text:\n{'-'*32}\n{text}\n{'-'*32}\n")
-
- chunks = [
- chunk
- for chunk, _ in (
- split_text(text, config.embedding_model, config)
- if source_type != "code_file"
- else chunk_content(text, config.embedding_model)
- )
- ]
- logger.debug("Chunks: " + str(chunks))
-
- chunk_summaries = [
- summary
- for summary, _ in [
- summarize_text(
- text_chunk,
- config,
- instruction=how_to_summarize,
- question=question_for_summary,
- )
- for text_chunk in chunks
- ]
- ]
- logger.debug("Chunk summaries: " + str(chunk_summaries))
-
- e_chunks = get_embedding(chunks, config)
-
- summary = (
- chunk_summaries[0]
- if len(chunks) == 1
- else summarize_text(
- "\n\n".join(chunk_summaries),
- config,
- instruction=how_to_summarize,
- question=question_for_summary,
- )[0]
- )
- logger.debug("Total summary: " + summary)
-
- # TODO: investigate search performance of weighted average vs summary
- # e_average = np.average(e_chunks, axis=0, weights=[len(c) for c in chunks])
- e_summary = get_embedding(summary, config)
-
- metadata["source_type"] = source_type
-
- return MemoryItem(
- text,
- summary,
- chunks,
- chunk_summaries,
- e_summary,
- e_chunks,
- metadata=metadata,
- )
-
- @staticmethod
- def from_text_file(content: str, path: str, config: Config):
- return MemoryItem.from_text(content, "text_file", config, {"location": path})
-
- @staticmethod
-    def from_code_file(content: str, path: str, config: Config):
-        # TODO: implement tailored code memories
-        return MemoryItem.from_text(content, "code_file", config, {"location": path})
-
- @staticmethod
-    def from_ai_action(ai_message: Message, result_message: Message, config: Config):
- # The result_message contains either user feedback
- # or the result of the command specified in ai_message
-
- if ai_message.role != "assistant":
- raise ValueError(f"Invalid role on 'ai_message': {ai_message.role}")
-
- result = (
- result_message.content
- if result_message.content.startswith("Command")
- else "None"
- )
- user_input = (
- result_message.content
- if result_message.content.startswith("Human feedback")
- else "None"
- )
- memory_content = (
- f"Assistant Reply: {ai_message.content}"
- "\n\n"
- f"Result: {result}"
- "\n\n"
- f"Human Feedback: {user_input}"
- )
-
-        return MemoryItem.from_text(
-            text=memory_content,
-            source_type="agent_history",
-            config=config,
-            how_to_summarize=(
-                "if possible, also make clear the link between the command in the"
-                " assistant's response and the command result. Do not mention the"
-                " human feedback if there is none"
-            ),
-        )
-
- @staticmethod
- def from_webpage(
- content: str, url: str, config: Config, question: str | None = None
- ):
- return MemoryItem.from_text(
- text=content,
- source_type="webpage",
- config=config,
- metadata={"location": url},
- question_for_summary=question,
- )
-
- def dump(self, calculate_length=False) -> str:
- if calculate_length:
- token_length = count_string_tokens(
- self.raw_content, Config().embedding_model
- )
- return f"""
-=============== MemoryItem ===============
-Size: {f'{token_length} tokens in ' if calculate_length else ''}{len(self.e_chunks)} chunks
-Metadata: {json.dumps(self.metadata, indent=2)}
----------------- SUMMARY -----------------
-{self.summary}
------------------- RAW -------------------
-{self.raw_content}
-==========================================
-"""
-
- def __eq__(self, other: MemoryItem):
- return (
- self.raw_content == other.raw_content
- and self.chunks == other.chunks
- and self.chunk_summaries == other.chunk_summaries
- # Embeddings can either be list[float] or np.ndarray[float32],
- # and for comparison they must be of the same type
- and np.array_equal(
- self.e_summary
- if isinstance(self.e_summary, np.ndarray)
- else np.array(self.e_summary, dtype=np.float32),
- other.e_summary
- if isinstance(other.e_summary, np.ndarray)
- else np.array(other.e_summary, dtype=np.float32),
- )
- and np.array_equal(
- self.e_chunks
- if isinstance(self.e_chunks[0], np.ndarray)
- else [np.array(c, dtype=np.float32) for c in self.e_chunks],
- other.e_chunks
- if isinstance(other.e_chunks[0], np.ndarray)
- else [np.array(c, dtype=np.float32) for c in other.e_chunks],
- )
- )
-
-
-@dataclasses.dataclass
-class MemoryItemRelevance:
- """
- Class that encapsulates memory relevance search functionality and data.
- Instances contain a MemoryItem and its relevance scores for a given query.
- """
-
- memory_item: MemoryItem
- for_query: str
- summary_relevance_score: float
- chunk_relevance_scores: list[float]
-
- @staticmethod
- def of(
- memory_item: MemoryItem, for_query: str, e_query: Embedding | None = None
- ) -> MemoryItemRelevance:
- e_query = e_query or get_embedding(for_query)
- _, srs, crs = MemoryItemRelevance.calculate_scores(memory_item, e_query)
- return MemoryItemRelevance(
- for_query=for_query,
- memory_item=memory_item,
- summary_relevance_score=srs,
- chunk_relevance_scores=crs,
- )
-
- @staticmethod
- def calculate_scores(
- memory: MemoryItem, compare_to: Embedding
- ) -> tuple[float, float, list[float]]:
- """
- Calculates similarity between given embedding and all embeddings of the memory
-
- Returns:
- float: the aggregate (max) relevance score of the memory
- float: the relevance score of the memory summary
- list: the relevance scores of the memory chunks
- """
- summary_relevance_score = np.dot(memory.e_summary, compare_to)
- chunk_relevance_scores = np.dot(memory.e_chunks, compare_to)
- logger.debug(f"Relevance of summary: {summary_relevance_score}")
- logger.debug(f"Relevance of chunks: {chunk_relevance_scores}")
-
- relevance_scores = [summary_relevance_score, *chunk_relevance_scores]
- logger.debug(f"Relevance scores: {relevance_scores}")
- return max(relevance_scores), summary_relevance_score, chunk_relevance_scores
-
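-    # Toy example (made-up vectors): with e_summary=[1, 0],
-    # e_chunks=[[0.5, 0.5], [0, 1]] and query embedding [1, 0], the summary
-    # score is 1.0 and the chunk scores are [0.5, 0.0], so the aggregate
-    # (max) relevance is 1.0.
-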
- @property
- def score(self) -> float:
- """The aggregate relevance score of the memory item for the given query"""
- return max([self.summary_relevance_score, *self.chunk_relevance_scores])
-
- @property
- def most_relevant_chunk(self) -> tuple[str, float]:
- """The most relevant chunk of the memory item + its score for the given query"""
- i_relmax = np.argmax(self.chunk_relevance_scores)
- return self.memory_item.chunks[i_relmax], self.chunk_relevance_scores[i_relmax]
-
- def __str__(self):
- return (
- f"{self.memory_item.summary} ({self.summary_relevance_score}) "
- f"{self.chunk_relevance_scores}"
- )
diff --git a/autogpt/memory/vector/providers/base.py b/autogpt/memory/vector/providers/base.py
deleted file mode 100644
index aa233228f..000000000
--- a/autogpt/memory/vector/providers/base.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import abc
-import functools
-from typing import MutableSet, Sequence
-
-import numpy as np
-
-from autogpt.config.config import Config
-from autogpt.logs import logger
-
-from .. import MemoryItem, MemoryItemRelevance
-from ..utils import Embedding, get_embedding
-
-
-class VectorMemoryProvider(MutableSet[MemoryItem]):
- @abc.abstractmethod
- def __init__(self, config: Config):
- pass
-
- def get(self, query: str, config: Config) -> MemoryItemRelevance | None:
- """
- Gets the data from the memory that is most relevant to the given query.
-
- Args:
- query: The query used to retrieve information.
- config: The config Object.
-
- Returns: The most relevant Memory
- """
- result = self.get_relevant(query, 1, config)
- return result[0] if result else None
-
- def get_relevant(
- self, query: str, k: int, config: Config
- ) -> Sequence[MemoryItemRelevance]:
- """
- Returns the top-k most relevant memories for the given query
-
- Args:
- query: the query to compare stored memories to
- k: the number of relevant memories to fetch
- config: The config Object.
-
- Returns:
-            list[MemoryItemRelevance] containing the top k relevant memories
- """
- if len(self) < 1:
- return []
-
- logger.debug(
- f"Searching for {k} relevant memories for query '{query}'; "
- f"{len(self)} memories in index"
- )
-
- relevances = self.score_memories_for_relevance(query, config)
- logger.debug(f"Memory relevance scores: {[str(r) for r in relevances]}")
-
- # take last k items and reverse
- top_k_indices = np.argsort([r.score for r in relevances])[-k:][::-1]
-
- return [relevances[i] for i in top_k_indices]
-
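-    # Sketch of the top-k selection above with hypothetical scores:
-    # np.argsort([0.2, 0.9, 0.5])[-2:][::-1] -> [1, 2], i.e. the indices
-    # of the two best-scoring memories, highest first.
-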
- def score_memories_for_relevance(
- self, for_query: str, config: Config
- ) -> Sequence[MemoryItemRelevance]:
- """
- Returns MemoryItemRelevance for every memory in the index.
- Implementations may override this function for performance purposes.
- """
- e_query: Embedding = get_embedding(for_query, config)
- return [m.relevance_for(for_query, e_query) for m in self]
-
- def get_stats(self) -> tuple[int, int]:
- """
- Returns:
- tuple (n_memories: int, n_chunks: int): the stats of the memory index
- """
- return len(self), functools.reduce(lambda t, m: t + len(m.e_chunks), self, 0)
diff --git a/autogpt/memory/vector/providers/json_file.py b/autogpt/memory/vector/providers/json_file.py
deleted file mode 100644
index b85ea8e67..000000000
--- a/autogpt/memory/vector/providers/json_file.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Iterator
-
-import orjson
-
-from autogpt.config import Config
-from autogpt.logs import logger
-
-from ..memory_item import MemoryItem
-from .base import VectorMemoryProvider
-
-
-class JSONFileMemory(VectorMemoryProvider):
- """Memory backend that stores memories in a JSON file"""
-
- SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
-
- file_path: Path
- memories: list[MemoryItem]
-
- def __init__(self, config: Config) -> None:
- """Initialize a class instance
-
- Args:
- config: Config object
-
- Returns:
- None
- """
- workspace_path = Path(config.workspace_path)
- self.file_path = workspace_path / f"{config.memory_index}.json"
- self.file_path.touch()
- logger.debug(
- f"Initialized {__class__.__name__} with index path {self.file_path}"
- )
-
- self.memories = []
- try:
- self.load_index()
- logger.debug(f"Loaded {len(self.memories)} MemoryItems from file")
- except Exception as e:
- logger.warn(f"Could not load MemoryItems from file: {e}")
- self.save_index()
-
- def __iter__(self) -> Iterator[MemoryItem]:
- return iter(self.memories)
-
- def __contains__(self, x: MemoryItem) -> bool:
- return x in self.memories
-
- def __len__(self) -> int:
- return len(self.memories)
-
- def add(self, item: MemoryItem):
- self.memories.append(item)
- logger.debug(f"Adding item to memory: {item.dump()}")
- self.save_index()
- return len(self.memories)
-
-    def discard(self, item: MemoryItem):
-        # Remove directly from the backing list: calling self.remove() here
-        # would recurse, since MutableSet.remove is implemented via discard()
-        try:
-            self.memories.remove(item)
-            self.save_index()
-        except ValueError:
-            pass
-
- def clear(self):
- """Clears the data in memory."""
- self.memories.clear()
- self.save_index()
-
- def load_index(self):
- """Loads all memories from the index file"""
- if not self.file_path.is_file():
- logger.debug(f"Index file '{self.file_path}' does not exist")
- return
- with self.file_path.open("r") as f:
- logger.debug(f"Loading memories from index file '{self.file_path}'")
- json_index = orjson.loads(f.read())
- for memory_item_dict in json_index:
- self.memories.append(MemoryItem(**memory_item_dict))
-
- def save_index(self):
- logger.debug(f"Saving memory index to file {self.file_path}")
- with self.file_path.open("wb") as f:
- return f.write(orjson.dumps(self.memories, option=self.SAVE_OPTIONS))
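-
-
-# A hypothetical round-trip sketch (assumes config.workspace_path and
-# config.memory_index are set, and `item` is a MemoryItem):
-#
-#     memory = JSONFileMemory(config)
-#     memory.add(item)   # appends and persists to <workspace>/<index>.json
-#     memory.clear()     # empties both the in-memory list and the file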
diff --git a/autogpt/memory/vector/utils.py b/autogpt/memory/vector/utils.py
deleted file mode 100644
index 74438f28c..000000000
--- a/autogpt/memory/vector/utils.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from typing import Any, overload
-
-import numpy as np
-
-from autogpt.config import Config
-from autogpt.llm.base import TText
-from autogpt.llm.providers import openai as iopenai
-from autogpt.logs import logger
-
-Embedding = list[np.float32] | np.ndarray[Any, np.dtype[np.float32]]
-"""Embedding vector"""
-
-
-@overload
-def get_embedding(input: str | TText, config: Config) -> Embedding:
-    ...
-
-
-@overload
-def get_embedding(input: list[str] | list[TText], config: Config) -> list[Embedding]:
-    ...
-
-
-def get_embedding(
- input: str | TText | list[str] | list[TText], config: Config
-) -> Embedding | list[Embedding]:
- """Get an embedding from the ada model.
-
- Args:
- input: Input text to get embeddings for, encoded as a string or array of tokens.
- Multiple inputs may be given as a list of strings or token arrays.
-
- Returns:
- List[float]: The embedding.
- """
- multiple = isinstance(input, list) and all(not isinstance(i, int) for i in input)
-
- if isinstance(input, str):
- input = input.replace("\n", " ")
- elif multiple and isinstance(input[0], str):
- input = [text.replace("\n", " ") for text in input]
-
- model = config.embedding_model
- if config.use_azure:
- kwargs = config.get_azure_kwargs(model)
- else:
- kwargs = {"model": model}
-
- logger.debug(
- f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}"
- f" with model '{model}'"
- + (f" via Azure deployment '{kwargs['engine']}'" if config.use_azure else "")
- )
-
- embeddings = iopenai.create_embedding(
- input,
- **kwargs,
- api_key=config.openai_api_key,
- ).data
-
- if not multiple:
- return embeddings[0]["embedding"]
-
- embeddings = sorted(embeddings, key=lambda x: x["index"])
- return [d["embedding"] for d in embeddings]
diff --git a/autogpt/models/base_open_ai_plugin.py b/autogpt/models/base_open_ai_plugin.py
deleted file mode 100644
index c0aac8ed2..000000000
--- a/autogpt/models/base_open_ai_plugin.py
+++ /dev/null
@@ -1,249 +0,0 @@
-"""Handles loading of plugins."""
-from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar
-
-from auto_gpt_plugin_template import AutoGPTPluginTemplate
-
-PromptGenerator = TypeVar("PromptGenerator")
-
-
-class Message(TypedDict):
- role: str
- content: str
-
-
-class BaseOpenAIPlugin(AutoGPTPluginTemplate):
- """
- This is a BaseOpenAIPlugin class for generating Auto-GPT plugins.
- """
-
- def __init__(self, manifests_specs_clients: dict):
- # super().__init__()
- self._name = manifests_specs_clients["manifest"]["name_for_model"]
- self._version = manifests_specs_clients["manifest"]["schema_version"]
- self._description = manifests_specs_clients["manifest"]["description_for_model"]
- self._client = manifests_specs_clients["client"]
- self._manifest = manifests_specs_clients["manifest"]
- self._openapi_spec = manifests_specs_clients["openapi_spec"]
-
- def can_handle_on_response(self) -> bool:
- """This method is called to check that the plugin can
- handle the on_response method.
- Returns:
- bool: True if the plugin can handle the on_response method."""
- return False
-
- def on_response(self, response: str, *args, **kwargs) -> str:
- """This method is called when a response is received from the model."""
- return response
-
- def can_handle_post_prompt(self) -> bool:
- """This method is called to check that the plugin can
- handle the post_prompt method.
- Returns:
- bool: True if the plugin can handle the post_prompt method."""
- return False
-
- def post_prompt(self, prompt: PromptGenerator) -> PromptGenerator:
- """This method is called just after the generate_prompt is called,
- but actually before the prompt is generated.
- Args:
- prompt (PromptGenerator): The prompt generator.
- Returns:
- PromptGenerator: The prompt generator.
- """
- return prompt
-
- def can_handle_on_planning(self) -> bool:
- """This method is called to check that the plugin can
- handle the on_planning method.
- Returns:
- bool: True if the plugin can handle the on_planning method."""
- return False
-
- def on_planning(
- self, prompt: PromptGenerator, messages: List[Message]
- ) -> Optional[str]:
- """This method is called before the planning chat completion is done.
- Args:
- prompt (PromptGenerator): The prompt generator.
- messages (List[str]): The list of messages.
- """
-
- def can_handle_post_planning(self) -> bool:
- """This method is called to check that the plugin can
- handle the post_planning method.
- Returns:
- bool: True if the plugin can handle the post_planning method."""
- return False
-
- def post_planning(self, response: str) -> str:
- """This method is called after the planning chat completion is done.
- Args:
- response (str): The response.
- Returns:
- str: The resulting response.
- """
- return response
-
- def can_handle_pre_instruction(self) -> bool:
- """This method is called to check that the plugin can
- handle the pre_instruction method.
- Returns:
- bool: True if the plugin can handle the pre_instruction method."""
- return False
-
- def pre_instruction(self, messages: List[Message]) -> List[Message]:
- """This method is called before the instruction chat is done.
- Args:
- messages (List[Message]): The list of context messages.
- Returns:
- List[Message]: The resulting list of messages.
- """
- return messages
-
- def can_handle_on_instruction(self) -> bool:
- """This method is called to check that the plugin can
- handle the on_instruction method.
- Returns:
- bool: True if the plugin can handle the on_instruction method."""
- return False
-
- def on_instruction(self, messages: List[Message]) -> Optional[str]:
- """This method is called when the instruction chat is done.
- Args:
- messages (List[Message]): The list of context messages.
- Returns:
- Optional[str]: The resulting message.
- """
-
- def can_handle_post_instruction(self) -> bool:
- """This method is called to check that the plugin can
- handle the post_instruction method.
- Returns:
- bool: True if the plugin can handle the post_instruction method."""
- return False
-
- def post_instruction(self, response: str) -> str:
- """This method is called after the instruction chat is done.
- Args:
- response (str): The response.
- Returns:
- str: The resulting response.
- """
- return response
-
- def can_handle_pre_command(self) -> bool:
- """This method is called to check that the plugin can
- handle the pre_command method.
- Returns:
- bool: True if the plugin can handle the pre_command method."""
- return False
-
- def pre_command(
- self, command_name: str, arguments: Dict[str, Any]
- ) -> Tuple[str, Dict[str, Any]]:
- """This method is called before the command is executed.
- Args:
- command_name (str): The command name.
- arguments (Dict[str, Any]): The arguments.
- Returns:
- Tuple[str, Dict[str, Any]]: The command name and the arguments.
- """
- return command_name, arguments
-
- def can_handle_post_command(self) -> bool:
- """This method is called to check that the plugin can
- handle the post_command method.
- Returns:
- bool: True if the plugin can handle the post_command method."""
- return False
-
- def post_command(self, command_name: str, response: str) -> str:
- """This method is called after the command is executed.
- Args:
- command_name (str): The command name.
- response (str): The response.
- Returns:
- str: The resulting response.
- """
- return response
-
-    def can_handle_chat_completion(
-        self, messages: List[Message], model: str, temperature: float, max_tokens: int
- ) -> bool:
- """This method is called to check that the plugin can
- handle the chat_completion method.
- Args:
- messages (List[Message]): The messages.
- model (str): The model name.
- temperature (float): The temperature.
- max_tokens (int): The max tokens.
- Returns:
- bool: True if the plugin can handle the chat_completion method."""
- return False
-
- def handle_chat_completion(
- self, messages: List[Message], model: str, temperature: float, max_tokens: int
- ) -> str:
- """This method is called when the chat completion is done.
- Args:
- messages (List[Message]): The messages.
- model (str): The model name.
- temperature (float): The temperature.
- max_tokens (int): The max tokens.
- Returns:
- str: The resulting response.
- """
-
- def can_handle_text_embedding(self, text: str) -> bool:
- """This method is called to check that the plugin can
- handle the text_embedding method.
- Args:
-            text (str): The text to be converted to an embedding.
- Returns:
- bool: True if the plugin can handle the text_embedding method."""
- return False
-
- def handle_text_embedding(self, text: str) -> list:
- """This method is called when the chat completion is done.
- Args:
- text (str): The text to be convert to embedding.
- Returns:
- list: The text embedding.
- """
-
- def can_handle_user_input(self, user_input: str) -> bool:
- """This method is called to check that the plugin can
- handle the user_input method.
-
- Args:
- user_input (str): The user input.
-
- Returns:
- bool: True if the plugin can handle the user_input method."""
- return False
-
- def user_input(self, user_input: str) -> str:
- """This method is called to request user input to the user.
-
- Args:
- user_input (str): The question or prompt to ask the user.
-
- Returns:
- str: The user input.
- """
-
- def can_handle_report(self) -> bool:
- """This method is called to check that the plugin can
- handle the report method.
-
- Returns:
- bool: True if the plugin can handle the report method."""
- return False
-
- def report(self, message: str) -> None:
- """This method is called to report a message to the user.
-
- Args:
- message (str): The message to report.
- """
diff --git a/autogpt/models/command.py b/autogpt/models/command.py
deleted file mode 100644
index 614697861..000000000
--- a/autogpt/models/command.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from typing import Any, Callable, Optional
-
-from autogpt.config import Config
-
-from .command_parameter import CommandParameter
-
-
-class Command:
- """A class representing a command.
-
- Attributes:
- name (str): The name of the command.
- description (str): A brief description of what the command does.
- parameters (list): The parameters of the function that the command executes.
- """
-
- def __init__(
- self,
- name: str,
- description: str,
- method: Callable[..., Any],
- parameters: list[CommandParameter],
- enabled: bool | Callable[[Config], bool] = True,
- disabled_reason: Optional[str] = None,
-        aliases: list[str] | None = None,
- ):
- self.name = name
- self.description = description
- self.method = method
- self.parameters = parameters
- self.enabled = enabled
- self.disabled_reason = disabled_reason
-        self.aliases = aliases or []
-
- def __call__(self, *args, **kwargs) -> Any:
- if hasattr(kwargs, "config") and callable(self.enabled):
- self.enabled = self.enabled(kwargs["config"])
- if not self.enabled:
- if self.disabled_reason:
- return f"Command '{self.name}' is disabled: {self.disabled_reason}"
- return f"Command '{self.name}' is disabled"
- return self.method(*args, **kwargs)
-
- def __str__(self) -> str:
- params = [
- f"{param.name}: {param.type if param.required else f'Optional[{param.type}]'}"
- for param in self.parameters
- ]
- return f"{self.name}: {self.description}, params: ({', '.join(params)})"
diff --git a/autogpt/models/command_parameter.py b/autogpt/models/command_parameter.py
deleted file mode 100644
index ec130c875..000000000
--- a/autogpt/models/command_parameter.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import dataclasses
-
-
-@dataclasses.dataclass
-class CommandParameter:
- name: str
- type: str
- description: str
- required: bool
-
- def __repr__(self):
- return f"CommandParameter('{self.name}', '{self.type}', '{self.description}', {self.required})"
diff --git a/autogpt/models/command_registry.py b/autogpt/models/command_registry.py
deleted file mode 100644
index f54f4adb5..000000000
--- a/autogpt/models/command_registry.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import importlib
-import inspect
-from typing import Any
-
-from autogpt.command_decorator import AUTO_GPT_COMMAND_IDENTIFIER
-from autogpt.logs import logger
-from autogpt.models.command import Command
-
-
-class CommandRegistry:
- """
- The CommandRegistry class is a manager for a collection of Command objects.
- It allows the registration, modification, and retrieval of Command objects,
- as well as the scanning and loading of command plugins from a specified
- directory.
- """
-
- commands: dict[str, Command]
- commands_aliases: dict[str, Command]
-
- def __init__(self):
- self.commands = {}
- self.commands_aliases = {}
-
- def __contains__(self, command_name: str):
- return command_name in self.commands or command_name in self.commands_aliases
-
- def _import_module(self, module_name: str) -> Any:
- return importlib.import_module(module_name)
-
- def _reload_module(self, module: Any) -> Any:
- return importlib.reload(module)
-
- def register(self, cmd: Command) -> None:
- if cmd.name in self.commands:
- logger.warn(
- f"Command '{cmd.name}' already registered and will be overwritten!"
- )
- self.commands[cmd.name] = cmd
-
- if cmd.name in self.commands_aliases:
- logger.warn(
- f"Command '{cmd.name}' will overwrite alias with the same name of "
- f"'{self.commands_aliases[cmd.name]}'!"
- )
- for alias in cmd.aliases:
- self.commands_aliases[alias] = cmd
-
- def unregister(self, command: Command) -> None:
- if command.name in self.commands:
- del self.commands[command.name]
- for alias in command.aliases:
- del self.commands_aliases[alias]
- else:
- raise KeyError(f"Command '{command.name}' not found in registry.")
-
- def reload_commands(self) -> None:
- """Reloads all loaded command plugins."""
- for cmd_name in self.commands:
- cmd = self.commands[cmd_name]
- module = self._import_module(cmd.__module__)
- reloaded_module = self._reload_module(module)
- if hasattr(reloaded_module, "register"):
- reloaded_module.register(self)
-
- def get_command(self, name: str) -> Command | None:
- if name in self.commands:
- return self.commands[name]
-
- if name in self.commands_aliases:
- return self.commands_aliases[name]
-
- def call(self, command_name: str, **kwargs) -> Any:
- if command := self.get_command(command_name):
- return command(**kwargs)
- raise KeyError(f"Command '{command_name}' not found in registry")
-
- def command_prompt(self) -> str:
- """
- Returns a string representation of all registered `Command` objects for use in a prompt
- """
- commands_list = [
- f"{idx + 1}. {str(cmd)}" for idx, cmd in enumerate(self.commands.values())
- ]
- return "\n".join(commands_list)
-
- def import_commands(self, module_name: str) -> None:
- """
- Imports the specified Python module containing command plugins.
-
- This method imports the associated module and registers any functions or
- classes that are decorated with the `AUTO_GPT_COMMAND_IDENTIFIER` attribute
- as `Command` objects. The registered `Command` objects are then added to the
- `commands` dictionary of the `CommandRegistry` object.
-
- Args:
- module_name (str): The name of the module to import for command plugins.
- """
-
- module = importlib.import_module(module_name)
-
- for attr_name in dir(module):
- attr = getattr(module, attr_name)
- # Register decorated functions
- if hasattr(attr, AUTO_GPT_COMMAND_IDENTIFIER) and getattr(
- attr, AUTO_GPT_COMMAND_IDENTIFIER
- ):
- self.register(attr.command)
- # Register command classes
- elif (
- inspect.isclass(attr) and issubclass(attr, Command) and attr != Command
- ):
- cmd_instance = attr()
- self.register(cmd_instance)
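-
-
-# A hypothetical usage sketch of the registry (the module and command names
-# are assumed, not guaranteed to exist):
-#
-#     registry = CommandRegistry()
-#     registry.import_commands("autogpt.commands.file_operations")
-#     result = registry.call("read_file", filename="notes.txt")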
diff --git a/autogpt/plugins/__init__.py b/autogpt/plugins/__init__.py
deleted file mode 100644
index e9b864c61..000000000
--- a/autogpt/plugins/__init__.py
+++ /dev/null
@@ -1,320 +0,0 @@
-"""Handles loading of plugins."""
-from __future__ import annotations
-
-import importlib.util
-import inspect
-import json
-import os
-import sys
-import zipfile
-from pathlib import Path
-from typing import TYPE_CHECKING, List
-from urllib.parse import urlparse
-from zipimport import zipimporter
-
-import openapi_python_client
-import requests
-from auto_gpt_plugin_template import AutoGPTPluginTemplate
-from openapi_python_client.config import Config as OpenAPIConfig
-
-if TYPE_CHECKING:
- from autogpt.config import Config
-
-from autogpt.logs import logger
-from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
-
-DEFAULT_PLUGINS_CONFIG_FILE = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "..", "..", "plugins_config.yaml"
-)
-
-
-def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
- """
-    Inspect a zipfile for modules.
-
- Args:
- zip_path (str): Path to the zipfile.
- debug (bool, optional): Enable debug logging. Defaults to False.
-
- Returns:
- list[str]: The list of module names found or empty list if none were found.
- """
- result = []
- with zipfile.ZipFile(zip_path, "r") as zfile:
- for name in zfile.namelist():
- if name.endswith("__init__.py") and not name.startswith("__MACOSX"):
- logger.debug(f"Found module '{name}' in the zipfile at: {name}")
- result.append(name)
- if len(result) == 0:
- logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
- return result
-
-
-def write_dict_to_json_file(data: dict, file_path: str) -> None:
- """
- Write a dictionary to a JSON file.
- Args:
- data (dict): Dictionary to write.
- file_path (str): Path to the file.
- """
- with open(file_path, "w") as file:
- json.dump(data, file, indent=4)
-
-
-def fetch_openai_plugins_manifest_and_spec(config: Config) -> dict:
- """
-    Fetch the manifest and OpenAPI spec for each configured OpenAI plugin.
-    Args:
-        config (Config): Config instance, including the list of plugin URLs.
-    Returns:
-        dict: per-URL dictionary of manifest and spec.
- """
- # TODO add directory scan
- manifests = {}
- for url in config.plugins_openai:
- openai_plugin_client_dir = f"{config.plugins_dir}/openai/{urlparse(url).netloc}"
- create_directory_if_not_exists(openai_plugin_client_dir)
- if not os.path.exists(f"{openai_plugin_client_dir}/ai-plugin.json"):
- try:
- response = requests.get(f"{url}/.well-known/ai-plugin.json")
- if response.status_code == 200:
- manifest = response.json()
- if manifest["schema_version"] != "v1":
- logger.warn(
- f"Unsupported manifest version: {manifest['schem_version']} for {url}"
- )
- continue
- if manifest["api"]["type"] != "openapi":
- logger.warn(
- f"Unsupported API type: {manifest['api']['type']} for {url}"
- )
- continue
- write_dict_to_json_file(
- manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
- )
-                else:
-                    logger.warn(
-                        f"Failed to fetch manifest for {url}: {response.status_code}"
-                    )
-                    continue
-            except requests.exceptions.RequestException as e:
-                logger.warn(f"Error while requesting manifest from {url}: {e}")
-                continue
- else:
- logger.info(f"Manifest for {url} already exists")
- manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
- if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
- openapi_spec = openapi_python_client._get_document(
- url=manifest["api"]["url"], path=None, timeout=5
- )
- write_dict_to_json_file(
- openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
- )
- else:
- logger.info(f"OpenAPI spec for {url} already exists")
- openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
- manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
- return manifests
-
-
-def create_directory_if_not_exists(directory_path: str) -> bool:
- """
- Create a directory if it does not exist.
- Args:
- directory_path (str): Path to the directory.
- Returns:
- bool: True if the directory was created, else False.
- """
- if not os.path.exists(directory_path):
- try:
- os.makedirs(directory_path)
- logger.debug(f"Created directory: {directory_path}")
- return True
- except OSError as e:
- logger.warn(f"Error creating directory {directory_path}: {e}")
- return False
- else:
- logger.info(f"Directory {directory_path} already exists")
- return True
-
-
-def initialize_openai_plugins(
- manifests_specs: dict, config: Config, debug: bool = False
-) -> dict:
- """
- Initialize OpenAI plugins.
- Args:
- manifests_specs (dict): per url dictionary of manifest and spec.
- config (Config): Config instance including plugins config
- debug (bool, optional): Enable debug logging. Defaults to False.
- Returns:
- dict: per url dictionary of manifest, spec and client.
- """
- openai_plugins_dir = f"{config.plugins_dir}/openai"
- if create_directory_if_not_exists(openai_plugins_dir):
- for url, manifest_spec in manifests_specs.items():
- openai_plugin_client_dir = f"{openai_plugins_dir}/{urlparse(url).hostname}"
- _meta_option = (openapi_python_client.MetaType.SETUP,)
- _config = OpenAPIConfig(
- **{
- "project_name_override": "client",
- "package_name_override": "client",
- }
- )
- prev_cwd = Path.cwd()
- os.chdir(openai_plugin_client_dir)
-
- if not os.path.exists("client"):
- client_results = openapi_python_client.create_new_client(
- url=manifest_spec["manifest"]["api"]["url"],
- path=None,
- meta=_meta_option,
- config=_config,
- )
- if client_results:
- logger.warn(
- f"Error creating OpenAPI client: {client_results[0].header} \n"
- f" details: {client_results[0].detail}"
- )
- continue
- spec = importlib.util.spec_from_file_location(
- "client", "client/client/client.py"
- )
- module = importlib.util.module_from_spec(spec)
-
- try:
- spec.loader.exec_module(module)
- finally:
- os.chdir(prev_cwd)
-
- client = module.Client(base_url=url)
- manifest_spec["client"] = client
- return manifests_specs
-
-
-def instantiate_openai_plugin_clients(
- manifests_specs_clients: dict, config: Config, debug: bool = False
-) -> dict:
- """
- Instantiates BaseOpenAIPlugin instances for each OpenAI plugin.
- Args:
- manifests_specs_clients (dict): per url dictionary of manifest, spec and client.
- config (Config): Config instance including plugins config
- debug (bool, optional): Enable debug logging. Defaults to False.
- Returns:
- plugins (dict): per url dictionary of BaseOpenAIPlugin instances.
-
- """
- plugins = {}
- for url, manifest_spec_client in manifests_specs_clients.items():
- plugins[url] = BaseOpenAIPlugin(manifest_spec_client)
- return plugins
-
-
-def scan_plugins(config: Config, debug: bool = False) -> List[AutoGPTPluginTemplate]:
- """Scan the plugins directory for plugins and loads them.
-
- Args:
- config (Config): Config instance including plugins config
- debug (bool, optional): Enable debug logging. Defaults to False.
-
- Returns:
- List[Tuple[str, Path]]: List of plugins.
- """
- loaded_plugins = []
- # Generic plugins
- plugins_path = Path(config.plugins_dir)
-
- plugins_config = config.plugins_config
- # Directory-based plugins
- for plugin_path in [f.path for f in os.scandir(config.plugins_dir) if f.is_dir()]:
- # Avoid going into __pycache__ or other hidden directories
- if plugin_path.startswith("__"):
- continue
-
- plugin_module_path = plugin_path.split(os.path.sep)
- plugin_module_name = plugin_module_path[-1]
- qualified_module_name = ".".join(plugin_module_path)
-
- __import__(qualified_module_name)
- plugin = sys.modules[qualified_module_name]
-
- if not plugins_config.is_enabled(plugin_module_name):
- logger.warn(
- f"Plugin folder {plugin_module_name} found but not configured. If this is a legitimate plugin, please add it to plugins_config.yaml (key: {plugin_module_name})."
- )
- continue
-
- for _, class_obj in inspect.getmembers(plugin):
- if (
- hasattr(class_obj, "_abc_impl")
- and AutoGPTPluginTemplate in class_obj.__bases__
- ):
- loaded_plugins.append(class_obj())
-
- # Zip-based plugins
- for plugin in plugins_path.glob("*.zip"):
- if moduleList := inspect_zip_for_modules(str(plugin), debug):
- for module in moduleList:
- plugin = Path(plugin)
- module = Path(module)
- logger.debug(f"Zipped Plugin: {plugin}, Module: {module}")
- zipped_package = zipimporter(str(plugin))
- zipped_module = zipped_package.load_module(str(module.parent))
-
- for key in dir(zipped_module):
- if key.startswith("__"):
- continue
-
- a_module = getattr(zipped_module, key)
- if not inspect.isclass(a_module):
- continue
-
- if (
- issubclass(a_module, AutoGPTPluginTemplate)
- and a_module.__name__ != "AutoGPTPluginTemplate"
- ):
- plugin_name = a_module.__name__
- plugin_configured = plugins_config.get(plugin_name) is not None
- plugin_enabled = plugins_config.is_enabled(plugin_name)
-
- if plugin_configured and plugin_enabled:
- logger.debug(
- f"Loading plugin {plugin_name}. Enabled in plugins_config.yaml."
- )
- loaded_plugins.append(a_module())
- elif plugin_configured and not plugin_enabled:
- logger.debug(
- f"Not loading plugin {plugin_name}. Disabled in plugins_config.yaml."
- )
- elif not plugin_configured:
- logger.warn(
- f"Not loading plugin {plugin_name}. Key '{plugin_name}' was not found in plugins_config.yaml. "
- f"Zipped plugins should use the class name ({plugin_name}) as the key."
- )
- else:
- if a_module.__name__ != "AutoGPTPluginTemplate":
- logger.debug(
- f"Skipping '{key}' because it doesn't subclass AutoGPTPluginTemplate."
- )
-
- # OpenAI plugins
- if config.plugins_openai:
- manifests_specs = fetch_openai_plugins_manifest_and_spec(config)
- if manifests_specs.keys():
- manifests_specs_clients = initialize_openai_plugins(
- manifests_specs, config, debug
- )
- for url, openai_plugin_meta in manifests_specs_clients.items():
- if not plugins_config.is_enabled(url):
- logger.warn(
- f"OpenAI Plugin {plugin_module_name} found but not configured"
- )
- continue
-
- plugin = BaseOpenAIPlugin(openai_plugin_meta)
- loaded_plugins.append(plugin)
-
- if loaded_plugins:
- logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
- for plugin in loaded_plugins:
- logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
- return loaded_plugins
diff --git a/autogpt/plugins/plugins_config.py b/autogpt/plugins/plugins_config.py
deleted file mode 100644
index 13b871303..000000000
--- a/autogpt/plugins/plugins_config.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from __future__ import annotations
-
-import os
-from typing import Union
-
-import yaml
-from pydantic import BaseModel
-
-from autogpt.logs import logger
-from autogpt.plugins.plugin_config import PluginConfig
-
-
-class PluginsConfig(BaseModel):
- """Class for holding configuration of all plugins"""
-
- plugins: dict[str, PluginConfig]
-
- def __repr__(self):
- return f"PluginsConfig({self.plugins})"
-
- def get(self, name: str) -> Union[PluginConfig, None]:
- return self.plugins.get(name)
-
- def is_enabled(self, name) -> bool:
- plugin_config = self.plugins.get(name)
- return plugin_config is not None and plugin_config.enabled
-
- @classmethod
- def load_config(
- cls,
- plugins_config_file: str,
- plugins_denylist: list[str],
- plugins_allowlist: list[str],
- ) -> "PluginsConfig":
- empty_config = cls(plugins={})
-
- try:
- config_data = cls.deserialize_config_file(
- plugins_config_file,
- plugins_denylist,
- plugins_allowlist,
- )
- if type(config_data) != dict:
- logger.error(
- f"Expected plugins config to be a dict, got {type(config_data)}, continuing without plugins"
- )
- return empty_config
- return cls(plugins=config_data)
-
- except BaseException as e:
- logger.error(
- f"Plugin config is invalid, continuing without plugins. Error: {e}"
- )
- return empty_config
-
- @classmethod
- def deserialize_config_file(
- cls,
- plugins_config_file: str,
- plugins_denylist: list[str],
- plugins_allowlist: list[str],
- ) -> dict[str, PluginConfig]:
- if not os.path.exists(plugins_config_file):
- logger.warn("plugins_config.yaml does not exist, creating base config.")
- cls.create_empty_plugins_config(
- plugins_config_file,
- plugins_denylist,
- plugins_allowlist,
- )
-
- with open(plugins_config_file, "r") as f:
- plugins_config = yaml.load(f, Loader=yaml.FullLoader)
-
- plugins = {}
- for name, plugin in plugins_config.items():
- if type(plugin) == dict:
- plugins[name] = PluginConfig(
- name=name,
- enabled=plugin.get("enabled", False),
- config=plugin.get("config", {}),
- )
- elif type(plugin) == PluginConfig:
- plugins[name] = plugin
- else:
- raise ValueError(f"Invalid plugin config data type: {type(plugin)}")
- return plugins
-
- @staticmethod
- def create_empty_plugins_config(
- plugins_config_file: str,
- plugins_denylist: list[str],
- plugins_allowlist: list[str],
- ):
- """Create an empty plugins_config.yaml file. Fill it with values from old env variables."""
- base_config = {}
-
- logger.debug(f"Legacy plugin denylist: {plugins_denylist}")
- logger.debug(f"Legacy plugin allowlist: {plugins_allowlist}")
-
- # Backwards-compatibility shim
- for plugin_name in plugins_denylist:
- base_config[plugin_name] = {"enabled": False, "config": {}}
-
- for plugin_name in plugins_allowlist:
- base_config[plugin_name] = {"enabled": True, "config": {}}
-
- logger.debug(f"Constructed base plugins config: {base_config}")
-
- logger.debug(f"Creating plugin config file {plugins_config_file}")
- with open(plugins_config_file, "w+") as f:
- f.write(yaml.dump(base_config))
- return base_config
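-
-
-# A hypothetical plugins_config.yaml produced by the shim above, given
-# plugins_denylist=["PluginA"] and plugins_allowlist=["PluginB"]
-# (yaml.dump sorts keys alphabetically):
-#
-#     PluginA:
-#       config: {}
-#       enabled: false
-#     PluginB:
-#       config: {}
-#       enabled: true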
diff --git a/autogpt/processing/html.py b/autogpt/processing/html.py
deleted file mode 100644
index 81387b12a..000000000
--- a/autogpt/processing/html.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""HTML processing functions"""
-from __future__ import annotations
-
-from bs4 import BeautifulSoup
-from requests.compat import urljoin
-
-
-def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
- """Extract hyperlinks from a BeautifulSoup object
-
- Args:
- soup (BeautifulSoup): The BeautifulSoup object
- base_url (str): The base URL
-
- Returns:
- List[Tuple[str, str]]: The extracted hyperlinks
- """
- return [
- (link.text, urljoin(base_url, link["href"]))
- for link in soup.find_all("a", href=True)
- ]
-
-
-def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
- """Format hyperlinks to be displayed to the user
-
- Args:
- hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
-
- Returns:
- List[str]: The formatted hyperlinks
- """
- return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py
deleted file mode 100644
index ddb64df18..000000000
--- a/autogpt/processing/text.py
+++ /dev/null
@@ -1,245 +0,0 @@
-"""Text processing functions"""
-from math import ceil
-from typing import Optional
-
-import spacy
-import tiktoken
-
-from autogpt.config import Config
-from autogpt.llm.base import ChatSequence
-from autogpt.llm.providers.openai import OPEN_AI_MODELS
-from autogpt.llm.utils import count_string_tokens, create_chat_completion
-from autogpt.logs import logger
-from autogpt.utils import batch
-
-
-def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
- model_max_input_tokens = OPEN_AI_MODELS[model].max_tokens - 1
- if max is not None and max > 0:
- return min(max, model_max_input_tokens)
- return model_max_input_tokens
-
-
-def must_chunk_content(
- text: str, for_model: str, max_chunk_length: Optional[int] = None
-) -> bool:
- return count_string_tokens(text, for_model) > _max_chunk_length(
- for_model, max_chunk_length
- )
-
-
-def chunk_content(
- content: str,
- for_model: str,
- max_chunk_length: Optional[int] = None,
- with_overlap=True,
-):
- """Split content into chunks of approximately equal token length."""
-
- MAX_OVERLAP = 200 # limit overlap to save tokens
-
- if not must_chunk_content(content, for_model, max_chunk_length):
- yield content, count_string_tokens(content, for_model)
- return
-
- max_chunk_length = max_chunk_length or _max_chunk_length(for_model)
-
- tokenizer = tiktoken.encoding_for_model(for_model)
-
- tokenized_text = tokenizer.encode(content)
- total_length = len(tokenized_text)
- n_chunks = ceil(total_length / max_chunk_length)
-
- chunk_length = ceil(total_length / n_chunks)
- overlap = min(max_chunk_length - chunk_length, MAX_OVERLAP) if with_overlap else 0
-
- for token_batch in batch(tokenized_text, chunk_length + overlap, overlap):
- yield tokenizer.decode(token_batch), len(token_batch)
-
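-# Illustrative arithmetic for chunk_content (hypothetical numbers): a
-# 1000-token text with max_chunk_length=400 gives n_chunks = ceil(1000/400)
-# = 3 and chunk_length = ceil(1000/3) = 334, with up to
-# min(400 - 334, 200) = 66 tokens of overlap between consecutive chunks.
-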
-
-def summarize_text(
- text: str,
- config: Config,
- instruction: Optional[str] = None,
- question: Optional[str] = None,
-) -> tuple[str, None | list[tuple[str, str]]]:
- """Summarize text using the OpenAI API
-
- Args:
- text (str): The text to summarize
- config (Config): The config object
- instruction (str): Additional instruction for summarization, e.g. "focus on information related to polar bears", "omit personal information contained in the text"
- question (str): Question to answer in the summary
-
- Returns:
- str: The summary of the text
- list[(summary, chunk)]: Text chunks and their summary, if the text was chunked.
- None otherwise.
- """
- if not text:
- raise ValueError("No text to summarize")
-
- if instruction and question:
- raise ValueError("Parameters 'question' and 'instructions' cannot both be set")
-
- model = config.fast_llm
-
- if question:
- instruction = (
- f'include any information that can be used to answer the question "{question}". '
- "Do not directly answer the question itself"
- )
-
- summarization_prompt = ChatSequence.for_model(model)
-
- token_length = count_string_tokens(text, model)
- logger.info(f"Text length: {token_length} tokens")
-
- # reserve 50 tokens for summary prompt, 500 for the response
- max_chunk_length = _max_chunk_length(model) - 550
- logger.info(f"Max chunk length: {max_chunk_length} tokens")
-
- if not must_chunk_content(text, model, max_chunk_length):
- # summarization_prompt.add("user", text)
- summarization_prompt.add(
- "user",
- "Write a concise summary of the following text"
- f"{f'; {instruction}' if instruction is not None else ''}:"
- "\n\n\n"
- f'LITERAL TEXT: """{text}"""'
- "\n\n\n"
- "CONCISE SUMMARY: The text is best summarized as"
- # "Only respond with a concise summary or description of the user message."
- )
-
- logger.debug(f"Summarizing with {model}:\n{summarization_prompt.dump()}\n")
- summary = create_chat_completion(
- prompt=summarization_prompt, config=config, temperature=0, max_tokens=500
- ).content
-
- logger.debug(f"\n{'-'*16} SUMMARY {'-'*17}\n{summary}\n{'-'*42}\n")
- return summary.strip(), None
-
- summaries: list[str] = []
- chunks = list(
- split_text(
- text, for_model=model, config=config, max_chunk_length=max_chunk_length
- )
- )
-
- for i, (chunk, chunk_length) in enumerate(chunks):
- logger.info(
- f"Summarizing chunk {i + 1} / {len(chunks)} of length {chunk_length} tokens"
- )
- summary, _ = summarize_text(chunk, config, instruction)
- summaries.append(summary)
-
- logger.info(f"Summarized {len(chunks)} chunks")
-
- summary, _ = summarize_text("\n\n".join(summaries), config)
-
- return summary.strip(), [
- (summaries[i], chunks[i][0]) for i in range(0, len(chunks))
- ]
-
-
-def split_text(
- text: str,
- for_model: str,
- config: Config,
- with_overlap=True,
- max_chunk_length: Optional[int] = None,
-):
- """Split text into chunks of sentences, with each chunk not exceeding the maximum length
-
- Args:
- text (str): The text to split
- for_model (str): The model to chunk for; determines tokenizer and constraints
- config (Config): The config object
- with_overlap (bool, optional): Whether to allow overlap between chunks
- max_chunk_length (int, optional): The maximum length of a chunk
-
-    Yields:
-        tuple[str, int]: The next chunk of text and its length in tokens.
-            Sentences longer than the maximum length are chopped into smaller
-            pieces rather than raising an error.
-    """
-
- max_length = _max_chunk_length(for_model, max_chunk_length)
-
- # flatten paragraphs to improve performance
- text = text.replace("\n", " ")
- text_length = count_string_tokens(text, for_model)
-
- if text_length < max_length:
- yield text, text_length
- return
-
- n_chunks = ceil(text_length / max_length)
- target_chunk_length = ceil(text_length / n_chunks)
-
- nlp: spacy.language.Language = spacy.load(config.browse_spacy_language_model)
- nlp.add_pipe("sentencizer")
- doc = nlp(text)
- sentences = [sentence.text.strip() for sentence in doc.sents]
-
- current_chunk: list[str] = []
- current_chunk_length = 0
- last_sentence = None
- last_sentence_length = 0
-
- i = 0
- while i < len(sentences):
- sentence = sentences[i]
- sentence_length = count_string_tokens(sentence, for_model)
- expected_chunk_length = current_chunk_length + 1 + sentence_length
-
- if (
- expected_chunk_length < max_length
- # try to create chunks of approximately equal size
- and expected_chunk_length - (sentence_length / 2) < target_chunk_length
- ):
- current_chunk.append(sentence)
- current_chunk_length = expected_chunk_length
-
- elif sentence_length < max_length:
- if last_sentence:
- yield " ".join(current_chunk), current_chunk_length
- current_chunk = []
- current_chunk_length = 0
-
- if with_overlap:
- overlap_max_length = max_length - sentence_length - 1
- if last_sentence_length < overlap_max_length:
- current_chunk += [last_sentence]
- current_chunk_length += last_sentence_length + 1
- elif overlap_max_length > 5:
- # add as much from the end of the last sentence as fits
- current_chunk += [
- list(
- chunk_content(
- last_sentence,
- for_model,
- overlap_max_length,
- )
- ).pop()[0],
- ]
- current_chunk_length += overlap_max_length + 1
-
- current_chunk += [sentence]
- current_chunk_length += sentence_length
-
- else: # sentence longer than maximum length -> chop up and try again
- sentences[i : i + 1] = [
- chunk
- for chunk, _ in chunk_content(sentence, for_model, target_chunk_length)
- ]
- continue
-
- i += 1
- last_sentence = sentence
- last_sentence_length = sentence_length
-
- if current_chunk:
- yield " ".join(current_chunk), current_chunk_length
diff --git a/autogpt/prompts/default_prompts.py b/autogpt/prompts/default_prompts.py
deleted file mode 100644
index ebbfa781c..000000000
--- a/autogpt/prompts/default_prompts.py
+++ /dev/null
@@ -1,29 +0,0 @@
-######################### Setup.py #################################
-
-DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC = """
-Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.
-
-The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.
-
-Example input:
-Help me with marketing my business
-
-Example output:
-Name: CMOGPT
-Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more.
-Goals:
-- Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.
-
-- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.
-
-- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.
-
-- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track.
-"""
-
-DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC = (
- "Task: '{{user_prompt}}'\n"
- "Respond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n"
-)
-
-DEFAULT_USER_DESIRE_PROMPT = "Write a wikipedia style article about the project: https://github.com/significant-gravitas/Auto-GPT" # Default prompt
diff --git a/autogpt/prompts/generator.py b/autogpt/prompts/generator.py
deleted file mode 100644
index bc836f30c..000000000
--- a/autogpt/prompts/generator.py
+++ /dev/null
@@ -1,180 +0,0 @@
-""" A module for generating custom prompt strings."""
-from __future__ import annotations
-
-import json
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypedDict
-
-from autogpt.config import Config
-from autogpt.json_utils.utilities import llm_response_schema
-
-if TYPE_CHECKING:
- from autogpt.models.command_registry import CommandRegistry
-
-
-class PromptGenerator:
- """
- A class for generating custom prompt strings based on constraints, commands,
- resources, and performance evaluations.
- """
-
- class Command(TypedDict):
- label: str
- name: str
- params: dict[str, str]
- function: Optional[Callable]
-
- constraints: list[str]
- commands: list[Command]
- resources: list[str]
- performance_evaluation: list[str]
- command_registry: CommandRegistry | None
-
- # TODO: replace with AIConfig
- name: str
- role: str
- goals: list[str]
-
- def __init__(self):
- self.constraints = []
- self.commands = []
- self.resources = []
- self.performance_evaluation = []
- self.command_registry = None
-
- self.name = "Bob"
- self.role = "AI"
- self.goals = []
-
- def add_constraint(self, constraint: str) -> None:
- """
- Add a constraint to the constraints list.
-
- Args:
- constraint (str): The constraint to be added.
- """
- self.constraints.append(constraint)
-
- def add_command(
- self,
- command_label: str,
- command_name: str,
- params: dict[str, str] = {},
- function: Optional[Callable] = None,
- ) -> None:
- """
- Add a command to the commands list with a label, name, and optional arguments.
-
- *Should only be used by plugins.* Native commands should be added
- directly to the CommandRegistry.
-
- Args:
- command_label (str): The label of the command.
- command_name (str): The name of the command.
- params (dict, optional): A dictionary containing argument names and their
- values. Defaults to an empty dictionary.
- function (callable, optional): A callable function to be called when
- the command is executed. Defaults to None.
- """
- command_params = {name: type for name, type in params.items()}
-
- command: PromptGenerator.Command = {
- "label": command_label,
- "name": command_name,
- "params": command_params,
- "function": function,
- }
-
- self.commands.append(command)
-
- def _generate_command_string(self, command: Dict[str, Any]) -> str:
- """
- Generate a formatted string representation of a command.
-
- Args:
- command (dict): A dictionary containing command information.
-
- Returns:
- str: The formatted command string.
- """
- params_string = ", ".join(
- f'"{key}": "{value}"' for key, value in command["params"].items()
- )
- return f'{command["label"]}: "{command["name"]}", params: {params_string}'
-
- def add_resource(self, resource: str) -> None:
- """
- Add a resource to the resources list.
-
- Args:
- resource (str): The resource to be added.
- """
- self.resources.append(resource)
-
- def add_performance_evaluation(self, evaluation: str) -> None:
- """
- Add a performance evaluation item to the performance_evaluation list.
-
- Args:
- evaluation (str): The evaluation item to be added.
- """
- self.performance_evaluation.append(evaluation)
-
- def _generate_numbered_list(self, items: List[Any], item_type="list") -> str:
- """
- Generate a numbered list from given items based on the item_type.
-
- Args:
- items (list): A list of items to be numbered.
- item_type (str, optional): The type of items in the list.
- Defaults to 'list'.
-
- Returns:
- str: The formatted numbered list.
- """
- if item_type == "command":
- command_strings = []
- if self.command_registry:
- command_strings += [
- str(item)
- for item in self.command_registry.commands.values()
- if item.enabled
- ]
- # terminate command is added manually
- command_strings += [self._generate_command_string(item) for item in items]
- return "\n".join(f"{i+1}. {item}" for i, item in enumerate(command_strings))
- else:
- return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
-
- def generate_prompt_string(self, config: Config) -> str:
- """
- Generate a prompt string based on the constraints, commands, resources,
- and performance evaluations.
-
- Returns:
- str: The generated prompt string.
- """
- return (
- f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
- f"{generate_commands(self, config)}"
- f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
- "Performance Evaluation:\n"
- f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
- "Respond with only valid JSON conforming to the following schema: \n"
- f"{json.dumps(llm_response_schema(config))}\n"
- )
-
-
-def generate_commands(self, config: Config) -> str:
- """
- Generate the commands section of the prompt, or an empty string if
- OpenAI functions are enabled (commands are then passed to the API natively).
-
- Returns:
- str: The formatted commands section of the prompt.
- """
- if config.openai_functions:
- return ""
- return (
- "Commands:\n"
- f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
- )
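As a usage sketch of the PromptGenerator API above (at the revision where this module still existed; the strings are illustrative, and only plugins were meant to call add_command):

from autogpt.prompts.generator import PromptGenerator

generator = PromptGenerator()
generator.add_constraint("~4000 word limit for short term memory.")
generator.add_resource("Internet access for searches and information gathering.")
generator.add_performance_evaluation("Continuously review and analyze your actions.")
generator.add_command(
    command_label="Task Complete (Shutdown)",
    command_name="task_complete",
    params={"reason": "string"},
)

# With no CommandRegistry attached, only the manually added commands render:
# 1. Task Complete (Shutdown): "task_complete", params: "reason": "string"
print(generator._generate_numbered_list(generator.commands, item_type="command"))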
diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py
deleted file mode 100644
index b5a0ec882..000000000
--- a/autogpt/prompts/prompt.py
+++ /dev/null
@@ -1,132 +0,0 @@
-from typing import Optional
-
-from colorama import Fore
-
-from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import Config
-from autogpt.config.prompt_config import PromptConfig
-from autogpt.llm.api_manager import ApiManager
-from autogpt.logs import logger
-from autogpt.prompts.generator import PromptGenerator
-from autogpt.setup import prompt_user
-from autogpt.utils import clean_input
-
-DEFAULT_TRIGGERING_PROMPT = "Determine exactly one command to use, and respond using the JSON schema specified previously:"
-
-
-def build_default_prompt_generator(config: Config) -> PromptGenerator:
- """
- Build a PromptGenerator pre-loaded with the constraints, resources, and
- performance evaluations from the prompt settings file.
-
- Returns:
- PromptGenerator: The populated prompt generator.
- """
-
- # Initialize the PromptGenerator object
- prompt_generator = PromptGenerator()
-
- # Initialize the PromptConfig object and load the file set in the main config (default: prompts_settings.yaml)
- prompt_config = PromptConfig(config.prompt_settings_file)
-
- # Add constraints to the PromptGenerator object
- for constraint in prompt_config.constraints:
- prompt_generator.add_constraint(constraint)
-
- # Add resources to the PromptGenerator object
- for resource in prompt_config.resources:
- prompt_generator.add_resource(resource)
-
- # Add performance evaluations to the PromptGenerator object
- for performance_evaluation in prompt_config.performance_evaluations:
- prompt_generator.add_performance_evaluation(performance_evaluation)
-
- return prompt_generator
-
-
-def construct_main_ai_config(
- config: Config,
- name: Optional[str] = None,
- role: Optional[str] = None,
- goals: tuple[str, ...] = tuple(),
-) -> AIConfig:
- """Construct the prompt for the AI to respond to
-
- Returns:
- str: The prompt string
- """
- ai_config = AIConfig.load(config.ai_settings_file)
-
- # Apply overrides
- if name:
- ai_config.ai_name = name
- if role:
- ai_config.ai_role = role
- if goals:
- ai_config.ai_goals = list(goals)
-
- if (
- all([name, role, goals])
- or (config.skip_reprompt
- and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]))
- ):
- logger.typewriter_log("Name :", Fore.GREEN, ai_config.ai_name)
- logger.typewriter_log("Role :", Fore.GREEN, ai_config.ai_role)
- logger.typewriter_log("Goals:", Fore.GREEN, f"{ai_config.ai_goals}")
- logger.typewriter_log(
- "API Budget:",
- Fore.GREEN,
- "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}",
- )
- elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]):
- logger.typewriter_log(
- "Welcome back! ",
- Fore.GREEN,
- f"Would you like me to return to being {ai_config.ai_name}?",
- speak_text=True,
- )
- should_continue = clean_input(
- config,
- f"""Continue with the last settings?
-Name: {ai_config.ai_name}
-Role: {ai_config.ai_role}
-Goals: {ai_config.ai_goals}
-API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"}
-Continue ({config.authorise_key}/{config.exit_key}): """,
- )
- if should_continue.lower() == config.exit_key:
- ai_config = AIConfig()
-
- if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]):
- ai_config = prompt_user(config)
- ai_config.save(config.ai_settings_file)
-
- if config.restrict_to_workspace:
- logger.typewriter_log(
- "NOTE:All files/directories created by this agent can be found inside its workspace at:",
- Fore.YELLOW,
- f"{config.workspace_path}",
- )
- # set the total api budget
- api_manager = ApiManager()
- api_manager.set_total_budget(ai_config.api_budget)
-
- # Agent Created, print message
- logger.typewriter_log(
- ai_config.ai_name,
- Fore.LIGHTBLUE_EX,
- "has been created with the following details:",
- speak_text=True,
- )
-
- # Print the ai_config details
- # Name
- logger.typewriter_log("Name:", Fore.GREEN, ai_config.ai_name, speak_text=False)
- # Role
- logger.typewriter_log("Role:", Fore.GREEN, ai_config.ai_role, speak_text=False)
- # Goals
- logger.typewriter_log("Goals:", Fore.GREEN, "", speak_text=False)
- for goal in ai_config.ai_goals:
- logger.typewriter_log("-", Fore.GREEN, goal, speak_text=False)
-
- return ai_config
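One subtlety in construct_main_ai_config above: in the reuse condition, `and` binds tighter than `or`, so command-line overrides alone are sufficient, while --skip-reprompt only applies when the saved config is complete. A quick illustration of the equivalent boolean with hypothetical values:

# mirrors the reuse condition in construct_main_ai_config
overrides_given = False        # all([name, role, goals])
skip_reprompt = True           # config.skip_reprompt
saved_config_complete = True   # all([ai_name, ai_role, ai_goals])

reuse_without_prompt = overrides_given or (skip_reprompt and saved_config_complete)
print(reuse_without_prompt)  # True: --skip-reprompt with a complete ai_settings.yaml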
diff --git a/autogpt/setup.py b/autogpt/setup.py
deleted file mode 100644
index fc4292432..000000000
--- a/autogpt/setup.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""Set up the AI and its goals"""
-import re
-from typing import Optional
-
-from colorama import Fore, Style
-from jinja2 import Template
-
-from autogpt import utils
-from autogpt.config import Config
-from autogpt.config.ai_config import AIConfig
-from autogpt.llm.base import ChatSequence, Message
-from autogpt.llm.chat import create_chat_completion
-from autogpt.logs import logger
-from autogpt.prompts.default_prompts import (
- DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
- DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC,
- DEFAULT_USER_DESIRE_PROMPT,
-)
-
-
-def prompt_user(
- config: Config, ai_config_template: Optional[AIConfig] = None
-) -> AIConfig:
- """Prompt the user for input
-
- Params:
- config (Config): The Config object
- ai_config_template (AIConfig): The AIConfig object to use as a template
-
- Returns:
- AIConfig: The AIConfig object tailored to the user's input
- """
-
- # Construct the prompt
- logger.typewriter_log(
- "Welcome to Auto-GPT! ",
- Fore.GREEN,
- "run with '--help' for more information.",
- speak_text=True,
- )
-
- ai_config_template_provided = ai_config_template is not None and any(
- [
- ai_config_template.ai_goals,
- ai_config_template.ai_name,
- ai_config_template.ai_role,
- ]
- )
-
- user_desire = ""
- if not ai_config_template_provided:
- # Get user desire if command line overrides have not been passed in
- logger.typewriter_log(
- "Create an AI-Assistant:",
- Fore.GREEN,
- "input '--manual' to enter manual mode.",
- speak_text=True,
- )
-
- user_desire = utils.clean_input(
- config, f"{Fore.LIGHTBLUE_EX}I want Auto-GPT to{Style.RESET_ALL}: "
- )
-
- if user_desire.strip() == "":
- user_desire = DEFAULT_USER_DESIRE_PROMPT # Default prompt
-
- # If user desire contains "--manual" or we have overridden any of the AI configuration
- if "--manual" in user_desire or ai_config_template_provided:
- logger.typewriter_log(
- "Manual Mode Selected",
- Fore.GREEN,
- speak_text=True,
- )
- return generate_aiconfig_manual(config, ai_config_template)
-
- else:
- try:
- return generate_aiconfig_automatic(user_desire, config)
- except Exception as e:
- logger.typewriter_log(
- "Unable to automatically generate AI Config based on user desire.",
- Fore.RED,
- "Falling back to manual mode.",
- speak_text=True,
- )
-
- return generate_aiconfig_manual(config)
-
-
-def generate_aiconfig_manual(
- config: Config, ai_config_template: Optional[AIConfig] = None
-) -> AIConfig:
- """
- Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI.
-
- This function guides the user through a series of prompts to collect the necessary information to create
- an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five
- goals. If the user does not provide a value for any of the fields, default values will be used.
-
- Params:
- config (Config): The Config object
- ai_config_template (AIConfig): The AIConfig object to use as a template
-
- Returns:
- AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals.
- """
-
- # Manual Setup Intro
- logger.typewriter_log(
- "Create an AI-Assistant:",
- Fore.GREEN,
- "Enter the name of your AI and its role below. Entering nothing will load"
- " defaults.",
- speak_text=True,
- )
-
- if ai_config_template and ai_config_template.ai_name:
- ai_name = ai_config_template.ai_name
- else:
- ai_name = ""
- # Get AI Name from User
- logger.typewriter_log(
- "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
- )
- ai_name = utils.clean_input(config, "AI Name: ")
- if ai_name == "":
- ai_name = "Entrepreneur-GPT"
-
- logger.typewriter_log(
- f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
- )
-
- if ai_config_template and ai_config_template.ai_role:
- ai_role = ai_config_template.ai_role
- else:
- # Get AI Role from User
- logger.typewriter_log(
- "Describe your AI's role: ",
- Fore.GREEN,
- "For example, 'an AI designed to autonomously develop and run businesses with"
- " the sole goal of increasing your net worth.'",
- )
- ai_role = utils.clean_input(config, f"{ai_name} is: ")
- if ai_role == "":
- ai_role = "an AI designed to autonomously develop and run businesses with the"
- " sole goal of increasing your net worth."
-
- if ai_config_template and ai_config_template.ai_goals:
- ai_goals = ai_config_template.ai_goals
- else:
- # Enter up to 5 goals for the AI
- logger.typewriter_log(
- "Enter up to 5 goals for your AI: ",
- Fore.GREEN,
- "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
- " multiple businesses autonomously'",
- )
- logger.info("Enter nothing to load defaults, enter nothing when finished.")
- ai_goals = []
- for i in range(5):
- ai_goal = utils.clean_input(
- config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: "
- )
- if ai_goal == "":
- break
- ai_goals.append(ai_goal)
- if not ai_goals:
- ai_goals = [
- "Increase net worth",
- "Grow Twitter Account",
- "Develop and manage multiple businesses autonomously",
- ]
-
- # Get API Budget from User
- logger.typewriter_log(
- "Enter your budget for API calls: ",
- Fore.GREEN,
- "For example: $1.50",
- )
- logger.info("Enter nothing to let the AI run without monetary limit")
- api_budget_input = utils.clean_input(
- config, f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
- )
- if api_budget_input == "":
- api_budget = 0.0
- else:
- try:
- api_budget = float(api_budget_input.replace("$", ""))
- except ValueError:
- logger.typewriter_log(
- "Invalid budget input. Setting budget to unlimited.", Fore.RED
- )
- api_budget = 0.0
-
- return AIConfig(ai_name, ai_role, ai_goals, api_budget)
-
-
-def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig:
- """Generates an AIConfig object from the given string.
-
- Returns:
- AIConfig: The AIConfig object tailored to the user's input
- """
-
- system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC
- prompt_ai_config_automatic = Template(
- DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
- ).render(user_prompt=user_prompt)
- # Call LLM with the string as user input
- output = create_chat_completion(
- ChatSequence.for_model(
- config.fast_llm,
- [
- Message("system", system_prompt),
- Message("user", prompt_ai_config_automatic),
- ],
- ),
- config,
- ).content
-
- # Debug LLM Output
- logger.debug(f"AI Config Generator Raw Output: {output}")
-
- # Parse the output
- ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
- ai_role = (
- re.search(
- r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
- output,
- re.IGNORECASE | re.DOTALL,
- )
- .group(1)
- .strip()
- )
- ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)
- api_budget = 0.0 # TODO: parse api budget using a regular expression
-
- return AIConfig(ai_name, ai_role, ai_goals, api_budget)
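To see how the regexes in generate_aiconfig_automatic pick the LLM output apart, here is a self-contained re-run of the same patterns against output shaped like the documented example (the sample text is illustrative):

import re

output = """Name: CMOGPT
Description: a professional digital marketer AI that assists Solopreneurs.
Goals:
- Engage in effective problem-solving.
- Provide specific, actionable, and concise advice.
"""

ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
ai_role = (
    re.search(
        r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)",
        output,
        re.IGNORECASE | re.DOTALL,
    )
    .group(1)
    .strip()
)
ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)

print(ai_name)   # CMOGPT
print(ai_role)   # a professional digital marketer AI that assists Solopreneurs.
print(ai_goals)  # ['Engage in effective problem-solving.', 'Provide specific, ...']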
diff --git a/autogpt/singleton.py b/autogpt/singleton.py
deleted file mode 100644
index b3a5af529..000000000
--- a/autogpt/singleton.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""The singleton metaclass for ensuring only one instance of a class."""
-import abc
-
-
-class Singleton(abc.ABCMeta, type):
- """
- Singleton metaclass for ensuring only one instance of a class.
- """
-
- _instances = {}
-
- def __call__(cls, *args, **kwargs):
- """Call method for the singleton metaclass."""
- if cls not in cls._instances:
- cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
- return cls._instances[cls]
-
-
-class AbstractSingleton(abc.ABC, metaclass=Singleton):
- """
- Abstract singleton class for ensuring only one instance of a class.
- """
diff --git a/autogpt/speech/__init__.py b/autogpt/speech/__init__.py
deleted file mode 100644
index 2ff0d2bf4..000000000
--- a/autogpt/speech/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""This module contains the speech recognition and speech synthesis functions."""
-from autogpt.speech.say import say_text
-
-__all__ = ["say_text"]
diff --git a/autogpt/speech/base.py b/autogpt/speech/base.py
deleted file mode 100644
index b3dd03bdb..000000000
--- a/autogpt/speech/base.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""Base class for all voice classes."""
-from __future__ import annotations
-
-import abc
-import re
-from threading import Lock
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
- from autogpt.config import Config
-
-from autogpt.singleton import AbstractSingleton
-
-
-class VoiceBase(AbstractSingleton):
- """
- Base class for all voice classes.
- """
-
- def __init__(self, config: Config):
- """
- Initialize the voice class.
- """
- self._url = None
- self._headers = None
- self._api_key = None
- self._voices = []
- self._mutex = Lock()
- self._setup(config)
-
- def say(self, text: str, voice_index: int = 0) -> bool:
- """
- Say the given text.
-
- Args:
- text (str): The text to say.
- voice_index (int): The index of the voice to use.
- """
- text = re.sub(
- r"\b(?:https?://[-\w_.]+/?\w[-\w_.]*\.(?:[-\w_.]+/?\w[-\w_.]*\.)?[a-z]+(?:/[-\w_.%]+)*\b(?!\.))",
- "",
- text,
- )
- with self._mutex:
- return self._speech(text, voice_index)
-
- @abc.abstractmethod
- def _setup(self, config: Config) -> None:
- """
- Setup the voices, API key, etc.
- """
-
- @abc.abstractmethod
- def _speech(self, text: str, voice_index: int = 0) -> bool:
- """
- Play the given text.
-
- Args:
- text (str): The text to play.
- """
diff --git a/autogpt/speech/eleven_labs.py b/autogpt/speech/eleven_labs.py
deleted file mode 100644
index 815f698c6..000000000
--- a/autogpt/speech/eleven_labs.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""ElevenLabs speech module"""
-from __future__ import annotations
-
-import os
-from typing import TYPE_CHECKING
-
-import requests
-from playsound import playsound
-
-if TYPE_CHECKING:
- from autogpt.config import Config
-from .base import VoiceBase
-
-PLACEHOLDERS = {"your-voice-id"}
-
-
-class ElevenLabsSpeech(VoiceBase):
- """ElevenLabs speech class"""
-
- def _setup(self, config: Config) -> None:
- """Set up the voices, API key, etc.
-
- Returns:
- None: None
- """
-
- default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
- voice_options = {
- "Rachel": "21m00Tcm4TlvDq8ikWAM",
- "Domi": "AZnzlk1XvdvUeBnXmlld",
- "Bella": "EXAVITQu4vr4xnSDxMaL",
- "Antoni": "ErXwobaYiN019PkySvjV",
- "Elli": "MF3mGyEYCl7XYWbV9V6O",
- "Josh": "TxGEqnHWrfWFTfGW9XjX",
- "Arnold": "VR6AewLTigWG4xSOukaG",
- "Adam": "pNInz6obpgDQGcFmaJgB",
- "Sam": "yoZ06aMxZJJ28mfd3POQ",
- }
- self._headers = {
- "Content-Type": "application/json",
- "xi-api-key": config.elevenlabs_api_key,
- }
- self._voices = default_voices.copy()
- if config.elevenlabs_voice_id in voice_options:
- config.elevenlabs_voice_id = voice_options[config.elevenlabs_voice_id]
- if config.elevenlabs_voice_2_id in voice_options:
- config.elevenlabs_voice_2_id = voice_options[config.elevenlabs_voice_2_id]
- self._use_custom_voice(config.elevenlabs_voice_id, 0)
- self._use_custom_voice(config.elevenlabs_voice_2_id, 1)
-
- def _use_custom_voice(self, voice, voice_index) -> None:
- """Use a custom voice if provided and not a placeholder
-
- Args:
- voice (str): The voice ID
- voice_index (int): The voice index
-
- Returns:
- None: None
- """
- # Placeholder values that should be treated as empty
- if voice and voice not in PLACEHOLDERS:
- self._voices[voice_index] = voice
-
- def _speech(self, text: str, voice_index: int = 0) -> bool:
- """Speak text using elevenlabs.io's API
-
- Args:
- text (str): The text to speak
- voice_index (int, optional): The voice to use. Defaults to 0.
-
- Returns:
- bool: True if the request was successful, False otherwise
- """
- from autogpt.logs import logger
-
- tts_url = (
- f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
- )
- response = requests.post(tts_url, headers=self._headers, json={"text": text})
-
- if response.status_code == 200:
- with open("speech.mpeg", "wb") as f:
- f.write(response.content)
- playsound("speech.mpeg", True)
- os.remove("speech.mpeg")
- return True
- else:
- logger.warn("Request failed with status code:", response.status_code)
- logger.info("Response content:", response.content)
- return False
diff --git a/autogpt/speech/gtts.py b/autogpt/speech/gtts.py
deleted file mode 100644
index deef4f270..000000000
--- a/autogpt/speech/gtts.py
+++ /dev/null
@@ -1,23 +0,0 @@
-""" GTTS Voice. """
-import os
-
-import gtts
-from playsound import playsound
-
-from autogpt.config import Config
-from autogpt.speech.base import VoiceBase
-
-
-class GTTSVoice(VoiceBase):
- """GTTS Voice."""
-
- def _setup(self, config: Config) -> None:
- pass
-
- def _speech(self, text: str, _: int = 0) -> bool:
- """Play the given text."""
- tts = gtts.gTTS(text)
- tts.save("speech.mp3")
- playsound("speech.mp3", True)
- os.remove("speech.mp3")
- return True
diff --git a/autogpt/speech/macos_tts.py b/autogpt/speech/macos_tts.py
deleted file mode 100644
index e48522cf9..000000000
--- a/autogpt/speech/macos_tts.py
+++ /dev/null
@@ -1,22 +0,0 @@
-""" MacOS TTS Voice. """
-import os
-
-from autogpt.config import Config
-from autogpt.speech.base import VoiceBase
-
-
-class MacOSTTS(VoiceBase):
- """MacOS TTS Voice."""
-
- def _setup(self, config: Config) -> None:
- pass
-
- def _speech(self, text: str, voice_index: int = 0) -> bool:
- """Play the given text."""
- if voice_index == 0:
- os.system(f'say "{text}"')
- elif voice_index == 1:
- os.system(f'say -v "Ava (Premium)" "{text}"')
- else:
- os.system(f'say -v Samantha "{text}"')
- return True
diff --git a/autogpt/speech/say.py b/autogpt/speech/say.py
deleted file mode 100644
index 5d04c76f9..000000000
--- a/autogpt/speech/say.py
+++ /dev/null
@@ -1,50 +0,0 @@
-""" Text to speech module """
-from __future__ import annotations
-
-import threading
-from threading import Semaphore
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
- from autogpt.config import Config
-
-from .base import VoiceBase
-from .eleven_labs import ElevenLabsSpeech
-from .gtts import GTTSVoice
-from .macos_tts import MacOSTTS
-from .stream_elements_speech import StreamElementsSpeech
-
-_QUEUE_SEMAPHORE = Semaphore(
- 1
-) # The amount of sounds to queue before blocking the main thread
-
-
-def say_text(text: str, config: Config, voice_index: int = 0) -> None:
- """Speak the given text using the given voice index"""
- default_voice_engine, voice_engine = _get_voice_engine(config)
-
- def speak() -> None:
- success = voice_engine.say(text, voice_index)
- if not success:
- default_voice_engine.say(text)
-
- _QUEUE_SEMAPHORE.release()
-
- _QUEUE_SEMAPHORE.acquire(True)
- thread = threading.Thread(target=speak)
- thread.start()
-
-
-def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
- """Get the voice engine to use for the given configuration"""
- tts_provider = config.text_to_speech_provider
- if tts_provider == "elevenlabs":
- voice_engine = ElevenLabsSpeech(config)
- elif tts_provider == "macos":
- voice_engine = MacOSTTS(config)
- elif tts_provider == "streamelements":
- voice_engine = StreamElementsSpeech(config)
- else:
- voice_engine = GTTSVoice(config)
-
- return GTTSVoice(config), voice_engine
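The Semaphore(1) in say_text above is what serializes playback: with a value of 1, at most one utterance is in flight and additional callers block until it finishes. A standalone sketch of the same pattern, with print/sleep standing in for the TTS engine:

import threading
import time
from threading import Semaphore

queue_slots = Semaphore(1)  # same role as _QUEUE_SEMAPHORE above

def speak_async(text: str) -> None:
    queue_slots.acquire(True)  # blocks while a sound is already playing

    def speak() -> None:
        time.sleep(0.1)  # stand-in for the engine playing audio
        print(f"spoke: {text}")
        queue_slots.release()

    threading.Thread(target=speak).start()

for line in ("first", "second", "third"):
    speak_async(line)  # each call waits for the previous sound to finish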
diff --git a/autogpt/speech/stream_elements_speech.py b/autogpt/speech/stream_elements_speech.py
deleted file mode 100644
index e4e4e8bfe..000000000
--- a/autogpt/speech/stream_elements_speech.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import logging
-import os
-
-import requests
-from playsound import playsound
-
-from autogpt.config import Config
-from autogpt.speech.base import VoiceBase
-
-
-class StreamElementsSpeech(VoiceBase):
- """Streamelements speech module for autogpt"""
-
- def _setup(self, config: Config) -> None:
- """Setup the voice, API key, etc."""
- # The base class invokes `_speech(text, voice_index)`, so the voice name
- # is taken from the config here rather than being passed per call.
- # (Assumes Config exposes `streamelements_voice`, set via STREAMELEMENTS_VOICE.)
- self._voice = config.streamelements_voice
-
- def _speech(self, text: str, voice_index: int = 0) -> bool:
- """Speak text using the streamelements API
-
- Args:
- text (str): The text to speak
- voice_index (int): Unused; kept for compatibility with VoiceBase
-
- Returns:
- bool: True if the request was successful, False otherwise
- """
- tts_url = (
- "https://api.streamelements.com/kappa/v2/speech"
- f"?voice={self._voice}&text={text}"
- )
- response = requests.get(tts_url)
-
- if response.status_code == 200:
- with open("speech.mp3", "wb") as f:
- f.write(response.content)
- playsound("speech.mp3")
- os.remove("speech.mp3")
- return True
- else:
- logging.error(
- "Request failed with status code: %s, response content: %s",
- response.status_code,
- response.content,
- )
- return False
diff --git a/autogpt/spinner.py b/autogpt/spinner.py
deleted file mode 100644
index 491e7e8d3..000000000
--- a/autogpt/spinner.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""A simple spinner module"""
-import itertools
-import sys
-import threading
-import time
-
-
-class Spinner:
- """A simple spinner class"""
-
- def __init__(
- self,
- message: str = "Loading...",
- delay: float = 0.1,
- plain_output: bool = False,
- ) -> None:
- """Initialize the spinner class
-
- Args:
- message (str): The message to display.
- delay (float): The delay between each spinner update.
- plain_output (bool): Whether to display the spinner or not.
- """
- self.plain_output = plain_output
- self.spinner = itertools.cycle(["-", "/", "|", "\\"])
- self.delay = delay
- self.message = message
- self.running = False
- self.spinner_thread = None
-
- def spin(self) -> None:
- """Spin the spinner"""
- if self.plain_output:
- self.print_message()
- return
- while self.running:
- self.print_message()
- time.sleep(self.delay)
-
- def print_message(self):
- sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
- sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
- sys.stdout.flush()
-
- def __enter__(self):
- """Start the spinner"""
- self.running = True
- self.spinner_thread = threading.Thread(target=self.spin)
- self.spinner_thread.start()
-
- return self
-
- def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
- """Stop the spinner
-
- Args:
- exc_type (Exception): The exception type.
- exc_value (Exception): The exception value.
- exc_traceback (Exception): The exception traceback.
- """
- self.running = False
- if self.spinner_thread is not None:
- self.spinner_thread.join()
- sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
- sys.stdout.flush()
-
- def update_message(self, new_message, delay=0.1):
- """Update the spinner message
- Args:
- new_message (str): New message to display.
- delay (float): The delay in seconds between each spinner update.
- """
- self.delay = delay
- self.message = new_message
- if self.plain_output:
- self.print_message()
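Typical usage of the Spinner above is as a context manager; update_message changes the text mid-run (assuming the module is importable at this revision):

import time

from autogpt.spinner import Spinner

with Spinner("Thinking...") as spinner:
    time.sleep(1)
    spinner.update_message("Almost done...")
    time.sleep(1)
# on exit, the spinner thread is joined and the line is cleared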
diff --git a/autogpt/url_utils/validators.py b/autogpt/url_utils/validators.py
deleted file mode 100644
index 7580774ba..000000000
--- a/autogpt/url_utils/validators.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import functools
-import re
-from typing import Any, Callable
-from urllib.parse import urljoin, urlparse
-
-
-def validate_url(func: Callable[..., Any]) -> Any:
- """The method decorator validate_url is used to validate urls for any command that requires
- a url as an argument"""
-
- @functools.wraps(func)
- def wrapper(url: str, *args, **kwargs) -> Any:
- """Check if the URL is valid using a basic check, urllib check, and local file check
-
- Args:
- url (str): The URL to check
-
- Returns:
- the result of the wrapped function
-
- Raises:
- ValueError if the url fails any of the validation tests
- """
- # Most basic check if the URL is valid:
- if not re.match(r"^https?://", url):
- raise ValueError("Invalid URL format")
- if not is_valid_url(url):
- raise ValueError("Missing Scheme or Network location")
- # Restrict access to local files
- if check_local_file_access(url):
- raise ValueError("Access to local files is restricted")
- # Check URL length
- if len(url) > 2000:
- raise ValueError("URL is too long")
-
- return func(sanitize_url(url), *args, **kwargs)
-
- return wrapper
-
-
-def is_valid_url(url: str) -> bool:
- """Check if the URL is valid
-
- Args:
- url (str): The URL to check
-
- Returns:
- bool: True if the URL is valid, False otherwise
- """
- try:
- result = urlparse(url)
- return all([result.scheme, result.netloc])
- except ValueError:
- return False
-
-
-def sanitize_url(url: str) -> str:
- """Sanitize the URL
-
- Args:
- url (str): The URL to sanitize
-
- Returns:
- str: The sanitized URL
- """
- parsed_url = urlparse(url)
- reconstructed_url = f"{parsed_url.path}{parsed_url.params}?{parsed_url.query}"
- return urljoin(url, reconstructed_url)
-
-
-def check_local_file_access(url: str) -> bool:
- """Check if the URL is a local file
-
- Args:
- url (str): The URL to check
-
- Returns:
- bool: True if the URL is a local file, False otherwise
- """
- local_prefixes = [
- "file:///",
- "file://localhost/",
- "file://localhost",
- "http://localhost",
- "http://localhost/",
- "https://localhost",
- "https://localhost/",
- "http://2130706433",
- "http://2130706433/",
- "https://2130706433",
- "https://2130706433/",
- "http://127.0.0.1/",
- "http://127.0.0.1",
- "https://127.0.0.1/",
- "https://127.0.0.1",
- "https://0.0.0.0/",
- "https://0.0.0.0",
- "http://0.0.0.0/",
- "http://0.0.0.0",
- "http://0000",
- "http://0000/",
- "https://0000",
- "https://0000/",
- ]
- return any(url.startswith(prefix) for prefix in local_prefixes)
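Applying the decorator above to any URL-taking command wires in all four checks (format regex, scheme/netloc parse, local-file block, length limit). A small sketch; fetch is hypothetical:

from autogpt.url_utils.validators import validate_url

@validate_url
def fetch(url: str) -> str:
    return f"would fetch {url}"

print(fetch("https://example.com/page"))  # passes; the URL is run through sanitize_url

try:
    fetch("file:///etc/passwd")
except ValueError as e:
    print(e)  # Invalid URL format (fails the https?:// check)

try:
    fetch("http://localhost/admin")
except ValueError as e:
    print(e)  # Access to local files is restricted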
diff --git a/autogpt/utils.py b/autogpt/utils.py
deleted file mode 100644
index 9eb6cbe4b..000000000
--- a/autogpt/utils.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import os
-import re
-
-import requests
-import yaml
-from colorama import Fore, Style
-from git.repo import Repo
-from prompt_toolkit import ANSI, PromptSession
-from prompt_toolkit.history import InMemoryHistory
-
-from autogpt.config import Config
-from autogpt.logs import logger
-
-session = PromptSession(history=InMemoryHistory())
-
-
-def batch(iterable, max_batch_length: int, overlap: int = 0):
- """Batch data from iterable into slices of length N. The last batch may be shorter."""
- # batched('ABCDEFG', 3) --> ABC DEF G
- if max_batch_length < 1:
- raise ValueError("max_batch_length must be at least one")
- for i in range(0, len(iterable), max_batch_length - overlap):
- yield iterable[i : i + max_batch_length]
-
-
-def clean_input(config: Config, prompt: str = "", talk=False):
- try:
- if config.chat_messages_enabled:
- for plugin in config.plugins:
- if not hasattr(plugin, "can_handle_user_input"):
- continue
- if not plugin.can_handle_user_input(user_input=prompt):
- continue
- plugin_response = plugin.user_input(user_input=prompt)
- if not plugin_response:
- continue
- if plugin_response.lower() in [
- "yes",
- "yeah",
- "y",
- "ok",
- "okay",
- "sure",
- "alright",
- ]:
- return config.authorise_key
- elif plugin_response.lower() in [
- "no",
- "nope",
- "n",
- "negative",
- ]:
- return config.exit_key
- return plugin_response
-
- # ask for input, default when just pressing Enter is y
- logger.info("Asking user via keyboard...")
- answer = session.prompt(ANSI(prompt))
- return answer
- except KeyboardInterrupt:
- logger.info("You interrupted Auto-GPT")
- logger.info("Quitting...")
- exit(0)
-
-
-def validate_yaml_file(file: str):
- try:
- with open(file, encoding="utf-8") as fp:
- yaml.load(fp.read(), Loader=yaml.FullLoader)
- except FileNotFoundError:
- return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
- except yaml.YAMLError as e:
- return (
- False,
- f"There was an issue while trying to read with your AI Settings file: {e}",
- )
-
- return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
-
-
-def readable_file_size(size, decimal_places=2):
- """Converts the given size in bytes to a readable format.
- Args:
- size: Size in bytes
- decimal_places (int): Number of decimal places to display
- """
- for unit in ["B", "KB", "MB", "GB", "TB"]:
- if size < 1024.0:
- break
- size /= 1024.0
- return f"{size:.{decimal_places}f} {unit}"
-
-
-def get_bulletin_from_web():
- try:
- response = requests.get(
- "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
- )
- if response.status_code == 200:
- return response.text
- except requests.exceptions.RequestException:
- pass
-
- return ""
-
-
-def get_current_git_branch() -> str:
- try:
- repo = Repo(search_parent_directories=True)
- branch = repo.active_branch
- return branch.name
- except Exception:
- return ""
-
-
-def get_latest_bulletin() -> tuple[str, bool]:
- exists = os.path.exists("data/CURRENT_BULLETIN.md")
- current_bulletin = ""
- if exists:
- with open("data/CURRENT_BULLETIN.md", "r", encoding="utf-8") as f:
- current_bulletin = f.read()
- new_bulletin = get_bulletin_from_web()
- is_new_news = new_bulletin != "" and new_bulletin != current_bulletin
-
- news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
- if new_bulletin or current_bulletin:
- news_header += (
- "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
- "If you don't wish to see this message, you "
- "can run Auto-GPT with the *--skip-news* flag.\n"
- )
-
- if new_bulletin and is_new_news:
- open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
- current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"
-
- return f"{news_header}\n{current_bulletin}", is_new_news
-
-
-def markdown_to_ansi_style(markdown: str):
- ansi_lines: list[str] = []
- for line in markdown.split("\n"):
- line_style = ""
-
- if line.startswith("# "):
- line_style += Style.BRIGHT
- else:
- line = re.sub(
- r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
- rf"{Style.BRIGHT}\1{Style.NORMAL}",
- line,
- )
-
- if re.match(r"^#+ ", line) is not None:
- line_style += Fore.CYAN
- line = re.sub(r"^#+ ", "", line)
-
- ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
- return "\n".join(ansi_lines)
-
-
-def get_legal_warning() -> str:
- legal_text = """
-## DISCLAIMER AND INDEMNIFICATION AGREEMENT
-### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.
-
-## Introduction
-AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.
-
-## No Liability for Actions of the System
-The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.
-
-## User Responsibility and Respondeat Superior Liability
-As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
-behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.
-
-## Indemnification
-By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
- """
- return legal_text
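The batch helper at the top of this file steps through the iterable in strides of max_batch_length - overlap, so consecutive slices share `overlap` items. Run on a short string (assuming the module is importable at this revision):

from autogpt.utils import batch

for piece in batch("ABCDEFG", max_batch_length=3, overlap=1):
    print(piece)  # ABC, CDE, EFG, G -- adjacent slices share one character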
diff --git a/autogpt/workspace/__init__.py b/autogpt/workspace/__init__.py
deleted file mode 100644
index b348144b7..000000000
--- a/autogpt/workspace/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from autogpt.workspace.workspace import Workspace
-
-__all__ = [
- "Workspace",
-]
diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py
deleted file mode 100644
index 6d90f8540..000000000
--- a/autogpt/workspace/workspace.py
+++ /dev/null
@@ -1,162 +0,0 @@
-"""
-=========
-Workspace
-=========
-
-The workspace is a directory containing configuration and working files for an AutoGPT
-agent.
-
-"""
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Optional
-
-from autogpt.config import Config
-from autogpt.logs import logger
-
-
-class Workspace:
- """A class that represents a workspace for an AutoGPT agent."""
-
- NULL_BYTES = ["\0", "\000", "\x00", r"\z", "\u0000", "%00"]
-
- def __init__(self, workspace_root: str | Path, restrict_to_workspace: bool):
- self._root = self._sanitize_path(workspace_root)
- self._restrict_to_workspace = restrict_to_workspace
-
- @property
- def root(self) -> Path:
- """The root directory of the workspace."""
- return self._root
-
- @property
- def restrict_to_workspace(self):
- """Whether to restrict generated paths to the workspace."""
- return self._restrict_to_workspace
-
- @classmethod
- def make_workspace(cls, workspace_directory: str | Path, *args, **kwargs) -> Path:
- """Create a workspace directory and return the path to it.
-
- Parameters
- ----------
- workspace_directory
- The path to the workspace directory.
-
- Returns
- -------
- Path
- The path to the workspace directory.
-
- """
- # TODO: have this make the env file and ai settings file in the directory.
- workspace_directory = cls._sanitize_path(workspace_directory)
- workspace_directory.mkdir(exist_ok=True, parents=True)
- return workspace_directory
-
- def get_path(self, relative_path: str | Path) -> Path:
- """Get the full path for an item in the workspace.
-
- Parameters
- ----------
- relative_path
- The relative path to resolve in the workspace.
-
- Returns
- -------
- Path
- The resolved path relative to the workspace.
-
- """
- return self._sanitize_path(
- relative_path,
- root=self.root,
- restrict_to_root=self.restrict_to_workspace,
- )
-
- @staticmethod
- def _sanitize_path(
- relative_path: str | Path,
- root: Optional[str | Path] = None,
- restrict_to_root: bool = True,
- ) -> Path:
- """Resolve the relative path within the given root if possible.
-
- Parameters
- ----------
- relative_path
- The relative path to resolve.
- root
- The root path to resolve the relative path within.
- restrict_to_root
- Whether to restrict the path to the root.
-
- Returns
- -------
- Path
- The resolved path.
-
- Raises
- ------
- ValueError
- If the path is absolute and a root is provided.
- ValueError
- If the path is outside the root and the root is restricted.
-
- """
-
- # Posix systems disallow null bytes in paths. Windows is agnostic about it.
- # Do an explicit check here for all sorts of null byte representations.
-
- for null_byte in Workspace.NULL_BYTES:
- if null_byte in str(relative_path) or null_byte in str(root):
- raise ValueError("embedded null byte")
-
- if root is None:
- return Path(relative_path).resolve()
-
- logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")
-
- root, relative_path = Path(root).resolve(), Path(relative_path)
-
- logger.debug(f"Resolved root as '{root}'")
-
- # Allow exception for absolute paths if they are contained in your workspace directory.
- if relative_path.is_absolute() and not relative_path.is_relative_to(root):
- raise ValueError(
- f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
- )
-
- full_path = root.joinpath(relative_path).resolve()
-
- logger.debug(f"Joined paths as '{full_path}'")
-
- if restrict_to_root and not full_path.is_relative_to(root):
- raise ValueError(
- f"Attempted to access path '{full_path}' outside of workspace '{root}'."
- )
-
- return full_path
-
- @staticmethod
- def build_file_logger_path(config: Config, workspace_directory: Path):
- file_logger_path = workspace_directory / "file_logger.txt"
- if not file_logger_path.exists():
- with file_logger_path.open(mode="w", encoding="utf-8") as f:
- f.write("File Operation Logger ")
- config.file_logger_path = str(file_logger_path)
-
- @staticmethod
- def get_workspace_directory(
- config: Config, workspace_directory: Optional[str | Path] = None
- ):
- if workspace_directory is None:
- workspace_directory = Path(__file__).parent / "auto_gpt_workspace"
- elif isinstance(workspace_directory, str):
- workspace_directory = Path(workspace_directory)
- # TODO: pass in the ai_settings file and the env file and have them cloned into
- # the workspace directory so we can bind them to the agent.
- workspace_directory = Workspace.make_workspace(workspace_directory)
- config.workspace_path = str(workspace_directory)
- return workspace_directory
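Path resolution in the Workspace class above clamps everything under the root when restrict_to_workspace is on. A short usage sketch (the /tmp path is illustrative; assumes the module is importable at this revision):

from pathlib import Path

from autogpt.workspace import Workspace

workspace = Workspace(Path("/tmp/agent_workspace"), restrict_to_workspace=True)
print(workspace.get_path("notes/todo.txt"))  # /tmp/agent_workspace/notes/todo.txt

try:
    workspace.get_path("../../etc/passwd")  # resolves to /etc/passwd
except ValueError as e:
    print(e)  # Attempted to access path ... outside of workspace ...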
diff --git a/.coveragerc b/autogpts/autogpt/.coveragerc
index 059865772..059865772 100644
--- a/.coveragerc
+++ b/autogpts/autogpt/.coveragerc
diff --git a/autogpts/autogpt/.devcontainer/Dockerfile b/autogpts/autogpt/.devcontainer/Dockerfile
new file mode 100644
index 000000000..17b1e73f4
--- /dev/null
+++ b/autogpts/autogpt/.devcontainer/Dockerfile
@@ -0,0 +1,13 @@
+# Use an official Python base image from the Docker Hub
+FROM python:3.10
+
+# Install browsers
+RUN apt-get update && apt-get install -y \
+ chromium-driver firefox-esr \
+ ca-certificates
+
+# Install utilities
+RUN apt-get install -y curl jq wget git
+
+# Declare working directory
+WORKDIR /workspace/AutoGPT
diff --git a/autogpts/autogpt/.devcontainer/devcontainer.json b/autogpts/autogpt/.devcontainer/devcontainer.json
new file mode 100644
index 000000000..d66c5ffa5
--- /dev/null
+++ b/autogpts/autogpt/.devcontainer/devcontainer.json
@@ -0,0 +1,56 @@
+{
+ "dockerComposeFile": "./docker-compose.yml",
+ "service": "auto-gpt",
+ "workspaceFolder": "/workspace/AutoGPT",
+ "shutdownAction": "stopCompose",
+ "features": {
+ "ghcr.io/devcontainers/features/common-utils:2": {
+ "installZsh": "true",
+ "username": "vscode",
+ "userUid": "1000",
+ "userGid": "1000",
+ "upgradePackages": "true"
+ },
+ "ghcr.io/devcontainers/features/desktop-lite:1": {},
+ "ghcr.io/devcontainers/features/github-cli:1": {},
+ "ghcr.io/devcontainers/features/python:1": "none",
+ "ghcr.io/devcontainers/features/node:1": "none",
+ "ghcr.io/devcontainers/features/git:1": {
+ "version": "latest",
+ "ppa": "false"
+ }
+ },
+ // Configure tool-specific properties.
+ "customizations": {
+ // Configure properties specific to VS Code.
+ "vscode": {
+ // Set *default* container specific settings.json values on container create.
+ "settings": {
+ "python.defaultInterpreterPath": "/usr/local/bin/python",
+ "python.testing.pytestEnabled": true,
+ "python.testing.unittestEnabled": false
+ },
+ "extensions": [
+ "ms-python.python",
+ "VisualStudioExptTeam.vscodeintellicode",
+ "ms-python.vscode-pylance",
+ "ms-python.black-formatter",
+ "ms-python.isort",
+ "GitHub.vscode-pull-request-github",
+ "GitHub.copilot",
+ "github.vscode-github-actions"
+ ]
+ }
+ },
+ // Use 'forwardPorts' to make a list of ports inside the container available locally.
+ // "forwardPorts": [],
+
+ // Use 'postCreateCommand' to run commands after the container is created.
+ // "postCreateCommand": "poetry install",
+
+ // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
+ "remoteUser": "vscode",
+
+ // Add the freshly containerized repo to the list of safe repositories
+ "postCreateCommand": "git config --global --add safe.directory /workspace/AutoGPT && poetry install"
+}
diff --git a/autogpts/autogpt/.devcontainer/docker-compose.yml b/autogpts/autogpt/.devcontainer/docker-compose.yml
new file mode 100644
index 000000000..75871fa0b
--- /dev/null
+++ b/autogpts/autogpt/.devcontainer/docker-compose.yml
@@ -0,0 +1,12 @@
+# To boot the app run the following:
+# docker-compose run auto-gpt
+version: '3.9'
+
+services:
+ auto-gpt:
+ build:
+ dockerfile: .devcontainer/Dockerfile
+ context: ../
+ tty: true
+ volumes:
+ - ../:/workspace/AutoGPT
diff --git a/autogpts/autogpt/.dockerignore b/autogpts/autogpt/.dockerignore
new file mode 100644
index 000000000..1ee35738b
--- /dev/null
+++ b/autogpts/autogpt/.dockerignore
@@ -0,0 +1,14 @@
+.*
+**/.venv*
+**/__pycache__
+*.template
+*.yaml
+*.yml
+!prompt_settings.yaml
+
+data/*
+logs/*
+agbenchmark_config/logs/*
+agbenchmark_config/reports/*
+
+*.png
diff --git a/autogpts/autogpt/.env.template b/autogpts/autogpt/.env.template
new file mode 100644
index 000000000..9ec564a5d
--- /dev/null
+++ b/autogpts/autogpt/.env.template
@@ -0,0 +1,233 @@
+################################################################################
+### AutoGPT - GENERAL SETTINGS
+################################################################################
+
+## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
+OPENAI_API_KEY=your-openai-api-key
+
+## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
+# EXECUTE_LOCAL_COMMANDS=False
+
+### Workspace ###
+
+## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./data/agents/<agent_id>/workspace (Default: True)
+# RESTRICT_TO_WORKSPACE=True
+
+## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
+# DISABLED_COMMAND_CATEGORIES=
+
+## WORKSPACE_BACKEND - Choose a storage backend for workspace contents
+## Options: local, gcs, s3
+# WORKSPACE_BACKEND=local
+
+## WORKSPACE_STORAGE_BUCKET - GCS/S3 Bucket to store workspace contents in
+# WORKSPACE_STORAGE_BUCKET=autogpt
+
+## GCS Credentials
+# see https://cloud.google.com/storage/docs/authentication#libauth
+
+## AWS/S3 Credentials
+# see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
+
+## S3_ENDPOINT_URL - If you're using non-AWS S3, set your endpoint here.
+# S3_ENDPOINT_URL=
+
+### Miscellaneous ###
+
+## USER_AGENT - Define the user-agent used by the requests library to browse websites (string)
+# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
+
+## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the AutoGPT root directory. (defaults to ai_settings.yaml)
+# AI_SETTINGS_FILE=ai_settings.yaml
+
+## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the AutoGPT root directory. (Default: plugins_config.yaml)
+# PLUGINS_CONFIG_FILE=plugins_config.yaml
+
+## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the AutoGPT root directory. (defaults to prompt_settings.yaml)
+# PROMPT_SETTINGS_FILE=prompt_settings.yaml
+
+## AUTHORISE COMMAND KEY - Key to authorise commands
+# AUTHORISE_COMMAND_KEY=y
+
+## EXIT_KEY - Key to exit AutoGPT
+# EXIT_KEY=n
+
+################################################################################
+### LLM PROVIDER
+################################################################################
+
+## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
+# TEMPERATURE=0
+
+## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
+# the following is an example:
+# OPENAI_API_BASE_URL=http://localhost:443/v1
+
+# OPENAI_API_TYPE=
+# OPENAI_API_VERSION=
+
+## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
+## Note: this feature is only supported by OpenAI's newer models.
+# OPENAI_FUNCTIONS=False
+
+## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
+# OPENAI_ORGANIZATION=
+
+## USE_AZURE - Use Azure OpenAI or not (Default: False)
+# USE_AZURE=False
+
+## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the folder containing this file. (Default: azure.yaml)
+# AZURE_CONFIG_FILE=azure.yaml
+
+# AZURE_OPENAI_AD_TOKEN=
+# AZURE_OPENAI_ENDPOINT=
+
+################################################################################
+### LLM MODELS
+################################################################################
+
+## SMART_LLM - Smart language model (Default: gpt-4-0314)
+# SMART_LLM=gpt-4-0314
+
+## FAST_LLM - Fast language model (Default: gpt-3.5-turbo-16k)
+# FAST_LLM=gpt-3.5-turbo-16k
+
+## EMBEDDING_MODEL - Model to use for creating embeddings
+# EMBEDDING_MODEL=text-embedding-ada-002
+
+################################################################################
+### SHELL EXECUTION
+################################################################################
+
+## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
+# SHELL_COMMAND_CONTROL=denylist
+
+## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
+## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by AutoGPT (Default: sudo,su)
+# SHELL_DENYLIST=sudo,su
+
+## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
+## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by AutoGPT (Default: None)
+# SHELL_ALLOWLIST=
+
+################################################################################
+### IMAGE GENERATION PROVIDER
+################################################################################
+
+### Common
+
+## IMAGE_PROVIDER - Image provider (Default: dalle)
+# IMAGE_PROVIDER=dalle
+
+## IMAGE_SIZE - Image size (Default: 256)
+# IMAGE_SIZE=256
+
+### Huggingface (IMAGE_PROVIDER=huggingface)
+
+## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
+# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
+
+## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
+# HUGGINGFACE_API_TOKEN=
+
+### Stable Diffusion (IMAGE_PROVIDER=sdwebui)
+
+## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
+# SD_WEBUI_AUTH=
+
+## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
+# SD_WEBUI_URL=http://localhost:7860
+
+################################################################################
+### AUDIO TO TEXT PROVIDER
+################################################################################
+
+## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
+# AUDIO_TO_TEXT_PROVIDER=huggingface
+
+## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
+# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4
+
+################################################################################
+### GITHUB
+################################################################################
+
+## GITHUB_API_KEY - Github API key / PAT (Default: None)
+# GITHUB_API_KEY=
+
+## GITHUB_USERNAME - Github username (Default: None)
+# GITHUB_USERNAME=
+
+################################################################################
+### WEB BROWSING
+################################################################################
+
+## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
+# HEADLESS_BROWSER=True
+
+## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
+# USE_WEB_BROWSER=chrome
+
+## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000)
+# BROWSE_CHUNK_MAX_LENGTH=3000
+
+## BROWSE_SPACY_LANGUAGE_MODEL - spaCy language model (https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm)
+# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm
+
+## GOOGLE_API_KEY - Google API key (Default: None)
+# GOOGLE_API_KEY=
+
+## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
+# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=
+
+################################################################################
+### TEXT TO SPEECH PROVIDER
+################################################################################
+
+## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
+## Options: gtts, streamelements, elevenlabs, macos
+# TEXT_TO_SPEECH_PROVIDER=gtts
+
+## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
+# STREAMELEMENTS_VOICE=Brian
+
+## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
+# ELEVENLABS_API_KEY=
+
+## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Default: None)
+# ELEVENLABS_VOICE_ID=
+
+################################################################################
+### CHAT MESSAGES
+################################################################################
+
+## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
+# CHAT_MESSAGES_ENABLED=False
+
+################################################################################
+### LOGGING
+################################################################################
+
+## LOG_LEVEL - Set the minimum level to filter log output by. Setting this to DEBUG implies LOG_FORMAT=debug, unless LOG_FORMAT is set explicitly.
+## Options: DEBUG, INFO, WARNING, ERROR, CRITICAL
+# LOG_LEVEL=INFO
+
+## LOG_FORMAT - The format in which to log messages to the console (and log files).
+## Options: simple, debug, structured_google_cloud
+# LOG_FORMAT=simple
+
+## LOG_FILE_FORMAT - Normally follows the LOG_FORMAT setting, but can be set separately.
+## Note: Log file output is disabled if LOG_FORMAT=structured_google_cloud.
+# LOG_FILE_FORMAT=simple
+
+## PLAIN_OUTPUT - Disables animated typing and the spinner in the console output. (Default: False)
+# PLAIN_OUTPUT=False
+
+
+################################################################################
+### Agent Protocol Server Settings
+################################################################################
+## AP_SERVER_PORT - Specifies what port the agent protocol server will listen on. (Default: 8000)
+# AP_SERVER_PORT=8000
+## AP_SERVER_DB_URL - Specifies what connection URL the agent protocol database will connect to (Default: internal SQLite)
+# AP_SERVER_DB_URL=sqlite:///data/ap_server.db
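For orientation, settings in this template reach the application as ordinary environment variables. A hedged sketch of consuming a few of them (python-dotenv is an assumption here; AutoGPT's own Config class does the real loading, and the fallback values below are copied from the defaults documented above):

import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

smart_llm = os.getenv("SMART_LLM", "gpt-4-0314")
fast_llm = os.getenv("FAST_LLM", "gpt-3.5-turbo-16k")
restrict = os.getenv("RESTRICT_TO_WORKSPACE", "True").lower() == "true"
print(smart_llm, fast_llm, restrict)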
diff --git a/autogpts/autogpt/.envrc b/autogpts/autogpt/.envrc
new file mode 100644
index 000000000..85777e21f
--- /dev/null
+++ b/autogpts/autogpt/.envrc
@@ -0,0 +1,4 @@
+# Upon entering the directory, direnv asks for permission once, then automatically loads the project's dependencies from then on.
+# This removes the need for Nix users to run "nix develop github:superherointj/nix-auto-gpt" in order to develop or use AutoGPT.
+
+[[ -z $IN_NIX_SHELL ]] && use flake github:superherointj/nix-auto-gpt
diff --git a/autogpts/autogpt/.flake8 b/autogpts/autogpt/.flake8
new file mode 100644
index 000000000..b9aa1bc73
--- /dev/null
+++ b/autogpts/autogpt/.flake8
@@ -0,0 +1,11 @@
+[flake8]
+max-line-length = 88
+extend-exclude =
+ .*_cache/,
+ .venv,
+ data/,
+ logs/,
+ tests/unit/data/,
+extend-ignore =
+ # No whitespace before ':' conflicts with Black style for slices
+ E203,
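For context on the E203 exception: Black formats compound slice bounds with spaces around the colon, which flake8's E203 ("whitespace before ':'") would otherwise flag, so the two tools only coexist cleanly with this ignore. A small Black-formatted illustration:

```python
# Black puts spaces around ':' when slice bounds are expressions;
# without the E203 ignore, flake8 would reject this line.
items = list(range(10))
offset = 2
window = items[offset + 1 : offset + 5]  # E203 would fire on the space before ':'
print(window)  # [3, 4, 5, 6]
```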
diff --git a/autogpts/autogpt/.gitattributes b/autogpts/autogpt/.gitattributes
new file mode 100644
index 000000000..60fb560da
--- /dev/null
+++ b/autogpts/autogpt/.gitattributes
@@ -0,0 +1,5 @@
+# Exclude VCR cassettes from stats
+tests/vcr_cassettes/**/**.y*ml linguist-generated
+
+# Mark documentation as such
+docs/**.md linguist-documentation
diff --git a/autogpts/autogpt/.gitignore b/autogpts/autogpt/.gitignore
new file mode 100644
index 000000000..aaff758a0
--- /dev/null
+++ b/autogpts/autogpt/.gitignore
@@ -0,0 +1,169 @@
+## Original ignores
+autogpt/keys.py
+autogpt/*.json
+*.mpeg
+.env
+azure.yaml
+ai_settings.yaml
+last_run_ai_settings.yaml
+.vscode
+.idea/*
+auto-gpt.json
+log.txt
+log-ingestion.txt
+/logs
+*.log
+*.mp3
+mem.sqlite3
+venvAutoGPT
+data/*
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+/plugins/*
+plugins_config.yaml
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other info into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+site/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.direnv/
+.env
+.venv
+env/
+venv*/
+ENV/
+env.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+llama-*
+vicuna-*
+
+# mac
+.DS_Store
+
+openai/
+
+# news
+CURRENT_BULLETIN.md
+
+# Nodejs
+package-lock.json
+package.json
+
+# Keep
+!.keep
diff --git a/autogpts/autogpt/.pre-commit-config.yaml b/autogpts/autogpt/.pre-commit-config.yaml
new file mode 100644
index 000000000..d2a061a4d
--- /dev/null
+++ b/autogpts/autogpt/.pre-commit-config.yaml
@@ -0,0 +1,42 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.4.0
+ hooks:
+ - id: check-added-large-files
+ args: ['--maxkb=500']
+ - id: check-byte-order-marker
+ - id: check-case-conflict
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: debug-statements
+
+ - repo: https://github.com/pycqa/isort
+ rev: 5.12.0
+ hooks:
+ - id: isort
+ language_version: python3.10
+
+ - repo: https://github.com/psf/black
+ rev: 23.3.0
+ hooks:
+ - id: black
+ language_version: python3.10
+
+ # - repo: https://github.com/pre-commit/mirrors-mypy
+ # rev: 'v1.3.0'
+ # hooks:
+ # - id: mypy
+
+ - repo: local
+ hooks:
+ # - id: autoflake
+ # name: autoflake
+ # entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
+ # language: python
+ # types: [ python ]
+ - id: pytest-check
+ name: pytest-check
+ entry: bash -c 'cd autogpts/autogpt && poetry run pytest --cov=autogpt tests/unit'
+ language: system
+ pass_filenames: false
+ always_run: true
diff --git a/.sourcery.yaml b/autogpts/autogpt/.sourcery.yaml
index da171e759..da171e759 100644
--- a/.sourcery.yaml
+++ b/autogpts/autogpt/.sourcery.yaml
diff --git a/autogpts/autogpt/BULLETIN.md b/autogpts/autogpt/BULLETIN.md
new file mode 100644
index 000000000..0140e7122
--- /dev/null
+++ b/autogpts/autogpt/BULLETIN.md
@@ -0,0 +1,13 @@
+# QUICK LINKS 🔗
+# --------------
+🌎 *Official Website*: https://agpt.co
+📖 *User Guide*: https://docs.agpt.co/autogpt
+👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Nexus/wiki/Contributing
+
+# v0.5.0 RELEASE HIGHLIGHTS! 🚀🚀
+# -------------------------------
+Cloud-readiness, a new UI, support for the newest Agent Protocol version, and much more:
+*v0.5.0 is our biggest release yet!*
+
+Take a look at the Release Notes on Github for the full changelog:
+https://github.com/Significant-Gravitas/AutoGPT/releases
diff --git a/autogpts/autogpt/Dockerfile b/autogpts/autogpt/Dockerfile
new file mode 100644
index 000000000..b7af437a8
--- /dev/null
+++ b/autogpts/autogpt/Dockerfile
@@ -0,0 +1,56 @@
+# 'dev' or 'release' container build
+ARG BUILD_TYPE=dev
+
+# Use an official Python base image from the Docker Hub
+FROM python:3.10-slim AS autogpt-base
+
+# Install browsers
+RUN apt-get update && apt-get install -y \
+ chromium-driver ca-certificates gcc \
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Install utilities
+RUN apt-get update && apt-get install -y \
+ curl jq wget git \
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Set environment variables
+ENV PIP_NO_CACHE_DIR=yes \
+ PYTHONUNBUFFERED=1 \
+ PYTHONDONTWRITEBYTECODE=1 \
+ POETRY_HOME="/opt/poetry" \
+ POETRY_VIRTUALENVS_PATH="/venv" \
+ POETRY_VIRTUALENVS_IN_PROJECT=0 \
+ POETRY_NO_INTERACTION=1
+
+# Install and configure Poetry
+RUN curl -sSL https://install.python-poetry.org | python3 -
+ENV PATH="$POETRY_HOME/bin:$PATH"
+RUN poetry config installer.max-workers 10
+
+WORKDIR /app
+COPY pyproject.toml poetry.lock ./
+
+# Set the entrypoint
+ENTRYPOINT ["poetry", "run", "autogpt"]
+CMD []
+
+# dev build -> include everything
+FROM autogpt-base AS autogpt-dev
+RUN poetry install --no-cache --no-root \
+ && rm -rf $(poetry env info --path)/src
+ONBUILD COPY . ./
+
+# release build -> include bare minimum
+FROM autogpt-base AS autogpt-release
+RUN poetry install --no-cache --no-root --without dev \
+ && rm -rf $(poetry env info --path)/src
+ONBUILD COPY autogpt/ ./autogpt
+ONBUILD COPY scripts/ ./scripts
+ONBUILD COPY plugins/ ./plugins
+ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
+ONBUILD COPY README.md ./README.md
+ONBUILD RUN mkdir ./data
+
+FROM autogpt-${BUILD_TYPE} AS autogpt
+RUN poetry install --only-root
diff --git a/autogpts/autogpt/README.md b/autogpts/autogpt/README.md
new file mode 100644
index 000000000..86e0aa69f
--- /dev/null
+++ b/autogpts/autogpt/README.md
@@ -0,0 +1,180 @@
+# AutoGPT: An Autonomous GPT-4 Experiment
+
+[📖 **Documentation**][docs]
+&ensp;|&ensp;
+[🚀 **Contributing**](../../CONTRIBUTING.md)
+
+AutoGPT is an experimental open-source application showcasing the capabilities of modern Large Language Models. This program, driven by GPT-4, chains together LLM "thoughts" to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, AutoGPT pushes the boundaries of what is possible with AI.
+
+<h2 align="center"> Demo April 16th 2023 </h2>
+
+https://user-images.githubusercontent.com/70048414/232352935-55c6bf7c-3958-406e-8610-0913475a0b05.mp4
+
+Demo made by <a href="https://twitter.com/BlakeWerlinger">Blake Werlinger</a>
+
+## 🚀 Features
+
+- 🔌 Agent Protocol ([docs](https://agentprotocol.ai))
+- 💻 Easy to use UI
+- 🌐 Internet access for searches and information gathering
+- 🧠 Powered by a mix of GPT-4 and GPT-3.5 Turbo
+- 🔗 Access to popular websites and platforms
+- 🗃️ File generation and editing capabilities
+- 🔌 Extensibility with Plugins
+<!-- - 💾 Long-term and short-term memory management -->
+
+## Setting up AutoGPT
+1. Get an OpenAI [API Key](https://platform.openai.com/account/api-keys)
+2. Copy `.env.template` to `.env` and set `OPENAI_API_KEY`
+3. Make sure you have Poetry [installed](https://python-poetry.org/docs/#installation)
+
+For more ways to run AutoGPT, more detailed instructions, and more configuration options,
+see the [setup guide][docs/setup].
+
+## Running AutoGPT
+The CLI should be self-documenting:
+```shell
+$ ./autogpt.sh --help
+Usage: python -m autogpt [OPTIONS] COMMAND [ARGS]...
+
+Options:
+ --help Show this message and exit.
+
+Commands:
+ run Sets up and runs an agent, based on the task specified by the...
+ serve Starts an Agent Protocol compliant AutoGPT server, which creates...
+```
+When run without a sub-command, it will default to `run` for legacy reasons.
+
+<details>
+<summary>
+<code>$ ./autogpt.sh run --help</code>
+</summary>
+
+The `run` sub-command starts AutoGPT with the legacy CLI interface:
+
+```shell
+$ ./autogpt.sh run --help
+Usage: python -m autogpt run [OPTIONS]
+
+ Sets up and runs an agent, based on the task specified by the user, or
+ resumes an existing agent.
+
+Options:
+ -c, --continuous Enable Continuous Mode
+ -y, --skip-reprompt Skips the re-prompting messages at the
+ beginning of the script
+ -C, --ai-settings FILE Specifies which ai_settings.yaml file to
+ use, relative to the AutoGPT root directory.
+ Will also automatically skip the re-prompt.
+ -P, --prompt-settings FILE Specifies which prompt_settings.yaml file to
+ use.
+ -l, --continuous-limit INTEGER Defines the number of times to run in
+ continuous mode
+ --speak Enable Speak Mode
+ --debug Enable Debug Mode
+ --gpt3only Enable GPT3.5 Only Mode
+ --gpt4only Enable GPT4 Only Mode
+ -m, --use-memory TEXT Defines which Memory backend to use
+ -b, --browser-name TEXT Specifies which web-browser to use when
+ using selenium to scrape the web.
+ --allow-downloads Dangerous: Allows AutoGPT to download files
+ natively.
+ --skip-news Specifies whether to suppress the output of
+ latest news on startup.
+ --install-plugin-deps Installs external dependencies for 3rd party
+ plugins.
+ --ai-name TEXT AI name override
+ --ai-role TEXT AI role override
+ --constraint TEXT Add or override AI constraints to include in
+ the prompt; may be used multiple times to
+ pass multiple constraints
+ --resource TEXT Add or override AI resources to include in
+ the prompt; may be used multiple times to
+ pass multiple resources
+ --best-practice TEXT Add or override AI best practices to include
+ in the prompt; may be used multiple times to
+ pass multiple best practices
+ --override-directives If specified, --constraint, --resource and
+ --best-practice will override the AI's
+ directives instead of being appended to them
+ --help Show this message and exit.
+```
+</details>
+
+
+<details>
+<summary>
+<code>$ ./autogpt.sh serve --help</code>
+</summary>
+
+The `serve` sub-command starts AutoGPT wrapped in an Agent Protocol server:
+
+```shell
+$ ./autogpt.sh serve --help
+Usage: python -m autogpt serve [OPTIONS]
+
+ Starts an Agent Protocol compliant AutoGPT server, which creates a custom
+ agent for every task.
+
+Options:
+ -P, --prompt-settings FILE Specifies which prompt_settings.yaml file to
+ use.
+ --debug Enable Debug Mode
+ --gpt3only Enable GPT3.5 Only Mode
+ --gpt4only Enable GPT4 Only Mode
+ -m, --use-memory TEXT Defines which Memory backend to use
+ -b, --browser-name TEXT Specifies which web-browser to use when using
+ selenium to scrape the web.
+ --allow-downloads Dangerous: Allows AutoGPT to download files
+ natively.
+ --install-plugin-deps Installs external dependencies for 3rd party
+ plugins.
+ --help Show this message and exit.
+```
+</details>
+
+With `serve`, the application exposes an Agent Protocol compliant API and serves a frontend,
+by default on `http://localhost:8000`.
+
+For more comprehensive instructions, see the [user guide][docs/usage].
+
+[docs]: https://docs.agpt.co/autogpt
+[docs/setup]: https://docs.agpt.co/autogpt/setup
+[docs/usage]: https://docs.agpt.co/autogpt/usage
+[docs/plugins]: https://docs.agpt.co/autogpt/plugins
+
+## 📚 Resources
+* 📔 AutoGPT [team wiki](https://github.com/Significant-Gravitas/Nexus/wiki)
+* 🧮 AutoGPT [project kanban](https://github.com/orgs/Significant-Gravitas/projects/1)
+* 🌃 AutoGPT [roadmap](https://github.com/orgs/Significant-Gravitas/projects/2)
+
+## ⚠️ Limitations
+
+This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
+
+1. Not a polished application or product, just an experiment
+2. May not perform well in complex, real-world business scenarios. In fact, if it actually does, please share your results!
+3. Quite expensive to run, so set and monitor your API key limits with OpenAI!
+
+## 🛡 Disclaimer
+
+This project, AutoGPT, is an experimental application and is provided "as-is" without any warranty, express or implied. By using this software, you agree to assume all risks associated with its use, including but not limited to data loss, system failure, or any other issues that may arise.
+
+The developers and contributors of this project do not accept any responsibility or liability for any losses, damages, or other consequences that may occur as a result of using this software. You are solely responsible for any decisions and actions taken based on the information provided by AutoGPT.
+
+**Please note that the use of the GPT-4 language model can be expensive due to its token usage.** By utilizing this project, you acknowledge that you are responsible for monitoring and managing your own token usage and the associated costs. It is highly recommended to check your OpenAI API usage regularly and set up any necessary limits or alerts to prevent unexpected charges.
+
+As an autonomous experiment, AutoGPT may generate content or take actions that are not in line with real-world business practices or legal requirements. It is your responsibility to ensure that any actions or decisions made based on the output of this software comply with all applicable laws, regulations, and ethical standards. The developers and contributors of this project shall not be held responsible for any consequences arising from the use of this software.
+
+By using AutoGPT, you agree to indemnify, defend, and hold harmless the developers, contributors, and any affiliated parties from and against any and all claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising from your use of this software or your violation of these terms.
+
+---
+
+In Q2 of 2023, AutoGPT became the fastest-growing open-source project in history. Now that the dust has settled, we're committed to continued sustainable development and growth of the project.
+
+<p align="center">
+ <a href="https://star-history.com/#Significant-Gravitas/AutoGPT&Date">
+ <img src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" alt="Star History Chart">
+ </a>
+</p>
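The `serve` section of the README above notes that AutoGPT exposes an Agent Protocol compliant API on `http://localhost:8000`. As a hedged sketch, a client could create a task roughly like this; the `/ap/v1/agent/tasks` route comes from the Agent Protocol spec, but the exact payload and response fields accepted by this server version are assumptions:

```python
# Minimal Agent Protocol client sketch using only the standard library.
# Assumes a server started with `./autogpt.sh serve` on the default port.
import json
import urllib.request

request = urllib.request.Request(
    "http://localhost:8000/ap/v1/agent/tasks",
    data=json.dumps({"input": "Write 'hello world' to a file"}).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(request) as response:
    task = json.load(response)
print(task.get("task_id"))  # ID of the created task (assumed field name)
```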
diff --git a/autogpts/autogpt/agbenchmark_config/.gitignore b/autogpts/autogpt/agbenchmark_config/.gitignore
new file mode 100644
index 000000000..78bb5abd2
--- /dev/null
+++ b/autogpts/autogpt/agbenchmark_config/.gitignore
@@ -0,0 +1,3 @@
+logs/
+reports/
+temp_folder/
diff --git a/autogpt/commands/__init__.py b/autogpts/autogpt/agbenchmark_config/__init__.py
index e69de29bb..e69de29bb 100644
--- a/autogpt/commands/__init__.py
+++ b/autogpts/autogpt/agbenchmark_config/__init__.py
diff --git a/autogpts/autogpt/agbenchmark_config/analyze_reports.py b/autogpts/autogpt/agbenchmark_config/analyze_reports.py
new file mode 100644
index 000000000..2a8f95443
--- /dev/null
+++ b/autogpts/autogpt/agbenchmark_config/analyze_reports.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+
+import json
+import logging
+import re
+import sys
+from collections import defaultdict
+from pathlib import Path
+
+from tabulate import tabulate
+
+info = "-v" in sys.argv
+debug = "-vv" in sys.argv
+granular = "--granular" in sys.argv
+
+logging.basicConfig(
+ level=logging.DEBUG if debug else logging.INFO if info else logging.WARNING
+)
+logger = logging.getLogger(__name__)
+
+# Get a list of all JSON files in the directory
+report_files = [
+ report_file
+ for dir in (Path(__file__).parent / "reports").iterdir()
+ if re.match(r"^\d{8}T\d{6}_", dir.name)
+ and (report_file := dir / "report.json").is_file()
+]
+
+labels = list[str]()
+runs_per_label = defaultdict[str, int](lambda: 0)
+suite_names = list[str]()
+test_names = list[str]()
+
+# Create a dictionary to store grouped success values by label and test
+grouped_success_values = defaultdict[str, list[str]](list[str])
+
+# Loop through each JSON file to collect labels and success values
+for report_file in sorted(report_files):
+ with open(report_file) as f:
+ logger.info(f"Loading {report_file}...")
+
+ data = json.load(f)
+ if "tests" in data:
+ test_tree = data["tests"]
+ label = data["agent_git_commit_sha"].rsplit("/", 1)[1][:7] # commit hash
+ else:
+ # Benchmark run still in progress
+ test_tree = data
+ label = report_file.parent.name.split("_", 1)[1]
+ logger.info(f"Run '{label}' seems to be in progress")
+
+ runs_per_label[label] += 1
+
+ def process_test(test_name: str, test_data: dict):
+ result_group = grouped_success_values[f"{label}|{test_name}"]
+
+ if "tests" in test_data:
+ logger.debug(f"{test_name} is a test suite")
+
+ # Test suite
+ suite_attempted = any(
+ test["metrics"]["attempted"] for test in test_data["tests"].values()
+ )
+ logger.debug(f"suite_attempted: {suite_attempted}")
+ if not suite_attempted:
+ return
+
+ if test_name not in test_names:
+ test_names.append(test_name)
+
+ if test_data["metrics"]["percentage"] == 0:
+ result_indicator = "❌"
+ else:
+ highest_difficulty = test_data["metrics"]["highest_difficulty"]
+ result_indicator = {
+ "interface": "🔌",
+ "novice": "🌑",
+ "basic": "🌒",
+ "intermediate": "🌓",
+ "advanced": "🌔",
+ "hard": "🌕",
+ }[highest_difficulty]
+
+ logger.debug(f"result group: {result_group}")
+ logger.debug(f"runs_per_label: {runs_per_label[label]}")
+ if len(result_group) + 1 < runs_per_label[label]:
+ result_group.extend(
+ ["❔"] * (runs_per_label[label] - len(result_group) - 1)
+ )
+ result_group.append(result_indicator)
+ logger.debug(f"result group (after): {result_group}")
+
+ if granular:
+ for test_name, test in test_data["tests"].items():
+ process_test(test_name, test)
+ return
+
+ test_metrics = test_data["metrics"]
+ result_indicator = "❔"
+
+ if "attempted" not in test_metrics:
+ return
+ elif test_metrics["attempted"]:
+ if test_name not in test_names:
+ test_names.append(test_name)
+
+ success_value = test_metrics["success"]
+ result_indicator = {True: "✅", False: "❌"}[success_value]
+
+ if len(result_group) + 1 < runs_per_label[label]:
+ result_group.extend(
+ [" "] * (runs_per_label[label] - len(result_group) - 1)
+ )
+ result_group.append(result_indicator)
+
+ for test_name, suite in test_tree.items():
+ try:
+ process_test(test_name, suite)
+ except KeyError:
+ print(f"{test_name}.metrics: {suite['metrics']}")
+ raise
+
+ if label not in labels:
+ labels.append(label)
+
+# Create headers
+headers = ["Test Name"] + list(labels)
+
+# Prepare data for tabulation
+table_data = list[list[str]]()
+for test_name in test_names:
+ row = [test_name]
+ for label in labels:
+ results = grouped_success_values.get(f"{label}|{test_name}", ["❔"])
+ if len(results) < runs_per_label[label]:
+ results.extend(["❔"] * (runs_per_label[label] - len(results)))
+ if len(results) > 1 and all(r == "❔" for r in results):
+ results.clear()
+ row.append(" ".join(results))
+ table_data.append(row)
+
+# Print tabulated data
+print(tabulate(table_data, headers=headers, tablefmt="grid"))
diff --git a/autogpts/autogpt/agbenchmark_config/benchmarks.py b/autogpts/autogpt/agbenchmark_config/benchmarks.py
new file mode 100644
index 000000000..c7e75406e
--- /dev/null
+++ b/autogpts/autogpt/agbenchmark_config/benchmarks.py
@@ -0,0 +1,74 @@
+import asyncio
+import logging
+import sys
+from pathlib import Path
+
+from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
+from autogpt.app.main import _configure_openai_provider, run_interaction_loop
+from autogpt.commands import COMMAND_CATEGORIES
+from autogpt.config import AIProfile, ConfigBuilder
+from autogpt.logs.config import configure_logging
+from autogpt.models.command_registry import CommandRegistry
+
+LOG_DIR = Path(__file__).parent / "logs"
+
+
+def run_specific_agent(task: str, continuous_mode: bool = False) -> None:
+ agent = bootstrap_agent(task, continuous_mode)
+ asyncio.run(run_interaction_loop(agent))
+
+
+def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
+ config = ConfigBuilder.build_config_from_env()
+ config.logging.level = logging.DEBUG
+ config.logging.log_dir = LOG_DIR
+ config.logging.plain_console_output = True
+ configure_logging(**config.logging.dict())
+
+ config.continuous_mode = continuous_mode
+ config.continuous_limit = 20
+ config.noninteractive_mode = True
+ config.memory_backend = "no_memory"
+
+ command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
+
+ ai_profile = AIProfile(
+ ai_name="AutoGPT",
+ ai_role="a multi-purpose AI assistant.",
+ ai_goals=[task],
+ )
+
+ agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
+ agent_prompt_config.use_functions_api = config.openai_functions
+ agent_settings = AgentSettings(
+ name=Agent.default_settings.name,
+ description=Agent.default_settings.description,
+ ai_profile=ai_profile,
+ config=AgentConfiguration(
+ fast_llm=config.fast_llm,
+ smart_llm=config.smart_llm,
+ allow_fs_access=not config.restrict_to_workspace,
+ use_functions_api=config.openai_functions,
+ plugins=config.plugins,
+ ),
+ prompt_config=agent_prompt_config,
+ history=Agent.default_settings.history.copy(deep=True),
+ )
+
+ agent = Agent(
+ settings=agent_settings,
+ llm_provider=_configure_openai_provider(config),
+ command_registry=command_registry,
+ legacy_config=config,
+ )
+ agent.attach_fs(config.app_data_dir / "agents" / "AutoGPT-benchmark") # HACK
+ return agent
+
+
+if __name__ == "__main__":
+ # The first argument is the script name itself, second is the task
+ if len(sys.argv) != 2:
+ print("Usage: python script.py <task>")
+ sys.exit(1)
+ task = sys.argv[1]
+ run_specific_agent(task, continuous_mode=True)
diff --git a/autogpts/autogpt/agbenchmark_config/config.json b/autogpts/autogpt/agbenchmark_config/config.json
new file mode 100644
index 000000000..154fe388f
--- /dev/null
+++ b/autogpts/autogpt/agbenchmark_config/config.json
@@ -0,0 +1,8 @@
+{
+ "workspace": {
+ "input": "agbenchmark_config/workspace",
+ "output": "agbenchmark_config/workspace"
+ },
+ "entry_path": "agbenchmark.benchmarks",
+ "host": "http://localhost:8000"
+}
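A minimal sketch of how a benchmark harness might read this file; the key names come from the config above, but the loading code is illustrative rather than agbenchmark's actual implementation:

```python
import json
from pathlib import Path

# Illustrative loader for agbenchmark_config/config.json.
config = json.loads(Path("agbenchmark_config/config.json").read_text())
workspace_input = Path(config["workspace"]["input"])
host = config["host"]  # where the agent's Agent Protocol server is expected
print(f"Benchmarking agent at {host}; workspace: {workspace_input}")
```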
diff --git a/autogpts/autogpt/autogpt.bat b/autogpts/autogpt/autogpt.bat
new file mode 100644
index 000000000..12b89f212
--- /dev/null
+++ b/autogpts/autogpt/autogpt.bat
@@ -0,0 +1,27 @@
+@echo off
+setlocal enabledelayedexpansion
+
+:FindPythonCommand
+for %%A in (python3 python) do (
+ where /Q %%A
+ if !errorlevel! EQU 0 (
+ set "PYTHON_CMD=%%A"
+ goto :Found
+ )
+)
+
+echo Python not found. Please install Python.
+pause
+exit /B 1
+
+:Found
+%PYTHON_CMD% scripts/check_requirements.py
+if errorlevel 1 (
+ echo.
+ poetry install --without dev
+ echo.
+ echo Finished installing packages! Starting AutoGPT...
+ echo.
+)
+poetry run autogpt %*
+pause
diff --git a/autogpts/autogpt/autogpt.sh b/autogpts/autogpt/autogpt.sh
new file mode 100755
index 000000000..57e1f4192
--- /dev/null
+++ b/autogpts/autogpt/autogpt.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+function find_python_command() {
+ if command -v python3 &> /dev/null
+ then
+ echo "python3"
+ elif command -v python &> /dev/null
+ then
+ echo "python"
+ else
+ echo "Python not found. Please install Python."
+ exit 1
+ fi
+}
+
+PYTHON_CMD=$(find_python_command)
+
+if $PYTHON_CMD -c "import sys; sys.exit(sys.version_info < (3, 10))"; then
+ if ! $PYTHON_CMD scripts/check_requirements.py; then
+ echo
+ poetry install --without dev
+ echo
+ echo "Finished installing packages! Starting AutoGPT..."
+ echo
+ fi
+ poetry run autogpt "$@"
+else
+ echo "Python 3.10 or higher is required to run Auto GPT."
+fi
diff --git a/autogpts/autogpt/autogpt/__init__.py b/autogpts/autogpt/autogpt/__init__.py
new file mode 100644
index 000000000..251826fcd
--- /dev/null
+++ b/autogpts/autogpt/autogpt/__init__.py
@@ -0,0 +1,7 @@
+import os
+import random
+import sys
+
+if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
+ print("Setting random seed to 42")
+ random.seed(42)
diff --git a/autogpts/autogpt/autogpt/__main__.py b/autogpts/autogpt/autogpt/__main__.py
new file mode 100644
index 000000000..e5b924566
--- /dev/null
+++ b/autogpts/autogpt/autogpt/__main__.py
@@ -0,0 +1,5 @@
+"""AutoGPT: A GPT powered AI Assistant"""
+import autogpt.app.cli
+
+if __name__ == "__main__":
+ autogpt.app.cli.cli()
diff --git a/autogpts/autogpt/autogpt/agent_factory/configurators.py b/autogpts/autogpt/autogpt/agent_factory/configurators.py
new file mode 100644
index 000000000..a8e9cbab6
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_factory/configurators.py
@@ -0,0 +1,113 @@
+from typing import Optional
+
+from autogpt.agent_manager import AgentManager
+from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
+from autogpt.commands import COMMAND_CATEGORIES
+from autogpt.config import AIDirectives, AIProfile, Config
+from autogpt.core.resource.model_providers import ChatModelProvider
+from autogpt.logs.config import configure_chat_plugins
+from autogpt.models.command_registry import CommandRegistry
+from autogpt.plugins import scan_plugins
+
+
+def create_agent(
+ task: str,
+ ai_profile: AIProfile,
+ app_config: Config,
+ llm_provider: ChatModelProvider,
+ directives: Optional[AIDirectives] = None,
+) -> Agent:
+ if not task:
+ raise ValueError("No task specified for new agent")
+ if not directives:
+ directives = AIDirectives.from_file(app_config.prompt_settings_file)
+
+ agent = _configure_agent(
+ task=task,
+ ai_profile=ai_profile,
+ directives=directives,
+ app_config=app_config,
+ llm_provider=llm_provider,
+ )
+
+ agent.state.agent_id = AgentManager.generate_id(agent.ai_profile.ai_name)
+
+ return agent
+
+
+def configure_agent_with_state(
+ state: AgentSettings,
+ app_config: Config,
+ llm_provider: ChatModelProvider,
+) -> Agent:
+ return _configure_agent(
+ state=state,
+ app_config=app_config,
+ llm_provider=llm_provider,
+ )
+
+
+def _configure_agent(
+ app_config: Config,
+ llm_provider: ChatModelProvider,
+ task: str = "",
+ ai_profile: Optional[AIProfile] = None,
+ directives: Optional[AIDirectives] = None,
+ state: Optional[AgentSettings] = None,
+) -> Agent:
+ if not (state or task and ai_profile and directives):
+ raise TypeError(
+ "Either (state) or (task, ai_profile, directives) must be specified"
+ )
+
+ app_config.plugins = scan_plugins(app_config)
+ configure_chat_plugins(app_config)
+
+ # Create a CommandRegistry instance and scan default folder
+ command_registry = CommandRegistry.with_command_modules(
+ modules=COMMAND_CATEGORIES,
+ config=app_config,
+ )
+
+ agent_state = state or create_agent_state(
+ task=task,
+ ai_profile=ai_profile,
+ directives=directives,
+ app_config=app_config,
+ )
+
+ # TODO: configure memory
+
+ return Agent(
+ settings=agent_state,
+ llm_provider=llm_provider,
+ command_registry=command_registry,
+ legacy_config=app_config,
+ )
+
+
+def create_agent_state(
+ task: str,
+ ai_profile: AIProfile,
+ directives: AIDirectives,
+ app_config: Config,
+) -> AgentSettings:
+ agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
+ agent_prompt_config.use_functions_api = app_config.openai_functions
+
+ return AgentSettings(
+ name=Agent.default_settings.name,
+ description=Agent.default_settings.description,
+ task=task,
+ ai_profile=ai_profile,
+ directives=directives,
+ config=AgentConfiguration(
+ fast_llm=app_config.fast_llm,
+ smart_llm=app_config.smart_llm,
+ allow_fs_access=not app_config.restrict_to_workspace,
+ use_functions_api=app_config.openai_functions,
+ plugins=app_config.plugins,
+ ),
+ prompt_config=agent_prompt_config,
+ history=Agent.default_settings.history.copy(deep=True),
+ )
diff --git a/autogpts/autogpt/autogpt/agent_factory/generators.py b/autogpts/autogpt/autogpt/agent_factory/generators.py
new file mode 100644
index 000000000..2713d5cbb
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_factory/generators.py
@@ -0,0 +1,31 @@
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from autogpt.agents.agent import Agent
+ from autogpt.config import Config
+ from autogpt.core.resource.model_providers.schema import ChatModelProvider
+
+from autogpt.config.ai_directives import AIDirectives
+
+from .configurators import _configure_agent
+from .profile_generator import generate_agent_profile_for_task
+
+
+async def generate_agent_for_task(
+ task: str,
+ app_config: "Config",
+ llm_provider: "ChatModelProvider",
+) -> "Agent":
+ base_directives = AIDirectives.from_file(app_config.prompt_settings_file)
+ ai_profile, task_directives = await generate_agent_profile_for_task(
+ task=task,
+ app_config=app_config,
+ llm_provider=llm_provider,
+ )
+ return _configure_agent(
+ task=task,
+ ai_profile=ai_profile,
+ directives=base_directives + task_directives,
+ app_config=app_config,
+ llm_provider=llm_provider,
+ )
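A hedged usage sketch for the factory above; it assumes an already-constructed `Config` and `ChatModelProvider`, whose creation (e.g. via `ConfigBuilder` and the OpenAI provider) is elided:

```python
# Illustrative driver for generate_agent_for_task; `app_config` and
# `llm_provider` must be supplied by the surrounding application.
import asyncio

from autogpt.agent_factory.generators import generate_agent_for_task


async def demo(app_config, llm_provider):
    agent = await generate_agent_for_task(
        task="Compile a list of Python linters",
        app_config=app_config,
        llm_provider=llm_provider,
    )
    print(agent.ai_profile.ai_name)  # role-based name generated by the LLM


# asyncio.run(demo(app_config, llm_provider))  # wire in real instances first
```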
diff --git a/autogpts/autogpt/autogpt/agent_factory/profile_generator.py b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py
new file mode 100644
index 000000000..8d9ff399e
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py
@@ -0,0 +1,255 @@
+import json
+import logging
+
+from autogpt.config import AIDirectives, AIProfile, Config
+from autogpt.core.configuration import SystemConfiguration, UserConfigurable
+from autogpt.core.prompting import (
+ ChatPrompt,
+ LanguageModelClassification,
+ PromptStrategy,
+)
+from autogpt.core.prompting.utils import json_loads
+from autogpt.core.resource.model_providers.schema import (
+ AssistantChatMessage,
+ ChatMessage,
+ ChatModelProvider,
+ CompletionModelFunction,
+)
+from autogpt.core.utils.json_schema import JSONSchema
+
+logger = logging.getLogger(__name__)
+
+
+class AgentProfileGeneratorConfiguration(SystemConfiguration):
+ model_classification: LanguageModelClassification = UserConfigurable(
+ default=LanguageModelClassification.SMART_MODEL
+ )
+ _example_call: object = [
+ {
+ "type": "function",
+ "function": {
+ "name": "create_agent",
+ "arguments": {
+ "name": "CMOGPT",
+ "description": (
+ "a professional digital marketer AI that assists Solopreneurs "
+ "in growing their businesses by providing "
+ "world-class expertise in solving marketing problems "
+ "for SaaS, content products, agencies, and more."
+ ),
+ "directives": {
+ "best_practices": [
+ (
+ "Engage in effective problem-solving, prioritization, "
+ "planning, and supporting execution to address your "
+ "marketing needs as your virtual "
+ "Chief Marketing Officer."
+ ),
+ (
+ "Provide specific, actionable, and concise advice to "
+ "help you make informed decisions without the use of "
+ "platitudes or overly wordy explanations."
+ ),
+ (
+ "Identify and prioritize quick wins and cost-effective "
+ "campaigns that maximize results with minimal time and "
+ "budget investment."
+ ),
+ (
+ "Proactively take the lead in guiding you and offering "
+ "suggestions when faced with unclear information or "
+ "uncertainty to ensure your marketing strategy remains "
+ "on track."
+ ),
+ ],
+ "constraints": [
+ "Do not suggest illegal or unethical plans or strategies.",
+ "Take reasonable budgetary limits into account.",
+ ],
+ },
+ },
+ },
+ }
+ ]
+ system_prompt: str = UserConfigurable(
+ default=(
+ "Your job is to respond to a user-defined task, given in triple quotes, by "
+ "invoking the `create_agent` function to generate an autonomous agent to "
+ "complete the task. "
+ "You should supply a role-based name for the agent (_GPT), "
+ "an informative description for what the agent does, and 1 to 5 directives "
+ "in each of the categories Best Practices and Constraints, "
+ "that are optimally aligned with the successful completion "
+ "of its assigned task.\n"
+ "\n"
+ "Example Input:\n"
+ '"""Help me with marketing my business"""\n\n'
+ "Example Call:\n"
+ "```\n"
+ f"{json.dumps(_example_call, indent=4)}"
+ "\n```"
+ )
+ )
+ user_prompt_template: str = UserConfigurable(default='"""{user_objective}"""')
+ create_agent_function: dict = UserConfigurable(
+ default=CompletionModelFunction(
+ name="create_agent",
+ description="Create a new autonomous AI agent to complete a given task.",
+ parameters={
+ "name": JSONSchema(
+ type=JSONSchema.Type.STRING,
+ description="A short role-based name for an autonomous agent.",
+ required=True,
+ ),
+ "description": JSONSchema(
+ type=JSONSchema.Type.STRING,
+ description=(
+ "An informative one sentence description "
+ "of what the AI agent does"
+ ),
+ required=True,
+ ),
+ "directives": JSONSchema(
+ type=JSONSchema.Type.OBJECT,
+ properties={
+ "best_practices": JSONSchema(
+ type=JSONSchema.Type.ARRAY,
+ minItems=1,
+ maxItems=5,
+ items=JSONSchema(
+ type=JSONSchema.Type.STRING,
+ ),
+ description=(
+ "One to five highly effective best practices "
+ "that are optimally aligned with the completion "
+ "of the given task"
+ ),
+ required=True,
+ ),
+ "constraints": JSONSchema(
+ type=JSONSchema.Type.ARRAY,
+ minItems=1,
+ maxItems=5,
+ items=JSONSchema(
+ type=JSONSchema.Type.STRING,
+ ),
+ description=(
+ "One to five reasonable and efficacious constraints "
+ "that are optimally aligned with the completion "
+ "of the given task"
+ ),
+ required=True,
+ ),
+ },
+ required=True,
+ ),
+ },
+ ).schema
+ )
+
+
+class AgentProfileGenerator(PromptStrategy):
+ default_configuration: AgentProfileGeneratorConfiguration = (
+ AgentProfileGeneratorConfiguration()
+ )
+
+ def __init__(
+ self,
+ model_classification: LanguageModelClassification,
+ system_prompt: str,
+ user_prompt_template: str,
+ create_agent_function: dict,
+ ):
+ self._model_classification = model_classification
+ self._system_prompt_message = system_prompt
+ self._user_prompt_template = user_prompt_template
+ self._create_agent_function = CompletionModelFunction.parse(
+ create_agent_function
+ )
+
+ @property
+ def model_classification(self) -> LanguageModelClassification:
+ return self._model_classification
+
+ def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
+ system_message = ChatMessage.system(self._system_prompt_message)
+ user_message = ChatMessage.user(
+ self._user_prompt_template.format(
+ user_objective=user_objective,
+ )
+ )
+ prompt = ChatPrompt(
+ messages=[system_message, user_message],
+ functions=[self._create_agent_function],
+ )
+ return prompt
+
+ def parse_response_content(
+ self,
+ response_content: AssistantChatMessage,
+ ) -> tuple[AIProfile, AIDirectives]:
+ """Parse the actual text response from the objective model.
+
+ Args:
+ response_content: The raw response content from the objective model.
+
+ Returns:
+ The parsed response.
+
+ """
+ try:
+ if not response_content.tool_calls:
+ raise ValueError(
+ f"LLM did not call {self._create_agent_function.name} function; "
+ "agent profile creation failed"
+ )
+ arguments: object = json_loads(
+ response_content.tool_calls[0].function.arguments
+ )
+ ai_profile = AIProfile(
+ ai_name=arguments.get("name"),
+ ai_role=arguments.get("description"),
+ )
+ ai_directives = AIDirectives(
+ best_practices=arguments.get("directives", {}).get("best_practices"),
+ constraints=arguments.get("directives", {}).get("constraints"),
+ resources=[],
+ )
+ except KeyError:
+ logger.debug(f"Failed to parse this response content: {response_content}")
+ raise
+ return ai_profile, ai_directives
+
+
+async def generate_agent_profile_for_task(
+ task: str,
+ app_config: Config,
+ llm_provider: ChatModelProvider,
+) -> tuple[AIProfile, AIDirectives]:
+ """Generates an AIConfig object from the given string.
+
+ Returns:
+ AIConfig: The AIConfig object tailored to the user's input
+ """
+ agent_profile_generator = AgentProfileGenerator(
+ **AgentProfileGenerator.default_configuration.dict() # HACK
+ )
+
+ prompt = agent_profile_generator.build_prompt(task)
+
+ # Call LLM with the string as user input
+ output = (
+ await llm_provider.create_chat_completion(
+ prompt.messages,
+ model_name=app_config.smart_llm,
+ functions=prompt.functions,
+ )
+ ).response
+
+ # Debug LLM Output
+ logger.debug(f"AI Config Generator Raw Output: {output}")
+
+ # Parse the output
+ ai_profile, ai_directives = agent_profile_generator.parse_response_content(output)
+
+ return ai_profile, ai_directives
diff --git a/autogpts/autogpt/autogpt/agent_manager/__init__.py b/autogpts/autogpt/autogpt/agent_manager/__init__.py
new file mode 100644
index 000000000..a412566bf
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_manager/__init__.py
@@ -0,0 +1,3 @@
+from .agent_manager import AgentManager
+
+__all__ = ["AgentManager"]
diff --git a/autogpts/autogpt/autogpt/agent_manager/agent_manager.py b/autogpts/autogpt/autogpt/agent_manager/agent_manager.py
new file mode 100644
index 000000000..0b4731bc6
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_manager/agent_manager.py
@@ -0,0 +1,48 @@
+from __future__ import annotations
+
+import uuid
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from autogpt.agents.agent import AgentSettings
+
+from autogpt.agents.utils.agent_file_manager import AgentFileManager
+
+
+class AgentManager:
+ def __init__(self, app_data_dir: Path):
+ self.agents_dir = app_data_dir / "agents"
+ if not self.agents_dir.exists():
+ self.agents_dir.mkdir()
+
+ @staticmethod
+ def generate_id(agent_name: str) -> str:
+ unique_id = str(uuid.uuid4())[:8]
+ return f"{agent_name}-{unique_id}"
+
+ def list_agents(self) -> list[str]:
+ return [
+ dir.name
+ for dir in self.agents_dir.iterdir()
+ if dir.is_dir() and AgentFileManager(dir).state_file_path.exists()
+ ]
+
+ def get_agent_dir(self, agent_id: str, must_exist: bool = False) -> Path:
+ assert len(agent_id) > 0
+ agent_dir = self.agents_dir / agent_id
+ if must_exist and not agent_dir.exists():
+ raise FileNotFoundError(f"No agent with ID '{agent_id}'")
+ return agent_dir
+
+ def retrieve_state(self, agent_id: str) -> AgentSettings:
+ from autogpt.agents.agent import AgentSettings
+
+ agent_dir = self.get_agent_dir(agent_id, True)
+ state_file = AgentFileManager(agent_dir).state_file_path
+ if not state_file.exists():
+ raise FileNotFoundError(f"Agent with ID '{agent_id}' has no state.json")
+
+ state = AgentSettings.load_from_json_file(state_file)
+ state.agent_data_dir = agent_dir
+ return state
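Combined with `configure_agent_with_state` from the agent factory above, resuming a saved agent could look roughly like this; a sketch under the assumption that `app_config` and `llm_provider` already exist:

```python
from pathlib import Path

from autogpt.agent_factory.configurators import configure_agent_with_state
from autogpt.agent_manager import AgentManager


def resume_first_agent(app_config, llm_provider):
    # List saved agents, load the first one's state, and rebuild the agent.
    manager = AgentManager(app_data_dir=Path(app_config.app_data_dir))
    agent_ids = manager.list_agents()  # e.g. ["AutoGPT-1a2b3c4d"]
    state = manager.retrieve_state(agent_ids[0])
    return configure_agent_with_state(
        state=state,
        app_config=app_config,
        llm_provider=llm_provider,
    )
```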
diff --git a/autogpts/autogpt/autogpt/agents/__init__.py b/autogpts/autogpt/autogpt/agents/__init__.py
new file mode 100644
index 000000000..94a5f42a5
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/__init__.py
@@ -0,0 +1,4 @@
+from .agent import Agent
+from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName
+
+__all__ = ["BaseAgent", "Agent", "CommandName", "CommandArgs", "AgentThoughts"]
diff --git a/autogpts/autogpt/autogpt/agents/agent.py b/autogpts/autogpt/autogpt/agents/agent.py
new file mode 100644
index 000000000..8847ae7f2
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/agent.py
@@ -0,0 +1,329 @@
+from __future__ import annotations
+
+import inspect
+import logging
+import time
+from datetime import datetime
+from typing import TYPE_CHECKING, Optional
+
+if TYPE_CHECKING:
+ from autogpt.config import Config
+ from autogpt.models.command_registry import CommandRegistry
+
+from pydantic import Field
+
+from autogpt.core.configuration import Configurable
+from autogpt.core.prompting import ChatPrompt
+from autogpt.core.resource.model_providers import (
+ AssistantChatMessage,
+ ChatMessage,
+ ChatModelProvider,
+)
+from autogpt.llm.api_manager import ApiManager
+from autogpt.logs.log_cycle import (
+ CURRENT_CONTEXT_FILE_NAME,
+ NEXT_ACTION_FILE_NAME,
+ USER_INPUT_FILE_NAME,
+ LogCycleHandler,
+)
+from autogpt.logs.utils import fmt_kwargs
+from autogpt.models.action_history import (
+ Action,
+ ActionErrorResult,
+ ActionInterruptedByHuman,
+ ActionResult,
+ ActionSuccessResult,
+)
+from autogpt.models.command import CommandOutput
+from autogpt.models.context_item import ContextItem
+
+from .base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
+from .features.context import ContextMixin
+from .features.file_workspace import FileWorkspaceMixin
+from .features.watchdog import WatchdogMixin
+from .prompt_strategies.one_shot import (
+ OneShotAgentPromptConfiguration,
+ OneShotAgentPromptStrategy,
+)
+from .utils.exceptions import (
+ AgentException,
+ AgentTerminated,
+ CommandExecutionError,
+ UnknownCommandError,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class AgentConfiguration(BaseAgentConfiguration):
+ pass
+
+
+class AgentSettings(BaseAgentSettings):
+ config: AgentConfiguration = Field(default_factory=AgentConfiguration)
+ prompt_config: OneShotAgentPromptConfiguration = Field(
+ default_factory=(
+ lambda: OneShotAgentPromptStrategy.default_configuration.copy(deep=True)
+ )
+ )
+
+
+class Agent(
+ ContextMixin,
+ FileWorkspaceMixin,
+ WatchdogMixin,
+ BaseAgent,
+ Configurable[AgentSettings],
+):
+ """AutoGPT's primary Agent; uses one-shot prompting."""
+
+ default_settings: AgentSettings = AgentSettings(
+ name="Agent",
+ description=__doc__,
+ )
+
+ prompt_strategy: OneShotAgentPromptStrategy
+
+ def __init__(
+ self,
+ settings: AgentSettings,
+ llm_provider: ChatModelProvider,
+ command_registry: CommandRegistry,
+ legacy_config: Config,
+ ):
+ prompt_strategy = OneShotAgentPromptStrategy(
+ configuration=settings.prompt_config,
+ logger=logger,
+ )
+ super().__init__(
+ settings=settings,
+ llm_provider=llm_provider,
+ prompt_strategy=prompt_strategy,
+ command_registry=command_registry,
+ legacy_config=legacy_config,
+ )
+
+ self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
+ """Timestamp the agent was created; only used for structured debug logging."""
+
+ self.log_cycle_handler = LogCycleHandler()
+ """LogCycleHandler for structured debug logging."""
+
+ def build_prompt(
+ self,
+ *args,
+ extra_messages: Optional[list[ChatMessage]] = None,
+ include_os_info: Optional[bool] = None,
+ **kwargs,
+ ) -> ChatPrompt:
+ if not extra_messages:
+ extra_messages = []
+
+ # Clock
+ extra_messages.append(
+ ChatMessage.system(f"The current time and date is {time.strftime('%c')}"),
+ )
+
+ # Add budget information (if any) to prompt
+ api_manager = ApiManager()
+ if api_manager.get_total_budget() > 0.0:
+ remaining_budget = (
+ api_manager.get_total_budget() - api_manager.get_total_cost()
+ )
+ if remaining_budget < 0:
+ remaining_budget = 0
+
+ budget_msg = ChatMessage.system(
+ f"Your remaining API budget is ${remaining_budget:.3f}"
+ + (
+ " BUDGET EXCEEDED! SHUT DOWN!\n\n"
+ if remaining_budget == 0
+ else " Budget very nearly exceeded! Shut down gracefully!\n\n"
+ if remaining_budget < 0.005
+ else " Budget nearly exceeded. Finish up.\n\n"
+ if remaining_budget < 0.01
+ else ""
+ ),
+ )
+ logger.debug(budget_msg)
+ extra_messages.append(budget_msg)
+
+ if include_os_info is None:
+ include_os_info = self.legacy_config.execute_local_commands
+
+ return super().build_prompt(
+ *args,
+ extra_messages=extra_messages,
+ include_os_info=include_os_info,
+ **kwargs,
+ )
+
+ def on_before_think(self, *args, **kwargs) -> ChatPrompt:
+ prompt = super().on_before_think(*args, **kwargs)
+
+ self.log_cycle_handler.log_count_within_cycle = 0
+ self.log_cycle_handler.log_cycle(
+ self.ai_profile.ai_name,
+ self.created_at,
+ self.config.cycle_count,
+ prompt.raw(),
+ CURRENT_CONTEXT_FILE_NAME,
+ )
+ return prompt
+
+ def parse_and_process_response(
+ self, llm_response: AssistantChatMessage, *args, **kwargs
+ ) -> Agent.ThoughtProcessOutput:
+ for plugin in self.config.plugins:
+ if not plugin.can_handle_post_planning():
+ continue
+ llm_response.content = plugin.post_planning(llm_response.content or "")
+
+ (
+ command_name,
+ arguments,
+ assistant_reply_dict,
+ ) = self.prompt_strategy.parse_response_content(llm_response)
+
+ self.log_cycle_handler.log_cycle(
+ self.ai_profile.ai_name,
+ self.created_at,
+ self.config.cycle_count,
+ assistant_reply_dict,
+ NEXT_ACTION_FILE_NAME,
+ )
+
+ if command_name:
+ self.event_history.register_action(
+ Action(
+ name=command_name,
+ args=arguments,
+ reasoning=assistant_reply_dict["thoughts"]["reasoning"],
+ )
+ )
+
+ return command_name, arguments, assistant_reply_dict
+
+ async def execute(
+ self,
+ command_name: str,
+ command_args: dict[str, str] = {},
+ user_input: str = "",
+ ) -> ActionResult:
+ result: ActionResult
+
+ if command_name == "human_feedback":
+ result = ActionInterruptedByHuman(feedback=user_input)
+ self.log_cycle_handler.log_cycle(
+ self.ai_profile.ai_name,
+ self.created_at,
+ self.config.cycle_count,
+ user_input,
+ USER_INPUT_FILE_NAME,
+ )
+
+ else:
+ for plugin in self.config.plugins:
+ if not plugin.can_handle_pre_command():
+ continue
+ command_name, command_args = plugin.pre_command(
+ command_name, command_args
+ )
+
+ try:
+ return_value = await execute_command(
+ command_name=command_name,
+ arguments=command_args,
+ agent=self,
+ )
+
+ # Intercept ContextItem if one is returned by the command
+ if type(return_value) is tuple and isinstance(
+ return_value[1], ContextItem
+ ):
+ context_item = return_value[1]
+ return_value = return_value[0]
+ logger.debug(
+ f"Command {command_name} returned a ContextItem: {context_item}"
+ )
+ self.context.add(context_item)
+
+ result = ActionSuccessResult(outputs=return_value)
+ except AgentTerminated:
+ raise
+ except AgentException as e:
+ result = ActionErrorResult.from_exception(e)
+ logger.warning(
+ f"{command_name}({fmt_kwargs(command_args)}) raised an error: {e}"
+ )
+
+ result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name)
+ if result_tlength > self.send_token_limit // 3:
+ result = ActionErrorResult(
+ reason=f"Command {command_name} returned too much output. "
+ "Do not execute this command again with the same arguments."
+ )
+
+ for plugin in self.config.plugins:
+ if not plugin.can_handle_post_command():
+ continue
+ if result.status == "success":
+ result.outputs = plugin.post_command(command_name, result.outputs)
+ elif result.status == "error":
+ result.reason = plugin.post_command(command_name, result.reason)
+
+ # Update action history
+ self.event_history.register_result(result)
+
+ return result
+
+
+#############
+# Utilities #
+#############
+
+
+async def execute_command(
+ command_name: str,
+ arguments: dict[str, str],
+ agent: Agent,
+) -> CommandOutput:
+ """Execute the command and return the result
+
+ Args:
+ command_name (str): The name of the command to execute
+ arguments (dict): The arguments for the command
+ agent (Agent): The agent that is executing the command
+
+ Returns:
+ CommandOutput: The result of the command
+ """
+ # Execute a native command with the same name or alias, if it exists
+ if command := agent.command_registry.get_command(command_name):
+ try:
+ result = command(**arguments, agent=agent)
+ if inspect.isawaitable(result):
+ return await result
+ return result
+ except AgentException:
+ raise
+ except Exception as e:
+ raise CommandExecutionError(str(e))
+
+ # Handle non-native commands (e.g. from plugins)
+ if agent._prompt_scratchpad:
+ for name, command in agent._prompt_scratchpad.commands.items():
+ if (
+ command_name == name
+ or command_name.lower() == command.description.lower()
+ ):
+ try:
+ return command.method(**arguments)
+ except AgentException:
+ raise
+ except Exception as e:
+ raise CommandExecutionError(str(e))
+
+ raise UnknownCommandError(
+ f"Cannot execute command '{command_name}': unknown command."
+ )
diff --git a/autogpts/autogpt/autogpt/agents/base.py b/autogpts/autogpt/autogpt/agents/base.py
new file mode 100644
index 000000000..846427ae7
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/base.py
@@ -0,0 +1,431 @@
+from __future__ import annotations
+
+import logging
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Optional
+
+from auto_gpt_plugin_template import AutoGPTPluginTemplate
+from pydantic import Field, validator
+
+if TYPE_CHECKING:
+ from autogpt.config import Config
+ from autogpt.core.prompting.base import PromptStrategy
+ from autogpt.core.resource.model_providers.schema import (
+ AssistantChatMessage,
+ ChatModelInfo,
+ ChatModelProvider,
+ ChatModelResponse,
+ )
+ from autogpt.models.command_registry import CommandRegistry
+
+from autogpt.agents.utils.prompt_scratchpad import PromptScratchpad
+from autogpt.config import ConfigBuilder
+from autogpt.config.ai_directives import AIDirectives
+from autogpt.config.ai_profile import AIProfile
+from autogpt.core.configuration import (
+ Configurable,
+ SystemConfiguration,
+ SystemSettings,
+ UserConfigurable,
+)
+from autogpt.core.prompting.schema import (
+ ChatMessage,
+ ChatPrompt,
+ CompletionModelFunction,
+)
+from autogpt.core.resource.model_providers.openai import (
+ OPEN_AI_CHAT_MODELS,
+ OpenAIModelName,
+)
+from autogpt.core.runner.client_lib.logging.helpers import dump_prompt
+from autogpt.llm.providers.openai import get_openai_command_specs
+from autogpt.models.action_history import ActionResult, EpisodicActionHistory
+from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
+
+from .utils.agent_file_manager import AgentFileManager
+
+logger = logging.getLogger(__name__)
+
+CommandName = str
+CommandArgs = dict[str, str]
+AgentThoughts = dict[str, Any]
+
+
+class BaseAgentConfiguration(SystemConfiguration):
+ allow_fs_access: bool = UserConfigurable(default=False)
+
+ fast_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT3_16k)
+ smart_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT4)
+ use_functions_api: bool = UserConfigurable(default=False)
+
+ default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT
+ """The default instruction passed to the AI for a thinking cycle."""
+
+ big_brain: bool = UserConfigurable(default=True)
+ """
+ Whether this agent uses the configured smart LLM (default) to think,
+ as opposed to the configured fast LLM. Enabling this disables hybrid mode.
+ """
+
+ cycle_budget: Optional[int] = 1
+ """
+ The number of cycles that the agent is allowed to run unsupervised.
+
+ `None` for unlimited continuous execution,
+ `1` to require user approval for every step,
+ `0` to stop the agent.
+ """
+
+ cycles_remaining = cycle_budget
+ """The number of cycles remaining within the `cycle_budget`."""
+
+ cycle_count = 0
+ """The number of cycles that the agent has run since its initialization."""
+
+ send_token_limit: Optional[int] = None
+ """
+ The token limit for prompt construction. Should leave room for the completion;
+ defaults to 75% of `llm.max_tokens`.
+ """
+
+ summary_max_tlength: Optional[int] = None
+ # TODO: move to ActionHistoryConfiguration
+
+ plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)
+
+ class Config:
+ arbitrary_types_allowed = True # Necessary for plugins
+
+ @validator("plugins", each_item=True)
+ def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
+ assert issubclass(
+ p.__class__, AutoGPTPluginTemplate
+ ), f"{p} does not subclass AutoGPTPluginTemplate"
+ assert (
+ p.__class__.__name__ != "AutoGPTPluginTemplate"
+ ), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
+ return p
+
+ @validator("use_functions_api")
+ def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
+ if v:
+ smart_llm = values["smart_llm"]
+ fast_llm = values["fast_llm"]
+ assert all(
+ [
+ not any(s in name for s in {"-0301", "-0314"})
+ for name in {smart_llm, fast_llm}
+ ]
+ ), (
+ f"Model {smart_llm} does not support OpenAI Functions. "
+ "Please disable OPENAI_FUNCTIONS or choose a suitable model."
+ )
+ return v
+
+
+class BaseAgentSettings(SystemSettings):
+ agent_id: str = ""
+ agent_data_dir: Optional[Path] = None
+
+ ai_profile: AIProfile = Field(default_factory=lambda: AIProfile(ai_name="AutoGPT"))
+ """The AI profile or "personality" of the agent."""
+
+ directives: AIDirectives = Field(
+ default_factory=lambda: AIDirectives.from_file(
+ ConfigBuilder.default_settings.prompt_settings_file
+ )
+ )
+ """Directives (general instructional guidelines) for the agent."""
+
+ task: str = "Terminate immediately" # FIXME: placeholder for forge.sdk.schema.Task
+ """The user-given task that the agent is working on."""
+
+ config: BaseAgentConfiguration = Field(default_factory=BaseAgentConfiguration)
+ """The configuration for this BaseAgent subsystem instance."""
+
+ history: EpisodicActionHistory = Field(default_factory=EpisodicActionHistory)
+ """(STATE) The action history of the agent."""
+
+ def save_to_json_file(self, file_path: Path) -> None:
+ with file_path.open("w") as f:
+ f.write(self.json())
+
+ @classmethod
+ def load_from_json_file(cls, file_path: Path):
+ return cls.parse_file(file_path)
+
+
+class BaseAgent(Configurable[BaseAgentSettings], ABC):
+ """Base class for all AutoGPT agent classes."""
+
+ ThoughtProcessOutput = tuple[CommandName, CommandArgs, AgentThoughts]
+
+ default_settings = BaseAgentSettings(
+ name="BaseAgent",
+ description=__doc__,
+ )
+
+ def __init__(
+ self,
+ settings: BaseAgentSettings,
+ llm_provider: ChatModelProvider,
+ prompt_strategy: PromptStrategy,
+ command_registry: CommandRegistry,
+ legacy_config: Config,
+ ):
+ self.state = settings
+ self.config = settings.config
+ self.ai_profile = settings.ai_profile
+ self.directives = settings.directives
+ self.event_history = settings.history
+
+ self.legacy_config = legacy_config
+ """LEGACY: Monolithic application configuration."""
+
+ self.file_manager: AgentFileManager = (
+ AgentFileManager(settings.agent_data_dir)
+ if settings.agent_data_dir
+ else None
+ ) # type: ignore
+
+ self.llm_provider = llm_provider
+
+ self.prompt_strategy = prompt_strategy
+
+ self.command_registry = command_registry
+ """The registry containing all commands available to the agent."""
+
+ self._prompt_scratchpad: PromptScratchpad | None = None
+
+ # Support multi-inheritance and mixins for subclasses
+ super(BaseAgent, self).__init__()
+
+ logger.debug(f"Created {__class__} '{self.ai_profile.ai_name}'")
+
+ def set_id(self, new_id: str, new_agent_dir: Optional[Path] = None):
+ self.state.agent_id = new_id
+ if self.state.agent_data_dir:
+ if not new_agent_dir:
+ raise ValueError(
+ "new_agent_dir must be specified if one is currently configured"
+ )
+ self.attach_fs(new_agent_dir)
+
+ def attach_fs(self, agent_dir: Path) -> AgentFileManager:
+ self.file_manager = AgentFileManager(agent_dir)
+ self.file_manager.initialize()
+ self.state.agent_data_dir = agent_dir
+ return self.file_manager
+
+ @property
+ def llm(self) -> ChatModelInfo:
+ """The LLM that the agent uses to think."""
+ llm_name = (
+ self.config.smart_llm if self.config.big_brain else self.config.fast_llm
+ )
+ return OPEN_AI_CHAT_MODELS[llm_name]
+
+ @property
+ def send_token_limit(self) -> int:
+ return self.config.send_token_limit or self.llm.max_tokens * 3 // 4
+
+ async def propose_action(self) -> ThoughtProcessOutput:
+ """Proposes the next action to execute, based on the task and current state.
+
+ Returns:
+ The command name and arguments, if any, and the agent's thoughts.
+ """
+ assert self.file_manager, (
+ f"Agent has no FileManager: call {__class__.__name__}.attach_fs()"
+ " before trying to run the agent."
+ )
+
+ # Scratchpad as surrogate PromptGenerator for plugin hooks
+ self._prompt_scratchpad = PromptScratchpad()
+
+ prompt: ChatPrompt = self.build_prompt(scratchpad=self._prompt_scratchpad)
+ prompt = self.on_before_think(prompt, scratchpad=self._prompt_scratchpad)
+
+ logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
+ response = await self.llm_provider.create_chat_completion(
+ prompt.messages,
+ functions=get_openai_command_specs(
+ self.command_registry.list_available_commands(self)
+ )
+ + list(self._prompt_scratchpad.commands.values())
+ if self.config.use_functions_api
+ else [],
+ model_name=self.llm.name,
+ completion_parser=lambda r: self.parse_and_process_response(
+ r,
+ prompt,
+ scratchpad=self._prompt_scratchpad,
+ ),
+ )
+ self.config.cycle_count += 1
+
+ return self.on_response(
+ llm_response=response,
+ prompt=prompt,
+ scratchpad=self._prompt_scratchpad,
+ )
+
+ @abstractmethod
+ async def execute(
+ self,
+ command_name: str,
+ command_args: dict[str, str] = {},
+ user_input: str = "",
+ ) -> ActionResult:
+ """Executes the given command, if any, and returns the agent's response.
+
+ Params:
+ command_name: The name of the command to execute, if any.
+ command_args: The arguments to pass to the command, if any.
+ user_input: The user's input, if any.
+
+ Returns:
+ ActionResult: An object representing the result(s) of the command.
+ """
+ ...
+
+ def build_prompt(
+ self,
+ scratchpad: PromptScratchpad,
+ extra_commands: Optional[list[CompletionModelFunction]] = None,
+ extra_messages: Optional[list[ChatMessage]] = None,
+ **extras,
+ ) -> ChatPrompt:
+ """Constructs a prompt using `self.prompt_strategy`.
+
+ Params:
+ scratchpad: An object for plugins to write additional prompt elements to.
+ (E.g. commands, constraints, best practices)
+ extra_commands: Additional commands that the agent has access to.
+ extra_messages: Additional messages to include in the prompt.
+ """
+ if not extra_commands:
+ extra_commands = []
+ if not extra_messages:
+ extra_messages = []
+
+ # Apply additions from plugins
+ for plugin in self.config.plugins:
+ if not plugin.can_handle_post_prompt():
+ continue
+ plugin.post_prompt(scratchpad)
+ ai_directives = self.directives.copy(deep=True)
+ ai_directives.resources += scratchpad.resources
+ ai_directives.constraints += scratchpad.constraints
+ ai_directives.best_practices += scratchpad.best_practices
+ extra_commands += list(scratchpad.commands.values())
+
+ prompt = self.prompt_strategy.build_prompt(
+ task=self.state.task,
+ ai_profile=self.ai_profile,
+ ai_directives=ai_directives,
+ commands=get_openai_command_specs(
+ self.command_registry.list_available_commands(self)
+ )
+ + extra_commands,
+ event_history=self.event_history,
+ max_prompt_tokens=self.send_token_limit,
+ count_tokens=lambda x: self.llm_provider.count_tokens(x, self.llm.name),
+ count_message_tokens=lambda x: self.llm_provider.count_message_tokens(
+ x, self.llm.name
+ ),
+ extra_messages=extra_messages,
+ **extras,
+ )
+
+ return prompt
+
+ def on_before_think(
+ self,
+ prompt: ChatPrompt,
+ scratchpad: PromptScratchpad,
+ ) -> ChatPrompt:
+ """Called after constructing the prompt but before executing it.
+
+ Calls the `on_planning` hook of any enabled and capable plugins, adding their
+ output to the prompt.
+
+ Params:
+ prompt: The prompt that is about to be executed.
+ scratchpad: An object for plugins to write additional prompt elements to.
+ (E.g. commands, constraints, best practices)
+
+ Returns:
+ The prompt to execute
+ """
+ current_tokens_used = self.llm_provider.count_message_tokens(
+ prompt.messages, self.llm.name
+ )
+ plugin_count = len(self.config.plugins)
+ for i, plugin in enumerate(self.config.plugins):
+ if not plugin.can_handle_on_planning():
+ continue
+ plugin_response = plugin.on_planning(scratchpad, prompt.raw())
+ if not plugin_response:
+ continue
+ message_to_add = ChatMessage.system(plugin_response)
+ tokens_to_add = self.llm_provider.count_message_tokens(
+ message_to_add, self.llm.name
+ )
+ if current_tokens_used + tokens_to_add > self.send_token_limit:
+ logger.debug(f"Plugin response too long, skipping: {plugin_response}")
+ logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
+ break
+ prompt.messages.insert(
+ -1, message_to_add
+ ) # HACK: assumes cycle instruction to be at the end
+ current_tokens_used += tokens_to_add
+ return prompt
+
+ def on_response(
+ self,
+ llm_response: ChatModelResponse,
+ prompt: ChatPrompt,
+ scratchpad: PromptScratchpad,
+ ) -> ThoughtProcessOutput:
+ """Called upon receiving a response from the chat model.
+
+ Calls `self.parse_and_process_response()`.
+
+ Params:
+ llm_response: The raw response from the chat model.
+ prompt: The prompt that was executed.
+ scratchpad: An object containing additional prompt elements from plugins.
+ (E.g. commands, constraints, best practices)
+
+ Returns:
+ The parsed command name and command args, if any, and the agent thoughts.
+ """
+
+ return llm_response.parsed_result
+
+ # TODO: update memory/context
+
+ @abstractmethod
+ def parse_and_process_response(
+ self,
+ llm_response: AssistantChatMessage,
+ prompt: ChatPrompt,
+ scratchpad: PromptScratchpad,
+ ) -> ThoughtProcessOutput:
+ """Validate, parse & process the LLM's response.
+
+ Must be implemented by derivative classes: no base implementation is provided,
+ since the implementation depends on the role of the derivative Agent.
+
+ Params:
+ llm_response: The raw response from the chat model.
+ prompt: The prompt that was executed.
+ scratchpad: An object containing additional prompt elements from plugins.
+ (E.g. commands, constraints, best practices)
+
+ Returns:
+ The parsed command name and command args, if any, and the agent thoughts.
+ """
+ pass
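Taken together, this class fixes the agent's think/act contract: `propose_action()` yields a `(command, args, thoughts)` tuple and `execute()` carries it out. A minimal driver sketch of that cycle (a concrete `BaseAgent` subclass and its dependencies are assumed to exist):

    from pathlib import Path

    from autogpt.agents.base import BaseAgent

    async def run_one_cycle(agent: BaseAgent, agent_dir: Path) -> None:
        # attach_fs() must run first; propose_action() asserts a FileManager
        if not agent.file_manager:
            agent.attach_fs(agent_dir)
        command, args, thoughts = await agent.propose_action()
        result = await agent.execute(command, args)  # subclass-provided ActionResult
        # Persist the (STATE) fields, e.g. the episodic action history
        agent.state.save_to_json_file(agent.file_manager.state_file_path)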
diff --git a/autogpts/autogpt/autogpt/agents/features/context.py b/autogpts/autogpt/autogpt/agents/features/context.py
new file mode 100644
index 000000000..748635ddc
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/features/context.py
@@ -0,0 +1,82 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Optional
+
+if TYPE_CHECKING:
+ from autogpt.core.prompting import ChatPrompt
+ from autogpt.models.context_item import ContextItem
+
+ from ..base import BaseAgent
+
+from autogpt.core.resource.model_providers import ChatMessage
+
+
+class AgentContext:
+ items: list[ContextItem]
+
+ def __init__(self, items: Optional[list[ContextItem]] = None):
+ self.items = items or []
+
+ def __bool__(self) -> bool:
+ return len(self.items) > 0
+
+ def __contains__(self, item: ContextItem) -> bool:
+ return any(i.source == item.source for i in self.items)
+
+ def add(self, item: ContextItem) -> None:
+ self.items.append(item)
+
+ def close(self, index: int) -> None:
+ self.items.pop(index - 1)
+
+ def clear(self) -> None:
+ self.items.clear()
+
+ def format_numbered(self) -> str:
+ return "\n\n".join([f"{i}. {c.fmt()}" for i, c in enumerate(self.items, 1)])
+
+
+class ContextMixin:
+ """Mixin that adds context support to a BaseAgent subclass"""
+
+ context: AgentContext
+
+ def __init__(self, **kwargs: Any):
+ self.context = AgentContext()
+
+ super(ContextMixin, self).__init__(**kwargs)
+
+ def build_prompt(
+ self,
+ *args: Any,
+ extra_messages: Optional[list[ChatMessage]] = None,
+ **kwargs: Any,
+ ) -> ChatPrompt:
+ if not extra_messages:
+ extra_messages = []
+
+ # Add context section to prompt
+ if self.context:
+ extra_messages.insert(
+ 0,
+ ChatMessage.system(
+ "## Context\n"
+ f"{self.context.format_numbered()}\n\n"
+ "When a context item is no longer needed and you are not done yet, "
+ "you can hide the item by specifying its number in the list above "
+ "to `hide_context_item`.",
+ ),
+ )
+
+ return super(ContextMixin, self).build_prompt(
+ *args,
+ extra_messages=extra_messages,
+ **kwargs,
+ ) # type: ignore
+
+
+def get_agent_context(agent: BaseAgent) -> AgentContext | None:
+ if isinstance(agent, ContextMixin):
+ return agent.context
+
+ return None
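Because `ContextMixin.__init__` forwards `**kwargs` through a cooperative `super()` call, the mixin must be listed before `BaseAgent` so the MRO reaches both initializers. A hypothetical composition:

    from autogpt.agents.base import BaseAgent
    from autogpt.agents.features.context import ContextMixin, get_agent_context

    class ContextAgent(ContextMixin, BaseAgent):  # mixin first, base class last
        ...  # execute() and parse_and_process_response() still to be implemented

    # ctx = get_agent_context(agent)   # None for agents without the mixin
    # if ctx is not None and item not in ctx:
    #     ctx.add(item)                # item: a ContextItem (hypothetical)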
diff --git a/autogpts/autogpt/autogpt/agents/features/file_workspace.py b/autogpts/autogpt/autogpt/agents/features/file_workspace.py
new file mode 100644
index 000000000..22ab8119d
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/features/file_workspace.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from pathlib import Path
+
+ from ..base import BaseAgent, Config
+
+from autogpt.file_workspace import (
+ FileWorkspace,
+ FileWorkspaceBackendName,
+ get_workspace,
+)
+
+from ..base import AgentFileManager, BaseAgentSettings
+
+
+class FileWorkspaceMixin:
+ """Mixin that adds workspace support to a class"""
+
+ workspace: FileWorkspace | None = None
+ """Workspace that the agent has access to, e.g. for reading/writing files."""
+
+ def __init__(self, **kwargs):
+ # Initialize other bases first, because we need the config from BaseAgent
+ super(FileWorkspaceMixin, self).__init__(**kwargs)
+
+ file_manager: AgentFileManager = getattr(self, "file_manager")
+ if not file_manager:
+ return
+
+ self._setup_workspace()
+
+ def attach_fs(self, agent_dir: Path):
+ res = super(FileWorkspaceMixin, self).attach_fs(agent_dir)
+
+ self._setup_workspace()
+
+ return res
+
+ def _setup_workspace(self) -> None:
+ settings: BaseAgentSettings = getattr(self, "state")
+ assert settings.agent_id, "Cannot attach workspace to anonymous agent"
+ app_config: Config = getattr(self, "legacy_config")
+ file_manager: AgentFileManager = getattr(self, "file_manager")
+
+ ws_backend = app_config.workspace_backend
+ local = ws_backend == FileWorkspaceBackendName.LOCAL
+ workspace = get_workspace(
+ backend=ws_backend,
+ id=settings.agent_id if not local else "",
+ root_path=file_manager.root / "workspace" if local else None,
+ )
+ if local and settings.config.allow_fs_access:
+ workspace._restrict_to_root = False # type: ignore
+ workspace.initialize()
+ self.workspace = workspace
+
+
+def get_agent_workspace(agent: BaseAgent) -> FileWorkspace | None:
+ if isinstance(agent, FileWorkspaceMixin):
+ return agent.workspace
+
+ return None
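As the comment in `__init__` notes, the workspace can only be set up after `BaseAgent` has populated `file_manager`, `state`, and `legacy_config`, which listing the mixin first guarantees. A retrieval sketch (the `read_file` signature is inferred from its usage elsewhere in this diff):

    from autogpt.agents.features.file_workspace import get_agent_workspace

    # workspace = get_agent_workspace(agent)   # None unless the mixin is present
    # if workspace:
    #     content = workspace.read_file("notes.txt", binary=False)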
diff --git a/autogpts/autogpt/autogpt/agents/features/watchdog.py b/autogpts/autogpt/autogpt/agents/features/watchdog.py
new file mode 100644
index 000000000..8113def25
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/features/watchdog.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import logging
+from contextlib import ExitStack
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ..base import BaseAgentConfiguration
+
+from autogpt.models.action_history import EpisodicActionHistory
+
+from ..base import BaseAgent
+
+logger = logging.getLogger(__name__)
+
+
+class WatchdogMixin:
+ """
+ Mixin that adds a watchdog feature to an agent class. Whenever the agent starts
+ looping, the watchdog will switch from the FAST_LLM to the SMART_LLM and re-think.
+ """
+
+ config: BaseAgentConfiguration
+ event_history: EpisodicActionHistory
+
+ def __init__(self, **kwargs) -> None:
+ # Initialize other bases first, because we need the event_history from BaseAgent
+ super(WatchdogMixin, self).__init__(**kwargs)
+
+ if not isinstance(self, BaseAgent):
+ raise NotImplementedError(
+ f"{__class__.__name__} can only be applied to BaseAgent derivatives"
+ )
+
+ async def propose_action(self, *args, **kwargs) -> BaseAgent.ThoughtProcessOutput:
+ command_name, command_args, thoughts = await super(
+ WatchdogMixin, self
+ ).propose_action(*args, **kwargs)
+
+ if not self.config.big_brain and self.config.fast_llm != self.config.smart_llm:
+ previous_command, previous_command_args = None, None
+ if len(self.event_history) > 1:
+ # Detect repetitive commands
+ previous_cycle = self.event_history.episodes[
+ self.event_history.cursor - 1
+ ]
+ previous_command = previous_cycle.action.name
+ previous_command_args = previous_cycle.action.args
+
+ rethink_reason = ""
+
+ if not command_name:
+ rethink_reason = "AI did not specify a command"
+ elif (
+ command_name == previous_command
+ and command_args == previous_command_args
+ ):
+ rethink_reason = f"Repititive command detected ({command_name})"
+
+ if rethink_reason:
+ logger.info(f"{rethink_reason}, re-thinking with SMART_LLM...")
+ with ExitStack() as stack:
+
+ @stack.callback
+ def restore_state() -> None:
+ # Executed after exiting the ExitStack context
+ self.config.big_brain = False
+
+ # Remove partial record of current cycle
+ self.event_history.rewind()
+
+ # Switch to SMART_LLM and re-think
+ self.config.big_brain = True
+ return await self.propose_action(*args, **kwargs)
+
+ return command_name, command_args, thoughts
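A hypothetical composition showing the conditions under which the watchdog fires:

    from autogpt.agents.base import BaseAgent
    from autogpt.agents.features.watchdog import WatchdogMixin

    class WatchedAgent(WatchdogMixin, BaseAgent):  # hypothetical subclass
        ...

    # With config.big_brain = False and config.fast_llm != config.smart_llm
    # (e.g. "gpt-3.5-turbo" vs "gpt-4"; model names assumed), a missing or
    # repeated command makes propose_action() rewind the partial episode and
    # re-run itself with big_brain temporarily enabled.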
diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py
new file mode 100644
index 000000000..e8c726c18
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py
@@ -0,0 +1,484 @@
+from __future__ import annotations
+
+import json
+import platform
+import re
+from logging import Logger
+from typing import TYPE_CHECKING, Callable, Optional
+
+import distro
+
+if TYPE_CHECKING:
+ from autogpt.agents.agent import Agent
+ from autogpt.models.action_history import Episode
+
+from autogpt.agents.utils.exceptions import InvalidAgentResponseError
+from autogpt.config import AIDirectives, AIProfile
+from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable
+from autogpt.core.prompting import (
+ ChatPrompt,
+ LanguageModelClassification,
+ PromptStrategy,
+)
+from autogpt.core.resource.model_providers.schema import (
+ AssistantChatMessage,
+ ChatMessage,
+ CompletionModelFunction,
+)
+from autogpt.core.utils.json_schema import JSONSchema
+from autogpt.json_utils.utilities import extract_dict_from_response
+from autogpt.prompts.utils import format_numbered_list, indent
+
+
+class OneShotAgentPromptConfiguration(SystemConfiguration):
+ DEFAULT_BODY_TEMPLATE: str = (
+ "## Constraints\n"
+ "You operate within the following constraints:\n"
+ "{constraints}\n"
+ "\n"
+ "## Resources\n"
+ "You can leverage access to the following resources:\n"
+ "{resources}\n"
+ "\n"
+ "## Commands\n"
+ "These are the ONLY commands you can use."
+ " Any action you perform must be possible through one of these commands:\n"
+ "{commands}\n"
+ "\n"
+ "## Best practices\n"
+ "{best_practices}"
+ )
+
+ DEFAULT_CHOOSE_ACTION_INSTRUCTION: str = (
+ "Determine exactly one command to use next based on the given goals "
+ "and the progress you have made so far, "
+ "and respond using the JSON schema specified previously:"
+ )
+
+ DEFAULT_RESPONSE_SCHEMA = JSONSchema(
+ type=JSONSchema.Type.OBJECT,
+ properties={
+ "thoughts": JSONSchema(
+ type=JSONSchema.Type.OBJECT,
+ required=True,
+ properties={
+ "observations": JSONSchema(
+ description=(
+ "Relevant observations from your last action (if any)"
+ ),
+ type=JSONSchema.Type.STRING,
+ required=False,
+ ),
+ "text": JSONSchema(
+ description="Thoughts",
+ type=JSONSchema.Type.STRING,
+ required=True,
+ ),
+ "reasoning": JSONSchema(
+ type=JSONSchema.Type.STRING,
+ required=True,
+ ),
+ "self_criticism": JSONSchema(
+ description="Constructive self-criticism",
+ type=JSONSchema.Type.STRING,
+ required=True,
+ ),
+ "plan": JSONSchema(
+ description=(
+ "Short markdown-style bullet list that conveys the "
+ "long-term plan"
+ ),
+ type=JSONSchema.Type.STRING,
+ required=True,
+ ),
+ "speak": JSONSchema(
+ description="Summary of thoughts, to say to user",
+ type=JSONSchema.Type.STRING,
+ required=True,
+ ),
+ },
+ ),
+ "command": JSONSchema(
+ type=JSONSchema.Type.OBJECT,
+ required=True,
+ properties={
+ "name": JSONSchema(
+ type=JSONSchema.Type.STRING,
+ required=True,
+ ),
+ "args": JSONSchema(
+ type=JSONSchema.Type.OBJECT,
+ required=True,
+ ),
+ },
+ ),
+ },
+ )
+
+ body_template: str = UserConfigurable(default=DEFAULT_BODY_TEMPLATE)
+ response_schema: dict = UserConfigurable(
+ default_factory=DEFAULT_RESPONSE_SCHEMA.to_dict
+ )
+ choose_action_instruction: str = UserConfigurable(
+ default=DEFAULT_CHOOSE_ACTION_INSTRUCTION
+ )
+ use_functions_api: bool = UserConfigurable(default=False)
+
+ #########
+ # State #
+ #########
+ # progress_summaries: dict[tuple[int, int], str] = Field(
+ # default_factory=lambda: {(0, 0): ""}
+ # )
+
+
+class OneShotAgentPromptStrategy(PromptStrategy):
+ default_configuration: OneShotAgentPromptConfiguration = (
+ OneShotAgentPromptConfiguration()
+ )
+
+ def __init__(
+ self,
+ configuration: OneShotAgentPromptConfiguration,
+ logger: Logger,
+ ):
+ self.config = configuration
+ self.response_schema = JSONSchema.from_dict(configuration.response_schema)
+ self.logger = logger
+
+ @property
+ def model_classification(self) -> LanguageModelClassification:
+ return LanguageModelClassification.FAST_MODEL # FIXME: dynamic switching
+
+ def build_prompt(
+ self,
+ *,
+ task: str,
+ ai_profile: AIProfile,
+ ai_directives: AIDirectives,
+ commands: list[CompletionModelFunction],
+ event_history: list[Episode],
+ include_os_info: bool,
+ max_prompt_tokens: int,
+ count_tokens: Callable[[str], int],
+ count_message_tokens: Callable[[ChatMessage | list[ChatMessage]], int],
+ extra_messages: Optional[list[ChatMessage]] = None,
+ **extras,
+ ) -> ChatPrompt:
+ """Constructs and returns a prompt with the following structure:
+ 1. System prompt
+ 2. Message history of the agent, truncated & prepended with running summary
+ as needed
+ 3. The `choose_action_instruction` as the final user message
+ """
+ if not extra_messages:
+ extra_messages = []
+
+ system_prompt = self.build_system_prompt(
+ ai_profile=ai_profile,
+ ai_directives=ai_directives,
+ commands=commands,
+ include_os_info=include_os_info,
+ )
+ system_prompt_tlength = count_message_tokens(ChatMessage.system(system_prompt))
+
+ user_task = f'"""{task}"""'
+ user_task_tlength = count_message_tokens(ChatMessage.user(user_task))
+
+ response_format_instr = self.response_format_instruction(
+ self.config.use_functions_api
+ )
+ extra_messages.append(ChatMessage.system(response_format_instr))
+
+ final_instruction_msg = ChatMessage.user(self.config.choose_action_instruction)
+ final_instruction_tlength = count_message_tokens(final_instruction_msg)
+
+ if event_history:
+ progress = self.compile_progress(
+ event_history,
+ count_tokens=count_tokens,
+ max_tokens=(
+ max_prompt_tokens
+ - system_prompt_tlength
+ - user_task_tlength
+ - final_instruction_tlength
+ - count_message_tokens(extra_messages)
+ ),
+ )
+ extra_messages.insert(
+ 0,
+ ChatMessage.system(f"## Progress\n\n{progress}"),
+ )
+
+ prompt = ChatPrompt(
+ messages=[
+ ChatMessage.system(system_prompt),
+ ChatMessage.user(user_task),
+ *extra_messages,
+ final_instruction_msg,
+ ],
+ )
+
+ return prompt
+
+ def build_system_prompt(
+ self,
+ ai_profile: AIProfile,
+ ai_directives: AIDirectives,
+ commands: list[CompletionModelFunction],
+ include_os_info: bool,
+ ) -> str:
+ system_prompt_parts = (
+ self._generate_intro_prompt(ai_profile)
+ + (self._generate_os_info() if include_os_info else [])
+ + [
+ self.config.body_template.format(
+ constraints=format_numbered_list(
+ ai_directives.constraints
+ + self._generate_budget_constraint(ai_profile.api_budget)
+ ),
+ resources=format_numbered_list(ai_directives.resources),
+ commands=self._generate_commands_list(commands),
+ best_practices=format_numbered_list(ai_directives.best_practices),
+ )
+ ]
+ + [
+ "## Your Task\n"
+ "The user will specify a task for you to execute, in triple quotes,"
+ " in the next message. Your job is to complete the task while following"
+ " your directives as given above, and terminate when your task is done."
+ ]
+ )
+
+ # Join non-empty parts together into paragraph format
+ return "\n\n".join(filter(None, system_prompt_parts)).strip("\n")
+
+ def compile_progress(
+ self,
+ episode_history: list[Episode],
+ max_tokens: Optional[int] = None,
+ count_tokens: Optional[Callable[[str], int]] = None,
+ ) -> str:
+ if max_tokens and not count_tokens:
+ raise ValueError("count_tokens is required if max_tokens is set")
+
+ steps: list[str] = []
+ tokens: int = 0
+ # start: int = len(episode_history)
+
+ for i, c in reversed(list(enumerate(episode_history))):
+ step = f"### Step {i+1}: Executed `{c.action.format_call()}`\n"
+ step += f'- **Reasoning:** "{c.action.reasoning}"\n'
+ step += (
+ f"- **Status:** `{c.result.status if c.result else 'did_not_finish'}`\n"
+ )
+ if c.result:
+ if c.result.status == "success":
+ result = str(c.result)
+ result = "\n" + indent(result) if "\n" in result else result
+ step += f"- **Output:** {result}"
+ elif c.result.status == "error":
+ step += f"- **Reason:** {c.result.reason}\n"
+ if c.result.error:
+ step += f"- **Error:** {c.result.error}\n"
+ elif c.result.status == "interrupted_by_human":
+ step += f"- **Feedback:** {c.result.feedback}\n"
+
+ if max_tokens and count_tokens:
+ step_tokens = count_tokens(step)
+ if tokens + step_tokens > max_tokens:
+ break
+ tokens += step_tokens
+
+ steps.insert(0, step)
+ # start = i
+
+ # # TODO: summarize remaining
+ # part = slice(0, start)
+
+ return "\n\n".join(steps)
+
+ def response_format_instruction(self, use_functions_api: bool) -> str:
+ response_schema = self.response_schema.copy(deep=True)
+ if (
+ use_functions_api
+ and response_schema.properties
+ and "command" in response_schema.properties
+ ):
+ del response_schema.properties["command"]
+
+ # Unindent the schema to save prompt tokens
+ response_format = re.sub(
+ r"\n\s+",
+ "\n",
+ response_schema.to_typescript_object_interface("Response"),
+ )
+
+ instruction = (
+ "Respond with pure JSON containing your thoughts, " "and invoke a tool."
+ if use_functions_api
+ else "Respond with pure JSON."
+ )
+
+ return (
+ f"{instruction} "
+ "The JSON object should be compatible with the TypeScript type `Response` "
+ f"from the following:\n{response_format}"
+ )
+
+ def _generate_intro_prompt(self, ai_profile: AIProfile) -> list[str]:
+ """Generates the introduction part of the prompt.
+
+ Returns:
+ list[str]: A list of strings forming the introduction part of the prompt.
+ """
+ return [
+ f"You are {ai_profile.ai_name}, {ai_profile.ai_role.rstrip('.')}.",
+ "Your decisions must always be made independently without seeking "
+ "user assistance. Play to your strengths as an LLM and pursue "
+ "simple strategies with no legal complications.",
+ ]
+
+ def _generate_os_info(self) -> list[str]:
+ """Generates the OS information part of the prompt.
+
+ Returns:
+ list[str]: The OS information part of the prompt.
+ """
+ os_name = platform.system()
+ os_info = (
+ platform.platform(terse=True)
+ if os_name != "Linux"
+ else distro.name(pretty=True)
+ )
+ return [f"The OS you are running on is: {os_info}"]
+
+ def _generate_budget_constraint(self, api_budget: float) -> list[str]:
+ """Generates the budget information part of the prompt.
+
+ Returns:
+ list[str]: The budget information part of the prompt, or an empty list.
+ """
+ if api_budget > 0.0:
+ return [
+ f"It takes money to let you run. "
+ f"Your API budget is ${api_budget:.3f}"
+ ]
+ return []
+
+ def _generate_commands_list(self, commands: list[CompletionModelFunction]) -> str:
+ """Lists the commands available to the agent.
+
+ Params:
+ agent: The agent for which the commands are being listed.
+
+ Returns:
+ str: A string containing a numbered list of commands.
+ """
+ try:
+ return format_numbered_list([cmd.fmt_line() for cmd in commands])
+ except AttributeError:
+ self.logger.warning(f"Formatting commands failed. {commands}")
+ raise
+
+ def parse_response_content(
+ self,
+ response: AssistantChatMessage,
+ ) -> Agent.ThoughtProcessOutput:
+ if not response.content:
+ raise InvalidAgentResponseError("Assistant response has no text content")
+
+ self.logger.debug(
+ "LLM response content:"
+ + (
+ f"\n{response.content}"
+ if "\n" in response.content
+ else f" '{response.content}'"
+ )
+ )
+ assistant_reply_dict = extract_dict_from_response(response.content)
+ self.logger.debug(
+ "Validating object extracted from LLM response:\n"
+ f"{json.dumps(assistant_reply_dict, indent=4)}"
+ )
+
+ _, errors = self.response_schema.validate_object(
+ object=assistant_reply_dict,
+ logger=self.logger,
+ )
+ if errors:
+ raise InvalidAgentResponseError(
+ "Validation of response failed:\n "
+ + ";\n ".join([str(e) for e in errors])
+ )
+
+ # Get command name and arguments
+ command_name, arguments = extract_command(
+ assistant_reply_dict, response, self.config.use_functions_api
+ )
+ return command_name, arguments, assistant_reply_dict
+
+
+#############
+# Utilities #
+#############
+
+
+def extract_command(
+ assistant_reply_json: dict,
+ assistant_reply: AssistantChatMessage,
+ use_openai_functions_api: bool,
+) -> tuple[str, dict[str, str]]:
+ """Parse the response and return the command name and arguments
+
+ Args:
+ assistant_reply_json (dict): The response object from the AI
+ assistant_reply (AssistantChatMessage): The model response from the AI
+ use_openai_functions_api (bool): Whether the OpenAI functions/tools API was used
+
+ Returns:
+ tuple: The command name and arguments
+
+ Raises:
+ InvalidAgentResponseError: If the reply is not valid JSON, not a dict,
+ or is missing a usable command
+ """
+ if use_openai_functions_api:
+ if not assistant_reply.tool_calls:
+ raise InvalidAgentResponseError("No 'tool_calls' in assistant reply")
+ assistant_reply_json["command"] = {
+ "name": assistant_reply.tool_calls[0].function.name,
+ "args": json.loads(assistant_reply.tool_calls[0].function.arguments),
+ }
+ try:
+ if not isinstance(assistant_reply_json, dict):
+ raise InvalidAgentResponseError(
+ f"The previous message sent was not a dictionary {assistant_reply_json}"
+ )
+
+ if "command" not in assistant_reply_json:
+ raise InvalidAgentResponseError("Missing 'command' object in JSON")
+
+ command = assistant_reply_json["command"]
+ if not isinstance(command, dict):
+ raise InvalidAgentResponseError("'command' object is not a dictionary")
+
+ if "name" not in command:
+ raise InvalidAgentResponseError("Missing 'name' field in 'command' object")
+
+ command_name = command["name"]
+
+ # Use an empty dictionary if 'args' field is not present in 'command' object
+ arguments = command.get("args", {})
+
+ return command_name, arguments
+
+ except json.decoder.JSONDecodeError:
+ raise InvalidAgentResponseError("Invalid JSON")
+
+ except Exception as e:
+ raise InvalidAgentResponseError(str(e))
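For illustration, how `extract_command` behaves on a well-formed reply when the functions API is disabled (the reply content is made up; the second argument is unused on this path):

    reply_dict = {
        "thoughts": {"observations": "...", "text": "...", "reasoning": "...",
                     "self_criticism": "...", "plan": "- ...", "speak": "..."},
        "command": {"name": "web_search", "args": {"query": "AutoGPT"}},
    }
    name, args = extract_command(reply_dict, None, use_openai_functions_api=False)  # type: ignore
    assert (name, args) == ("web_search", {"query": "AutoGPT"})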
diff --git a/autogpts/autogpt/autogpt/agents/utils/agent_file_manager.py b/autogpts/autogpt/autogpt/agents/utils/agent_file_manager.py
new file mode 100644
index 000000000..4db788bf2
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/utils/agent_file_manager.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+
+class AgentFileManager:
+ """A class that represents a workspace for an AutoGPT agent."""
+
+ def __init__(self, agent_data_dir: Path):
+ self._root = agent_data_dir.resolve()
+
+ @property
+ def root(self) -> Path:
+ """The root directory of the workspace."""
+ return self._root
+
+ def initialize(self) -> None:
+ self.root.mkdir(exist_ok=True, parents=True)
+ self.init_file_ops_log(self.file_ops_log_path)
+
+ @property
+ def state_file_path(self) -> Path:
+ return self.root / "state.json"
+
+ @property
+ def file_ops_log_path(self) -> Path:
+ return self.root / "file_logger.log"
+
+ @staticmethod
+ def init_file_ops_log(file_logger_path: Path) -> Path:
+ if not file_logger_path.exists():
+ with file_logger_path.open(mode="w", encoding="utf-8") as f:
+ f.write("")
+ return file_logger_path
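A short usage sketch (the directory path is illustrative):

    from pathlib import Path

    from autogpt.agents.utils.agent_file_manager import AgentFileManager

    manager = AgentFileManager(Path("data/agents/AutoGPT-demo"))
    manager.initialize()            # creates the directory and an empty file_logger.log
    print(manager.state_file_path)  # .../data/agents/AutoGPT-demo/state.json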
diff --git a/autogpts/autogpt/autogpt/agents/utils/exceptions.py b/autogpts/autogpt/autogpt/agents/utils/exceptions.py
new file mode 100644
index 000000000..704922b0a
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/utils/exceptions.py
@@ -0,0 +1,60 @@
+from typing import Optional
+
+
+class AgentException(Exception):
+ """Base class for specific exceptions relevant in the execution of Agents"""
+
+ message: str
+
+ hint: Optional[str] = None
+ """A hint which can be passed to the LLM to reduce reoccurrence of this error"""
+
+ def __init__(self, message: str, *args):
+ self.message = message
+ super().__init__(message, *args)
+
+
+class AgentTerminated(AgentException):
+ """The agent terminated or was terminated"""
+
+
+class ConfigurationError(AgentException):
+ """Error caused by invalid, incompatible or otherwise incorrect configuration"""
+
+
+class InvalidAgentResponseError(AgentException):
+ """The LLM deviated from the prescribed response format"""
+
+
+class UnknownCommandError(AgentException):
+ """The AI tried to use an unknown command"""
+
+ hint = "Do not try to use this command again."
+
+
+class DuplicateOperationError(AgentException):
+ """The proposed operation has already been executed"""
+
+
+class CommandExecutionError(AgentException):
+ """An error occurred when trying to execute the command"""
+
+
+class InvalidArgumentError(CommandExecutionError):
+ """The command received an invalid argument"""
+
+
+class OperationNotAllowedError(CommandExecutionError):
+ """The agent is not allowed to execute the proposed operation"""
+
+
+class AccessDeniedError(CommandExecutionError):
+ """The operation failed because access to a required resource was denied"""
+
+
+class CodeExecutionError(CommandExecutionError):
+ """The operation (an attempt to run arbitrary code) returned an error"""
+
+
+class TooMuchOutputError(CommandExecutionError):
+ """The operation generated more output than what the Agent can process"""
diff --git a/autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py b/autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py
new file mode 100644
index 000000000..224493338
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/utils/prompt_scratchpad.py
@@ -0,0 +1,108 @@
+import logging
+from typing import Callable
+
+from pydantic import BaseModel, Field
+
+from autogpt.core.resource.model_providers.schema import CompletionModelFunction
+from autogpt.core.utils.json_schema import JSONSchema
+
+logger = logging.getLogger("PromptScratchpad")
+
+
+class CallableCompletionModelFunction(CompletionModelFunction):
+ method: Callable
+
+
+class PromptScratchpad(BaseModel):
+ commands: dict[str, CallableCompletionModelFunction] = Field(default_factory=dict)
+ resources: list[str] = Field(default_factory=list)
+ constraints: list[str] = Field(default_factory=list)
+ best_practices: list[str] = Field(default_factory=list)
+
+ def add_constraint(self, constraint: str) -> None:
+ """
+ Add a constraint to the constraints list.
+
+ Params:
+ constraint (str): The constraint to be added.
+ """
+ if constraint not in self.constraints:
+ self.constraints.append(constraint)
+
+ def add_command(
+ self,
+ name: str,
+ description: str,
+ params: dict[str, str | dict],
+ function: Callable,
+ ) -> None:
+ """
+ Registers a command.
+
+ *Should only be used by plugins.* Native commands should be added
+ directly to the CommandRegistry.
+
+ Params:
+ name (str): The name of the command (e.g. `command_name`).
+ description (str): The description of the command.
+ params (dict, optional): A dictionary containing argument names and their
+ types. Defaults to an empty dictionary.
+ function (callable, optional): A callable function to be called when
+ the command is executed. Defaults to None.
+ """
+ for p, s in params.items():
+ invalid = False
+ if type(s) is str and s not in JSONSchema.Type._value2member_map_:
+ invalid = True
+ logger.warning(
+ f"Cannot add command '{name}':"
+ f" parameter '{p}' has invalid type '{s}'."
+ f" Valid types are: {JSONSchema.Type._value2member_map_.keys()}"
+ )
+ elif isinstance(s, dict):
+ try:
+ JSONSchema.from_dict(s)
+ except KeyError:
+ invalid = True
+ if invalid:
+ return
+
+ command = CallableCompletionModelFunction(
+ name=name,
+ description=description,
+ parameters={
+ name: JSONSchema(type=JSONSchema.Type._value2member_map_[spec])
+ if type(spec) is str
+ else JSONSchema.from_dict(spec)
+ for name, spec in params.items()
+ },
+ method=function,
+ )
+
+ if name in self.commands:
+ if description == self.commands[name].description:
+ return
+ logger.warning(
+ f"Replacing command {self.commands[name]} with conflicting {command}"
+ )
+ self.commands[name] = command
+
+ def add_resource(self, resource: str) -> None:
+ """
+ Add a resource to the resources list.
+
+ Params:
+ resource (str): The resource to be added.
+ """
+ if resource not in self.resources:
+ self.resources.append(resource)
+
+ def add_best_practice(self, best_practice: str) -> None:
+ """
+ Add an item to the list of best practices.
+
+ Params:
+ best_practice (str): The best practice item to be added.
+ """
+ if best_practice not in self.best_practices:
+ self.best_practices.append(best_practice)
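For example, a plugin could register a command like this in its `post_prompt` hook (the command name and schema are illustrative; invalid parameter types are logged and skipped rather than raising):

    from autogpt.agents.utils.prompt_scratchpad import PromptScratchpad

    scratchpad = PromptScratchpad()
    scratchpad.add_command(
        name="get_weather",                # hypothetical plugin command
        description="Look up the current weather for a city",
        params={"city": "string"},         # a JSONSchema type name, or a schema dict
        function=lambda city: f"Sunny in {city}",
    )
    assert "get_weather" in scratchpad.commands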
diff --git a/autogpts/autogpt/autogpt/app/__init__.py b/autogpts/autogpt/autogpt/app/__init__.py
new file mode 100644
index 000000000..5f5b20ef2
--- /dev/null
+++ b/autogpts/autogpt/autogpt/app/__init__.py
@@ -0,0 +1,6 @@
+from dotenv import load_dotenv
+
+ # Load the user's .env file into environment variables
+load_dotenv(verbose=True, override=True)
+
+del load_dotenv
diff --git a/autogpts/autogpt/autogpt/app/agent_protocol_server.py b/autogpts/autogpt/autogpt/app/agent_protocol_server.py
new file mode 100644
index 000000000..12e54582b
--- /dev/null
+++ b/autogpts/autogpt/autogpt/app/agent_protocol_server.py
@@ -0,0 +1,481 @@
+import logging
+import os
+import pathlib
+from io import BytesIO
+from uuid import uuid4
+
+import orjson
+from fastapi import APIRouter, FastAPI, UploadFile
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import RedirectResponse, StreamingResponse
+from fastapi.staticfiles import StaticFiles
+from forge.sdk.db import AgentDB
+from forge.sdk.errors import NotFoundError
+from forge.sdk.middlewares import AgentMiddleware
+from forge.sdk.model import (
+ Artifact,
+ Step,
+ StepRequestBody,
+ Task,
+ TaskArtifactsListResponse,
+ TaskListResponse,
+ TaskRequestBody,
+ TaskStepsListResponse,
+)
+from forge.sdk.routes.agent_protocol import base_router
+from hypercorn.asyncio import serve as hypercorn_serve
+from hypercorn.config import Config as HypercornConfig
+
+from autogpt.agent_factory.configurators import configure_agent_with_state
+from autogpt.agent_factory.generators import generate_agent_for_task
+from autogpt.agent_manager import AgentManager
+from autogpt.commands.system import finish
+from autogpt.commands.user_interaction import ask_user
+from autogpt.config import Config
+from autogpt.core.resource.model_providers import ChatModelProvider
+from autogpt.core.resource.model_providers.openai import OpenAIProvider
+from autogpt.core.resource.model_providers.schema import ModelProviderBudget
+from autogpt.file_workspace import (
+ FileWorkspace,
+ FileWorkspaceBackendName,
+ get_workspace,
+)
+from autogpt.logs.utils import fmt_kwargs
+from autogpt.models.action_history import ActionErrorResult, ActionSuccessResult
+
+logger = logging.getLogger(__name__)
+
+
+class AgentProtocolServer:
+ _task_budgets: dict[str, ModelProviderBudget]
+
+ def __init__(
+ self,
+ app_config: Config,
+ database: AgentDB,
+ llm_provider: ChatModelProvider,
+ ):
+ self.app_config = app_config
+ self.db = database
+ self.llm_provider = llm_provider
+ self.agent_manager = AgentManager(app_data_dir=app_config.app_data_dir)
+ self._task_budgets = {}
+
+ async def start(self, port: int = 8000, router: APIRouter = base_router):
+ """Start the agent server."""
+ logger.debug("Starting the agent server...")
+ config = HypercornConfig()
+ config.bind = [f"localhost:{port}"]
+ app = FastAPI(
+ title="AutoGPT Server",
+ description="Forked from AutoGPT Forge; "
+ "Modified version of The Agent Protocol.",
+ version="v0.4",
+ )
+
+ # Add CORS middleware
+ origins = [
+ "*",
+ # Add any other origins you want to whitelist
+ ]
+
+ app.add_middleware(
+ CORSMiddleware,
+ allow_origins=origins,
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+ )
+
+ app.include_router(router, prefix="/ap/v1")
+ script_dir = os.path.dirname(os.path.realpath(__file__))
+ frontend_path = (
+ pathlib.Path(script_dir)
+ .joinpath("../../../../frontend/build/web")
+ .resolve()
+ )
+
+ if os.path.exists(frontend_path):
+ app.mount("/app", StaticFiles(directory=frontend_path), name="app")
+
+ @app.get("/", include_in_schema=False)
+ async def root():
+ return RedirectResponse(url="/app/index.html", status_code=307)
+
+ else:
+ logger.warning(
+ f"Frontend not found. {frontend_path} does not exist. "
+ "The frontend will not be available."
+ )
+
+ # Used to access the methods on this class from API route handlers
+ app.add_middleware(AgentMiddleware, agent=self)
+
+ config.loglevel = "ERROR"
+ config.bind = [f"0.0.0.0:{port}"]
+
+ logger.info(f"AutoGPT server starting on http://localhost:{port}")
+ await hypercorn_serve(app, config)
+
+ async def create_task(self, task_request: TaskRequestBody) -> Task:
+ """
+ Create a task for the agent.
+ """
+ task = await self.db.create_task(
+ input=task_request.input,
+ additional_input=task_request.additional_input,
+ )
+ logger.debug(f"Creating agent for task: '{task.input}'")
+ task_agent = await generate_agent_for_task(
+ task=task.input,
+ app_config=self.app_config,
+ llm_provider=self._get_task_llm_provider(task),
+ )
+
+ # Assign an ID and a folder to the Agent and persist it
+ agent_id = task_agent.state.agent_id = task_agent_id(task.task_id)
+ logger.debug(f"New agent ID: {agent_id}")
+ task_agent.attach_fs(self.app_config.app_data_dir / "agents" / agent_id)
+ task_agent.state.save_to_json_file(task_agent.file_manager.state_file_path)
+
+ return task
+
+ async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse:
+ """
+ List all tasks that the agent has created.
+ """
+ logger.debug("Listing all tasks...")
+ tasks, pagination = await self.db.list_tasks(page, pageSize)
+ response = TaskListResponse(tasks=tasks, pagination=pagination)
+ return response
+
+ async def get_task(self, task_id: str) -> Task:
+ """
+ Get a task by ID.
+ """
+ logger.debug(f"Getting task with ID: {task_id}...")
+ task = await self.db.get_task(task_id)
+ return task
+
+ async def list_steps(
+ self, task_id: str, page: int = 1, pageSize: int = 10
+ ) -> TaskStepsListResponse:
+ """
+ List the IDs of all steps that the task has created.
+ """
+ logger.debug(f"Listing all steps created by task with ID: {task_id}...")
+ steps, pagination = await self.db.list_steps(task_id, page, pageSize)
+ response = TaskStepsListResponse(steps=steps, pagination=pagination)
+ return response
+
+ async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
+ """Create a step for the task."""
+ logger.debug(f"Creating a step for task with ID: {task_id}...")
+
+ # Restore Agent instance
+ task = await self.get_task(task_id)
+ agent = configure_agent_with_state(
+ state=self.agent_manager.retrieve_state(task_agent_id(task_id)),
+ app_config=self.app_config,
+ llm_provider=self._get_task_llm_provider(task),
+ )
+
+ # According to the Agent Protocol spec, the first execute_step request contains
+ # the same task input as the parent create_task request.
+ # To prevent this from interfering with the agent's process, we ignore the input
+ # of this first step request, and just generate the first step proposal.
+ is_init_step = not bool(agent.event_history)
+ execute_command, execute_command_args, execute_result = None, None, None
+ execute_approved = False
+
+ # HACK: only for compatibility with AGBenchmark
+ if step_request.input == "y":
+ step_request.input = ""
+
+ user_input = step_request.input if not is_init_step else ""
+
+ if (
+ not is_init_step
+ and agent.event_history.current_episode
+ and not agent.event_history.current_episode.result
+ ):
+ execute_command = agent.event_history.current_episode.action.name
+ execute_command_args = agent.event_history.current_episode.action.args
+ execute_approved = not user_input
+
+ logger.debug(
+ f"Agent proposed command"
+ f" {execute_command}({fmt_kwargs(execute_command_args)})."
+ f" User input/feedback: {repr(user_input)}"
+ )
+
+ # Save step request
+ step = await self.db.create_step(
+ task_id=task_id,
+ input=step_request,
+ is_last=execute_command == finish.__name__ and execute_approved,
+ )
+ agent.llm_provider = self._get_task_llm_provider(task, step.step_id)
+
+ # Execute previously proposed action
+ if execute_command:
+ assert execute_command_args is not None
+ agent.workspace.on_write_file = lambda path: self._on_agent_write_file(
+ task=task, step=step, relative_path=path
+ )
+
+ if step.is_last and execute_command == finish.__name__:
+ assert execute_command_args
+ step = await self.db.update_step(
+ task_id=task_id,
+ step_id=step.step_id,
+ output=execute_command_args["reason"],
+ )
+ logger.info(
+ f"Total LLM cost for task {task_id}: "
+ f"${round(agent.llm_provider.get_incurred_cost(), 2)}"
+ )
+ return step
+
+ if execute_command == ask_user.__name__: # HACK
+ execute_result = ActionSuccessResult(outputs=user_input)
+ agent.event_history.register_result(execute_result)
+ elif not execute_command:
+ execute_result = None
+ elif execute_approved:
+ step = await self.db.update_step(
+ task_id=task_id,
+ step_id=step.step_id,
+ status="running",
+ )
+ # Execute previously proposed action
+ execute_result = await agent.execute(
+ command_name=execute_command,
+ command_args=execute_command_args,
+ )
+ else:
+ assert user_input
+ execute_result = await agent.execute(
+ command_name="human_feedback", # HACK
+ command_args={},
+ user_input=user_input,
+ )
+
+ # Propose next action
+ try:
+ next_command, next_command_args, raw_output = await agent.propose_action()
+ logger.debug(f"AI output: {raw_output}")
+ except Exception as e:
+ step = await self.db.update_step(
+ task_id=task_id,
+ step_id=step.step_id,
+ status="completed",
+ output=f"An error occurred while proposing the next action: {e}",
+ )
+ return step
+
+ # Format step output
+ output = (
+ (
+ f"`{execute_command}({fmt_kwargs(execute_command_args)})` returned:"
+ + ("\n\n" if "\n" in str(execute_result) else " ")
+ + f"{execute_result}\n\n"
+ )
+ if execute_command_args and execute_command != ask_user.__name__
+ else ""
+ )
+ output += f"{raw_output['thoughts']['speak']}\n\n"
+ output += (
+ f"Next Command: {next_command}({fmt_kwargs(next_command_args)})"
+ if next_command != ask_user.__name__
+ else next_command_args["question"]
+ )
+
+ additional_output = {
+ **(
+ {
+ "last_action": {
+ "name": execute_command,
+ "args": execute_command_args,
+ "result": (
+ orjson.loads(execute_result.json())
+ if not isinstance(execute_result, ActionErrorResult)
+ else {
+ "error": str(execute_result.error),
+ "reason": execute_result.reason,
+ }
+ ),
+ },
+ }
+ if not is_init_step
+ else {}
+ ),
+ **raw_output,
+ }
+
+ step = await self.db.update_step(
+ task_id=task_id,
+ step_id=step.step_id,
+ status="completed",
+ output=output,
+ additional_output=additional_output,
+ )
+
+ logger.debug(
+ f"Running total LLM cost for task {task_id}: "
+ f"${round(agent.llm_provider.get_incurred_cost(), 3)}"
+ )
+ agent.state.save_to_json_file(agent.file_manager.state_file_path)
+ return step
+
+ async def _on_agent_write_file(
+ self, task: Task, step: Step, relative_path: pathlib.Path
+ ) -> None:
+ """
+ Creates an Artifact for the written file, or updates the Artifact if it exists.
+ """
+ if relative_path.is_absolute():
+ raise ValueError(f"File path '{relative_path}' is not relative")
+ for a in task.artifacts or []:
+ if a.relative_path == str(relative_path):
+ logger.debug(f"Updating Artifact after writing to existing file: {a}")
+ if not a.agent_created:
+ await self.db.update_artifact(a.artifact_id, agent_created=True)
+ break
+ else:
+ logger.debug(f"Creating Artifact for new file '{relative_path}'")
+ await self.db.create_artifact(
+ task_id=step.task_id,
+ step_id=step.step_id,
+ file_name=relative_path.parts[-1],
+ agent_created=True,
+ relative_path=str(relative_path),
+ )
+
+ async def get_step(self, task_id: str, step_id: str) -> Step:
+ """
+ Get a step by ID.
+ """
+ step = await self.db.get_step(task_id, step_id)
+ return step
+
+ async def list_artifacts(
+ self, task_id: str, page: int = 1, pageSize: int = 10
+ ) -> TaskArtifactsListResponse:
+ """
+ List the artifacts that the task has created.
+ """
+ artifacts, pagination = await self.db.list_artifacts(task_id, page, pageSize)
+ return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination)
+
+ async def create_artifact(
+ self, task_id: str, file: UploadFile, relative_path: str
+ ) -> Artifact:
+ """
+ Create an artifact for the task.
+ """
+ file_name = file.filename or str(uuid4())
+ data = b""
+ while contents := file.file.read(1024 * 1024):
+ data += contents
+ # Check if relative path ends with filename
+ if relative_path.endswith(file_name):
+ file_path = relative_path
+ else:
+ file_path = os.path.join(relative_path, file_name)
+
+ workspace = self._get_task_agent_file_workspace(task_id, self.agent_manager)
+ await workspace.write_file(file_path, data)
+
+ artifact = await self.db.create_artifact(
+ task_id=task_id,
+ file_name=file_name,
+ relative_path=relative_path,
+ agent_created=False,
+ )
+ return artifact
+
+ async def get_artifact(self, task_id: str, artifact_id: str) -> StreamingResponse:
+ """
+ Download a task artifact by ID.
+ """
+ try:
+ artifact = await self.db.get_artifact(artifact_id)
+ if artifact.file_name not in artifact.relative_path:
+ file_path = os.path.join(artifact.relative_path, artifact.file_name)
+ else:
+ file_path = artifact.relative_path
+ workspace = self._get_task_agent_file_workspace(task_id, self.agent_manager)
+ retrieved_artifact = workspace.read_file(file_path, binary=True)
+ except NotFoundError:
+ raise
+ except FileNotFoundError:
+ raise
+
+ return StreamingResponse(
+ BytesIO(retrieved_artifact),
+ media_type="application/octet-stream",
+ headers={
+ "Content-Disposition": f'attachment; filename="{artifact.file_name}"'
+ },
+ )
+
+ def _get_task_agent_file_workspace(
+ self,
+ task_id: str | int,
+ agent_manager: AgentManager,
+ ) -> FileWorkspace:
+ use_local_ws = (
+ self.app_config.workspace_backend == FileWorkspaceBackendName.LOCAL
+ )
+ agent_id = task_agent_id(task_id)
+ workspace = get_workspace(
+ backend=self.app_config.workspace_backend,
+ id=agent_id if not use_local_ws else "",
+ root_path=agent_manager.get_agent_dir(
+ agent_id=agent_id,
+ must_exist=True,
+ )
+ / "workspace"
+ if use_local_ws
+ else None,
+ )
+ workspace.initialize()
+ return workspace
+
+ def _get_task_llm_provider(
+ self, task: Task, step_id: str = ""
+ ) -> ChatModelProvider:
+ """
+ Configures the LLM provider with headers to link outgoing requests to the task.
+ """
+ task_llm_budget = self._task_budgets.get(
+ task.task_id, self.llm_provider.default_settings.budget.copy(deep=True)
+ )
+
+ task_llm_provider_config = self.llm_provider._configuration.copy(deep=True)
+ _extra_request_headers = task_llm_provider_config.extra_request_headers
+ _extra_request_headers["AP-TaskID"] = task.task_id
+ if step_id:
+ _extra_request_headers["AP-StepID"] = step_id
+ if task.additional_input and (user_id := task.additional_input.get("user_id")):
+ _extra_request_headers["AutoGPT-UserID"] = user_id
+
+ task_llm_provider = None
+ if isinstance(self.llm_provider, OpenAIProvider):
+ settings = self.llm_provider._settings.copy()
+ settings.budget = task_llm_budget
+ settings.configuration = task_llm_provider_config # type: ignore
+ task_llm_provider = OpenAIProvider(
+ settings=settings,
+ logger=logger.getChild(f"Task-{task.task_id}_OpenAIProvider"),
+ )
+
+ if task_llm_provider and task_llm_provider._budget:
+ self._task_budgets[task.task_id] = task_llm_provider._budget
+
+ return task_llm_provider or self.llm_provider
+
+
+def task_agent_id(task_id: str | int) -> str:
+ return f"AutoGPT-{task_id}"
diff --git a/autogpts/autogpt/autogpt/app/cli.py b/autogpts/autogpt/autogpt/app/cli.py
new file mode 100644
index 000000000..ccfa9a8ae
--- /dev/null
+++ b/autogpts/autogpt/autogpt/app/cli.py
@@ -0,0 +1,283 @@
+"""Main script for the autogpt package."""
+from logging import _nameToLevel as logLevelMap
+from pathlib import Path
+from typing import Optional
+
+import click
+
+from autogpt.logs.config import LogFormatName
+
+
+@click.group(invoke_without_command=True)
+@click.pass_context
+def cli(ctx: click.Context):
+ # Invoke `run` by default
+ if ctx.invoked_subcommand is None:
+ ctx.invoke(run)
+
+
+@cli.command()
+@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
+@click.option(
+ "-l",
+ "--continuous-limit",
+ type=int,
+ help="Defines the number of times to run in continuous mode",
+)
+@click.option("--speak", is_flag=True, help="Enable Speak Mode")
+@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
+@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
+@click.option(
+ "-b",
+ "--browser-name",
+ help="Specifies which web-browser to use when using selenium to scrape the web.",
+)
+@click.option(
+ "--allow-downloads",
+ is_flag=True,
+ help="Dangerous: Allows AutoGPT to download files natively.",
+)
+@click.option(
+ # TODO: this is a hidden option for now, necessary for integration testing.
+ # We should make this public once we're ready to roll out agent specific workspaces.
+ "--workspace-directory",
+ "-w",
+ type=click.Path(file_okay=False),
+ hidden=True,
+)
+@click.option(
+ "--install-plugin-deps",
+ is_flag=True,
+ help="Installs external dependencies for 3rd party plugins.",
+)
+@click.option(
+ "--skip-news",
+ is_flag=True,
+ help="Specifies whether to suppress the output of latest news on startup.",
+)
+@click.option(
+ "--skip-reprompt",
+ "-y",
+ is_flag=True,
+ help="Skips the re-prompting messages at the beginning of the script",
+)
+@click.option(
+ "--ai-settings",
+ "-C",
+ type=click.Path(exists=True, dir_okay=False, path_type=Path),
+ help=(
+ "Specifies which ai_settings.yaml file to use, relative to the AutoGPT"
+ " root directory. Will also automatically skip the re-prompt."
+ ),
+)
+@click.option(
+ "--ai-name",
+ type=str,
+ help="AI name override",
+)
+@click.option(
+ "--ai-role",
+ type=str,
+ help="AI role override",
+)
+@click.option(
+ "--prompt-settings",
+ "-P",
+ type=click.Path(exists=True, dir_okay=False, path_type=Path),
+ help="Specifies which prompt_settings.yaml file to use.",
+)
+@click.option(
+ "--constraint",
+ type=str,
+ multiple=True,
+ help=(
+ "Add or override AI constraints to include in the prompt;"
+ " may be used multiple times to pass multiple constraints"
+ ),
+)
+@click.option(
+ "--resource",
+ type=str,
+ multiple=True,
+ help=(
+ "Add or override AI resources to include in the prompt;"
+ " may be used multiple times to pass multiple resources"
+ ),
+)
+@click.option(
+ "--best-practice",
+ type=str,
+ multiple=True,
+ help=(
+ "Add or override AI best practices to include in the prompt;"
+ " may be used multiple times to pass multiple best practices"
+ ),
+)
+@click.option(
+ "--override-directives",
+ is_flag=True,
+ help=(
+ "If specified, --constraint, --resource and --best-practice will override"
+ " the AI's directives instead of being appended to them"
+ ),
+)
+@click.option(
+ "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug"
+)
+@click.option("--log-level", type=click.Choice([*logLevelMap.keys()]))
+@click.option(
+ "--log-format",
+ help=(
+ "Choose a log format; defaults to 'simple'."
+ " Also implies --log-file-format, unless it is specified explicitly."
+ " Using the 'structured_google_cloud' format disables log file output."
+ ),
+ type=click.Choice([i.value for i in LogFormatName]),
+)
+@click.option(
+ "--log-file-format",
+ help=(
+ "Override the format used for the log file output."
+ " Defaults to the application's global --log-format."
+ ),
+ type=click.Choice([i.value for i in LogFormatName]),
+)
+def run(
+ continuous: bool,
+ continuous_limit: Optional[int],
+ speak: bool,
+ gpt3only: bool,
+ gpt4only: bool,
+ browser_name: Optional[str],
+ allow_downloads: bool,
+ workspace_directory: Optional[Path],
+ install_plugin_deps: bool,
+ skip_news: bool,
+ skip_reprompt: bool,
+ ai_settings: Optional[Path],
+ ai_name: Optional[str],
+ ai_role: Optional[str],
+ prompt_settings: Optional[Path],
+ resource: tuple[str],
+ constraint: tuple[str],
+ best_practice: tuple[str],
+ override_directives: bool,
+ debug: bool,
+ log_level: Optional[str],
+ log_format: Optional[str],
+ log_file_format: Optional[str],
+) -> None:
+ """
+ Sets up and runs an agent, based on the task specified by the user, or resumes an
+ existing agent.
+ """
+ # Put imports inside function to avoid importing everything when starting the CLI
+ from autogpt.app.main import run_auto_gpt
+
+ run_auto_gpt(
+ continuous=continuous,
+ continuous_limit=continuous_limit,
+ ai_settings=ai_settings,
+ prompt_settings=prompt_settings,
+ skip_reprompt=skip_reprompt,
+ speak=speak,
+ debug=debug,
+ log_level=log_level,
+ log_format=log_format,
+ log_file_format=log_file_format,
+ gpt3only=gpt3only,
+ gpt4only=gpt4only,
+ browser_name=browser_name,
+ allow_downloads=allow_downloads,
+ skip_news=skip_news,
+ workspace_directory=workspace_directory,
+ install_plugin_deps=install_plugin_deps,
+ override_ai_name=ai_name,
+ override_ai_role=ai_role,
+ resources=list(resource),
+ constraints=list(constraint),
+ best_practices=list(best_practice),
+ override_directives=override_directives,
+ )
+
+
+@cli.command()
+@click.option(
+ "--prompt-settings",
+ "-P",
+ type=click.Path(exists=True, dir_okay=False, path_type=Path),
+ help="Specifies which prompt_settings.yaml file to use.",
+)
+@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
+@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
+@click.option(
+ "-b",
+ "--browser-name",
+ help="Specifies which web-browser to use when using selenium to scrape the web.",
+)
+@click.option(
+ "--allow-downloads",
+ is_flag=True,
+ help="Dangerous: Allows AutoGPT to download files natively.",
+)
+@click.option(
+ "--install-plugin-deps",
+ is_flag=True,
+ help="Installs external dependencies for 3rd party plugins.",
+)
+@click.option(
+ "--debug", is_flag=True, help="Implies --log-level=DEBUG --log-format=debug"
+)
+@click.option("--log-level", type=click.Choice([*logLevelMap.keys()]))
+@click.option(
+ "--log-format",
+ help=(
+ "Choose a log format; defaults to 'simple'."
+ " Also implies --log-file-format, unless it is specified explicitly."
+ " Using the 'structured_google_cloud' format disables log file output."
+ ),
+ type=click.Choice([i.value for i in LogFormatName]),
+)
+@click.option(
+ "--log-file-format",
+ help=(
+ "Override the format used for the log file output."
+ " Defaults to the application's global --log-format."
+ ),
+ type=click.Choice([i.value for i in LogFormatName]),
+)
+def serve(
+ prompt_settings: Optional[Path],
+ gpt3only: bool,
+ gpt4only: bool,
+ browser_name: Optional[str],
+ allow_downloads: bool,
+ install_plugin_deps: bool,
+ debug: bool,
+ log_level: Optional[str],
+ log_format: Optional[str],
+ log_file_format: Optional[str],
+) -> None:
+ """
+ Starts an Agent Protocol compliant AutoGPT server, which creates a custom agent for
+ every task.
+ """
+ # Put imports inside function to avoid importing everything when starting the CLI
+ from autogpt.app.main import run_auto_gpt_server
+
+ run_auto_gpt_server(
+ prompt_settings=prompt_settings,
+ debug=debug,
+ log_level=log_level,
+ log_format=log_format,
+ log_file_format=log_file_format,
+ gpt3only=gpt3only,
+ gpt4only=gpt4only,
+ browser_name=browser_name,
+ allow_downloads=allow_downloads,
+ install_plugin_deps=install_plugin_deps,
+ )
+
+
+if __name__ == "__main__":
+ cli()
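Typical invocations, assuming the repository wires this `cli` group up as the `autogpt` module entry point:

    python -m autogpt run --continuous --continuous-limit 5 --log-level DEBUG
    python -m autogpt serve --gpt3only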
diff --git a/autogpts/autogpt/autogpt/app/configurator.py b/autogpts/autogpt/autogpt/app/configurator.py
new file mode 100644
index 000000000..cd7890021
--- /dev/null
+++ b/autogpts/autogpt/autogpt/app/configurator.py
@@ -0,0 +1,201 @@
+"""Configurator module."""
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+from typing import TYPE_CHECKING, Literal, Optional
+
+import click
+from colorama import Back, Fore, Style
+
+from autogpt import utils
+from autogpt.config import Config
+from autogpt.config.config import GPT_3_MODEL, GPT_4_MODEL
+from autogpt.llm.api_manager import ApiManager
+from autogpt.logs.config import LogFormatName
+from autogpt.logs.helpers import request_user_double_check
+from autogpt.memory.vector import get_supported_memory_backends
+
+if TYPE_CHECKING:
+ from autogpt.core.resource.model_providers.openai import OpenAICredentials
+
+logger = logging.getLogger(__name__)
+
+
+def apply_overrides_to_config(
+ config: Config,
+ continuous: bool = False,
+ continuous_limit: Optional[int] = None,
+ ai_settings_file: Optional[Path] = None,
+ prompt_settings_file: Optional[Path] = None,
+ skip_reprompt: bool = False,
+ speak: bool = False,
+ debug: bool = False,
+ log_level: Optional[str] = None,
+ log_format: Optional[str] = None,
+ log_file_format: Optional[str] = None,
+ gpt3only: bool = False,
+ gpt4only: bool = False,
+ memory_type: Optional[str] = None,
+ browser_name: Optional[str] = None,
+ allow_downloads: bool = False,
+ skip_news: bool = False,
+) -> None:
+ """Updates the config object with the given arguments.
+
+ Args:
+ config (Config): The config object to update.
+ continuous (bool): Whether to run in continuous mode.
+ continuous_limit (int): The number of times to run in continuous mode.
+ ai_settings_file (Path): The path to the ai_settings.yaml file.
+ prompt_settings_file (Path): The path to the prompt_settings.yaml file.
+ skip_reprompt (bool): Whether to skip the re-prompting messages on start.
+ speak (bool): Whether to enable speak mode.
+ debug (bool): Whether to enable debug mode.
+        log_level (str): The global log level for the application.
+ log_format (str): The format for the log(s).
+ log_file_format (str): Override the format for the log file.
+ gpt3only (bool): Whether to enable GPT3.5 only mode.
+ gpt4only (bool): Whether to enable GPT4 only mode.
+ memory_type (str): The type of memory backend to use.
+ browser_name (str): The name of the browser to use for scraping the web.
+ allow_downloads (bool): Whether to allow AutoGPT to download files natively.
+        skip_news (bool): Whether to suppress the output of the latest news on startup.
+ """
+ config.continuous_mode = False
+ config.tts_config.speak_mode = False
+
+ # Set log level
+ if debug:
+ config.logging.level = logging.DEBUG
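+    # logging.getLevelName() maps a known level name to its numeric value but
+    # returns the string "Level <name>" for unknown names, so checking for an
+    # int filters out invalid --log-level input.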
+ elif log_level and type(_level := logging.getLevelName(log_level.upper())) is int:
+ config.logging.level = _level
+
+ # Set log format
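+    # Membership in LogFormatName._value2member_map_ (value -> member) validates
+    # the string before conversion; unrecognized formats are silently ignored.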
+ if log_format and log_format in LogFormatName._value2member_map_:
+ config.logging.log_format = LogFormatName(log_format)
+ if log_file_format and log_file_format in LogFormatName._value2member_map_:
+ config.logging.log_file_format = LogFormatName(log_file_format)
+
+ if continuous:
+ logger.warning(
+ "Continuous mode is not recommended. It is potentially dangerous and may"
+ " cause your AI to run forever or carry out actions you would not usually"
+ " authorise. Use at your own risk.",
+ )
+ config.continuous_mode = True
+
+ if continuous_limit:
+ config.continuous_limit = continuous_limit
+
+ # Check if continuous limit is used without continuous mode
+ if continuous_limit and not continuous:
+ raise click.UsageError("--continuous-limit can only be used with --continuous")
+
+ if speak:
+ config.tts_config.speak_mode = True
+
+ # Set the default LLM models
+ if gpt3only:
+ # --gpt3only should always use gpt-3.5-turbo, despite user's FAST_LLM config
+ config.fast_llm = GPT_3_MODEL
+ config.smart_llm = GPT_3_MODEL
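+    # check_model (defined later in this module) is presumed to verify API access
+    # to the requested model and return a fallback model if it is unavailable, so
+    # this branch only applies when GPT-4 is actually reachable.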
+ elif (
+ gpt4only
+ and check_model(
+ GPT_4_MODEL,
+ model_type="smart_llm",
+ api_credentials=config.openai_credentials,
+ )
+ == GPT_4_MODEL
+ ):
+ # --gpt4only should always use gpt-4, despite user's SMART_LLM config
+ config.fast_llm = GPT_4_MODEL
+ config.smart_llm = GPT_4_MODEL
+ else:
+ config.fast_llm = check_model(
+ config.fast_llm, "fast_llm", api_credentials=config.openai_credentials
+ )
+ config.smart_llm = check_model(
+ config.smart_llm, "smart_llm", api_credentials=config.openai_credentials
+ )
+
+ if memory_type:
+ supported_memory = get_supported_memory_backends()
+ chosen = memory_type
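+        # Reject unknown backends up front with a warning instead of failing
+        # later during memory initialization; config.memory_backend keeps its
+        # existing value in that case.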
+ if chosen not in supported_memory:
+ logger.warning(
+ extra={
+ "title": "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED:",
+ "title_color": Fore.RED,
+ },
+ msg=f"{supported_memory}",
+ )
+ else:
+ config.memory_backend = chosen
+
+ if skip_reprompt:
+        config.skip_reprompt = True