Diffstat (limited to 'autogpts/autogpt/autogpt/core/prompting/schema.py')
-rw-r--r--  autogpts/autogpt/autogpt/core/prompting/schema.py  34
1 file changed, 34 insertions, 0 deletions
diff --git a/autogpts/autogpt/autogpt/core/prompting/schema.py b/autogpts/autogpt/autogpt/core/prompting/schema.py
new file mode 100644
index 000000000..45efc40fe
--- /dev/null
+++ b/autogpts/autogpt/autogpt/core/prompting/schema.py
@@ -0,0 +1,34 @@
+import enum
+
+from pydantic import BaseModel, Field
+
+from autogpt.core.resource.model_providers.schema import (
+ ChatMessage,
+ ChatMessageDict,
+ CompletionModelFunction,
+)
+
+
+class LanguageModelClassification(str, enum.Enum):
+ """The LanguageModelClassification is a functional description of the model.
+
+ This is used to determine what kind of model to use for a given prompt.
+ Sometimes we prefer a faster or cheaper model to accomplish a task when
+ possible.
+ """
+
+ FAST_MODEL = "fast_model"
+ SMART_MODEL = "smart_model"
+
+
+class ChatPrompt(BaseModel):
+ messages: list[ChatMessage]
+ functions: list[CompletionModelFunction] = Field(default_factory=list)
+
+ def raw(self) -> list[ChatMessageDict]:
+ return [m.dict() for m in self.messages]
+
+ def __str__(self):
+ return "\n\n".join(
+ f"{m.role.value.upper()}: {m.content}" for m in self.messages
+ )
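For context, here is a minimal usage sketch of the ChatPrompt and LanguageModelClassification types introduced by this diff. This is not part of the committed file; it assumes that ChatMessage (imported from autogpt.core.resource.model_providers.schema) exposes system()/user() helper constructors and a role/content pair, as used elsewhere in the repo.

# Hypothetical usage sketch, not part of this commit.
# Assumes ChatMessage provides system()/user() helper constructors.
from autogpt.core.prompting.schema import ChatPrompt, LanguageModelClassification
from autogpt.core.resource.model_providers.schema import ChatMessage

prompt = ChatPrompt(
    messages=[
        ChatMessage.system("You are a helpful assistant."),
        ChatMessage.user("Summarize the build log in one sentence."),
    ],
)

# A simple task like this could be routed to the cheaper/faster model tier.
classification = LanguageModelClassification.FAST_MODEL

print(prompt.raw())  # list[ChatMessageDict], ready to pass to a chat model provider
print(str(prompt))   # "SYSTEM: ...\n\nUSER: ..." rendering from __str__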