about summary refs log tree commit diff
path: root/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py
diff options
context:
space:
mode:
Diffstat (limited to 'autogpts/autogpt/autogpt/core/resource/model_providers/openai.py')
-rw-r--r--  autogpts/autogpt/autogpt/core/resource/model_providers/openai.py  36
1 file changed, 5 insertions(+), 31 deletions(-)
diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py
index 2ebb56638..874282764 100644
--- a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py
+++ b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py
@@ -1,6 +1,5 @@
import enum
import logging
-import math
import os
from pathlib import Path
from typing import Any, Callable, Coroutine, Iterator, Optional, ParamSpec, TypeVar
@@ -37,9 +36,7 @@ from autogpt.core.resource.model_providers.schema import (
ModelProviderConfiguration,
ModelProviderCredentials,
ModelProviderName,
- ModelProviderService,
ModelProviderSettings,
- ModelProviderUsage,
ModelTokenizer,
)
from autogpt.core.utils.json_schema import JSONSchema
@@ -49,7 +46,6 @@ _T = TypeVar("_T")
_P = ParamSpec("_P")
OpenAIEmbeddingParser = Callable[[Embedding], Embedding]
-OpenAIChatParser = Callable[[str], dict]
class OpenAIModelName(str, enum.Enum):
@@ -87,7 +83,6 @@ OPEN_AI_EMBEDDING_MODELS = {
for info in [
EmbeddingModelInfo(
name=OpenAIModelName.EMBEDDING_v2,
- service=ModelProviderService.EMBEDDING,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.0001 / 1000,
max_tokens=8191,
@@ -95,7 +90,6 @@ OPEN_AI_EMBEDDING_MODELS = {
),
EmbeddingModelInfo(
name=OpenAIModelName.EMBEDDING_v3_S,
- service=ModelProviderService.EMBEDDING,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.00002 / 1000,
max_tokens=8191,
@@ -103,7 +97,6 @@ OPEN_AI_EMBEDDING_MODELS = {
),
EmbeddingModelInfo(
name=OpenAIModelName.EMBEDDING_v3_L,
- service=ModelProviderService.EMBEDDING,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.00013 / 1000,
max_tokens=8191,
@@ -118,7 +111,6 @@ OPEN_AI_CHAT_MODELS = {
for info in [
ChatModelInfo(
name=OpenAIModelName.GPT3_v1,
- service=ModelProviderService.CHAT,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.0015 / 1000,
completion_token_cost=0.002 / 1000,
@@ -127,7 +119,6 @@ OPEN_AI_CHAT_MODELS = {
),
ChatModelInfo(
name=OpenAIModelName.GPT3_v2_16k,
- service=ModelProviderService.CHAT,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.003 / 1000,
completion_token_cost=0.004 / 1000,
@@ -136,7 +127,6 @@ OPEN_AI_CHAT_MODELS = {
),
ChatModelInfo(
name=OpenAIModelName.GPT3_v3,
- service=ModelProviderService.CHAT,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.001 / 1000,
completion_token_cost=0.002 / 1000,
@@ -145,7 +135,6 @@ OPEN_AI_CHAT_MODELS = {
),
ChatModelInfo(
name=OpenAIModelName.GPT3_v4,
- service=ModelProviderService.CHAT,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.0005 / 1000,
completion_token_cost=0.0015 / 1000,
@@ -154,7 +143,6 @@ OPEN_AI_CHAT_MODELS = {
),
ChatModelInfo(
name=OpenAIModelName.GPT4_v1,
- service=ModelProviderService.CHAT,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.03 / 1000,
completion_token_cost=0.06 / 1000,
@@ -163,7 +151,6 @@ OPEN_AI_CHAT_MODELS = {
),
ChatModelInfo(
name=OpenAIModelName.GPT4_v1_32k,
- service=ModelProviderService.CHAT,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.06 / 1000,
completion_token_cost=0.12 / 1000,
@@ -172,7 +159,6 @@ OPEN_AI_CHAT_MODELS = {
),
ChatModelInfo(
name=OpenAIModelName.GPT4_TURBO,
- service=ModelProviderService.CHAT,
provider_name=ModelProviderName.OPENAI,
prompt_token_cost=0.01 / 1000,
completion_token_cost=0.03 / 1000,
@@ -305,21 +291,12 @@ class OpenAIProvider(
retries_per_request=10,
),
credentials=None,
- budget=ModelProviderBudget(
- total_budget=math.inf,
- total_cost=0.0,
- remaining_budget=math.inf,
- usage=ModelProviderUsage(
- prompt_tokens=0,
- completion_tokens=0,
- total_tokens=0,
- ),
- ),
+ budget=ModelProviderBudget(),
)
- _budget: ModelProviderBudget
_configuration: OpenAIConfiguration
_credentials: OpenAICredentials
+ _budget: ModelProviderBudget
def __init__(
self,
@@ -648,12 +625,9 @@ class OpenAIProvider(
prompt_tokens_used = completion_tokens_used = 0
cost = self._budget.update_usage_and_cost(
- ChatModelResponse(
- response=AssistantChatMessage(content=None),
- model_info=OPEN_AI_CHAT_MODELS[model],
- prompt_tokens_used=prompt_tokens_used,
- completion_tokens_used=completion_tokens_used,
- )
+ model_info=OPEN_AI_CHAT_MODELS[model],
+ input_tokens_used=prompt_tokens_used,
+ output_tokens_used=completion_tokens_used,
)
self._logger.debug(
f"Completion usage: {prompt_tokens_used} input, "