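"""Unit tests for autogpt.prompts.generator.PromptGenerator."""
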
from autogpt.prompts.generator import PromptGenerator


def test_add_constraint():
    """
    Test if the add_constraint() method adds a constraint to the generator's
    constraints list.
    """
    constraint = "Constraint1"
    generator = PromptGenerator()
    generator.add_constraint(constraint)
    assert constraint in generator.constraints
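

# A hedged follow-up sketch: this assumes add_constraint() appends to a plain
# list, so repeated calls should preserve insertion order. If the generator
# ever deduplicates or re-sorts constraints, this test would need adjusting.
def test_add_multiple_constraints_preserves_order():
    """
    Test that adding several constraints keeps them in insertion order
    (assuming the underlying container is an append-only list).
    """
    constraints = ["Constraint1", "Constraint2"]
    generator = PromptGenerator()
    for constraint in constraints:
        generator.add_constraint(constraint)
    assert list(generator.constraints) == constraints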


def test_add_command():
    """
    Test if the add_command() method adds a command to the generator's commands list.
    """
    command_label = "Command Label"
    command_name = "command_name"
    params = {"arg1": "value1", "arg2": "value2"}
    generator = PromptGenerator()
    generator.add_command(command_label, command_name, params)
    assert generator.commands[0].__dict__ == {
        "label": command_label,
        "name": command_name,
        "params": params,
        "function": None,
    }
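

# A hedged sketch: the __dict__ asserted above shows a "function" field that
# defaults to None, which suggests add_command() also accepts a callable.
# The keyword name `function` is an assumption inferred from that field name.
def test_add_command_with_function():
    """
    Test that add_command() stores a callable when one is supplied
    (assuming the parameter is exposed as `function`).
    """

    def example_function(arg1: str) -> str:
        return arg1

    generator = PromptGenerator()
    generator.add_command(
        "Command Label", "command_name", {"arg1": "value1"}, function=example_function
    )
    assert generator.commands[0].function is example_function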


def test_add_resource():
    """
    Test if the add_resource() method adds a resource to the generator's resources list.
    """
    resource = "Resource1"
    generator = PromptGenerator()
    generator.add_resource(resource)
    assert resource in generator.resources


def test_add_best_practice():
    """
    Test if the add_best_practice() method adds a best practice to the generator's
    best_practices list.
    """
    practice = "Practice1"
    generator = PromptGenerator()
    generator.add_best_practice(practice)
    assert practice in generator.best_practices


def test_generate_prompt_string():
    """
    Test if the generate_prompt_string() method generates a prompt string containing
    all the added constraints, commands, resources, and best practices.
    """

    # Define the test data
    constraints = ["Constraint1", "Constraint2"]
    commands = [
        {
            "label": "Command1",
            "name": "command_name1",
            "params": {"arg1": "value1"},
        },
        {
            "label": "Command2",
            "name": "command_name2",
            "params": {},
        },
    ]
    resources = ["Resource1", "Resource2"]
    best_practices = ["Practice1", "Practice2"]

    # Add test data to the generator
    generator = PromptGenerator()
    for constraint in constraints:
        generator.add_constraint(constraint)
    for command in commands:
        generator.add_command(command["label"], command["name"], command["params"])
    for resource in resources:
        generator.add_resource(resource)
    for best_practice in best_practices:
        generator.add_best_practice(best_practice)

    # Generate the prompt string and verify its correctness
    prompt_string = generator.generate_prompt_string()
    assert isinstance(prompt_string, str)

    # Check that every constraint, command, resource, and best practice appears
    # in the prompt string
    for constraint in constraints:
        assert constraint in prompt_string
    for command in commands:
        assert command["name"] in prompt_string
        for key, value in command["params"].items():
            assert f'"{key}": "{value}"' in prompt_string
    for resource in resources:
        assert resource in prompt_string
    for best_practice in best_practices:
        assert best_practice in prompt_string
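

# A hedged edge-case sketch: assuming generate_prompt_string() simply formats
# whatever has been added, a freshly constructed generator should still return
# a well-formed string rather than raising on its empty lists.
def test_generate_prompt_string_empty_generator():
    """
    Test that generate_prompt_string() returns a string even when no
    constraints, commands, resources, or best practices have been added.
    """
    generator = PromptGenerator()
    prompt_string = generator.generate_prompt_string()
    assert isinstance(prompt_string, str)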