7 | 7 | from llama_cpp_agent.llm_agent import SystemPromptModule, SystemPromptModulePosition |
8 | 8 | from llama_cpp_agent.providers import LlamaCppServerProvider |
9 | 9 | from memory import output_settings, agent_core_memory, agent_retrieval_memory, agent_event_memory, update_memory_section |
10 | | -from prompts import game_master_prompt, examples, memory_prompt, wrap_function_response_in_xml_tags_json_mode, generate_fake_write_message, generate_write_message_with_examples, wrap_player_message_in_xml_tags_json_mode |
| 10 | +from prompts import game_master_prompt, examples, memory_prompt, wrap_function_response_in_xml_tags_json_mode, \ |
| 11 | + generate_fake_write_message, generate_write_message_with_examples, wrap_player_message_in_xml_tags_json_mode |
11 | 12 |
12 | 13 | provider = LlamaCppServerProvider("http://localhost:8080") |
13 | 14 |
29 | 30 | date_time_section = SystemPromptModule("current_date_time", "The following section shows the current date and time:") |
30 | 31 |
31 | 32 | example_section = SystemPromptModule("examples", |
32 | | - "The following examples show you which kind of responses you should write to the user based on the current scenario:", suffix="Always remember to never write actions or dialogue for the user! Always let the user decide on actions or dialogue!") |
| 33 | + "The following examples show you which kind of responses you should write to the user based on the current scenario:", |
| 34 | + suffix="Always remember to never write actions or dialogue for the user! Always let the user decide on actions or dialogue!") |
33 | 35 | example_section.set_content(examples) |
34 | 36 | memory_intro_section = SystemPromptModule("memory_intro", |
35 | 37 | "To support you in your task as a game master and to help you remembering things, you have access to 3 different types of memory.", |
36 | 38 | position=SystemPromptModulePosition.after_system_instructions) |
37 | 39 | memory_intro_section.set_content(memory_prompt) |
| 40 | +output_settings.output_structured_output_and_raw_json_string = True |
38 | 41 | while True: |
39 | 42 | user_input = input(">") |
40 | 43 | if user_input == "exit": |
41 | 44 | break |
42 | 45 | update_memory_section(memory_section) |
43 | 46 | date_time_section.set_content(datetime.datetime.now().strftime("%d.%m.%Y") + "\nFormat: dd.mm.yyyy") |
44 | 47 |
45 | | - agent_event_memory.add_event_to_queue(Roles.user, wrap_player_message_in_xml_tags_json_mode(user_input)) |
46 | | - agent_output = agent.get_chat_response( |
| 48 | + agent_event_memory.add_event(Roles.user, wrap_player_message_in_xml_tags_json_mode(user_input)) |
| 49 | + agent_output, json_output = agent.get_chat_response( |
47 | 50 | chat_history=agent_event_memory.get_event_memory_manager().build_chat_history(), |
48 | 51 | llm_sampling_settings=settings, |
49 | 52 | system_prompt_modules=[memory_intro_section, memory_section, date_time_section], |
50 | 53 | structured_output_settings=output_settings) |
51 | 54 |
| 55 | + agent_event_memory.add_event(Roles.assistant, json_output) |
52 | 56 | while True: |
53 | 57 | update_memory_section(memory_section) |
54 | 58 | date_time_section.set_content(datetime.datetime.now().strftime("%d.%m.%Y") + "\nFormat: dd.mm.yyyy") |
55 | 59 |
56 | 60 | if agent_output[0]["function"] == "write_message_to_player": |
57 | | - agent_event_memory.add_event_to_queue(Roles.tool, generate_write_message_with_examples(examples=example_section.get_formatted_content())) |
58 | 61 | output = agent.get_chat_response( |
| 62 | + generate_write_message_with_examples(examples=example_section.get_formatted_content()), |
| 63 | + role=Roles.tool, |
59 | 64 | chat_history=agent_event_memory.get_event_memory_manager().build_chat_history(), |
60 | 65 | add_message_to_chat_history=False, add_response_to_chat_history=False, |
61 | 66 | system_prompt_modules=[memory_intro_section, memory_section, date_time_section], |
62 | 67 | llm_sampling_settings=settings) |
63 | | - agent_event_memory.add_event_to_queue(Roles.tool, generate_fake_write_message()) |
64 | | - agent_event_memory.add_event_to_queue(Roles.assistant, output) |
| 68 | + agent_event_memory.add_event(Roles.tool, generate_fake_write_message()) |
| 69 | + agent_event_memory.add_event(Roles.assistant, output) |
65 | 70 |
66 | 71 | print(output) |
67 | 72 | break |
68 | 73 |
69 | | - agent_event_memory.add_event_to_queue(Roles.tool, wrap_function_response_in_xml_tags_json_mode(agent_output[0]["return_value"])) |
70 | | - agent_output = agent.get_chat_response(chat_history=agent_event_memory.get_event_memory_manager().build_chat_history(), |
71 | | - llm_sampling_settings=settings, |
72 | | - system_prompt_modules=[memory_intro_section, memory_section, |
73 | | - date_time_section], |
74 | | - structured_output_settings=output_settings) |
| 74 | + agent_event_memory.add_event(Roles.tool, wrap_function_response_in_xml_tags_json_mode( |
| 75 | + agent_output[0]["return_value"])) |
| 76 | + agent_output, json_output = agent.get_chat_response( |
| 77 | + chat_history=agent_event_memory.get_event_memory_manager().build_chat_history(), |
| 78 | + llm_sampling_settings=settings, |
| 79 | + system_prompt_modules=[memory_intro_section, memory_section, |
| 80 | + date_time_section], |
| 81 | + structured_output_settings=output_settings) |
| 82 | + agent_event_memory.add_event(Roles.assistant, json_output) |
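
Taken together, the hunks above switch the event memory calls from `add_event_to_queue` to `add_event` and, via `output_structured_output_and_raw_json_string = True`, make `get_chat_response` return both the parsed structured output and the raw JSON string, so the assistant's verbatim JSON can be written back into the event memory. Below is a minimal sketch of that turn-handling pattern, using only the calls visible in the diff; the `handle_player_turn` wrapper and the `Roles` import path are illustrative assumptions, and the `agent`, `settings`, `output_settings`, memory objects, and system prompt modules are assumed to be constructed as in the rest of the example file.

```python
# Illustrative sketch only: mirrors the calls shown in the diff above.
# The Roles import path and the handle_player_turn wrapper are assumptions,
# not part of the original example file.
from llama_cpp_agent.chat_history.messages import Roles

from prompts import wrap_player_message_in_xml_tags_json_mode


def handle_player_turn(agent, settings, output_settings, agent_event_memory,
                       system_prompt_modules, user_input):
    # Ask for both the parsed tool call and the raw JSON string from the model.
    output_settings.output_structured_output_and_raw_json_string = True

    # Record the player's message in the event memory, then query the agent
    # against the chat history rebuilt from that memory.
    agent_event_memory.add_event(Roles.user,
                                 wrap_player_message_in_xml_tags_json_mode(user_input))
    agent_output, json_output = agent.get_chat_response(
        chat_history=agent_event_memory.get_event_memory_manager().build_chat_history(),
        llm_sampling_settings=settings,
        system_prompt_modules=system_prompt_modules,
        structured_output_settings=output_settings)

    # Store the raw JSON string (not the parsed structure) as the assistant turn,
    # so the next build_chat_history() call replays exactly what the model emitted.
    agent_event_memory.add_event(Roles.assistant, json_output)
    return agent_output
```

Keeping the raw JSON string in the event memory is the point of the added `add_event(Roles.assistant, json_output)` lines: the next request replays the assistant turn exactly as the model produced it, rather than a re-serialized version of the parsed output.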