test-langgraph/test_langgraph/__main__.py
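
"""Interactive chat CLI: a LangGraph ReAct agent backed by a local Ollama model.

Run as a module from the repository root (assuming langchain-ollama, langgraph,
langmem, and prompt-toolkit are installed):

    python -m test_langgraph
"""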

import logging

import prompt_toolkit
import prompt_toolkit.auto_suggest
import prompt_toolkit.history
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_ollama import ChatOllama
from langgraph.prebuilt import create_react_agent
from langmem import create_memory_manager

from . import tools

logger = logging.getLogger(__name__)
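
# Persist prompt history to disk so arrow-up recall survives restarts; this
# appears to assume an output/ directory already exists in the working directory.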
cli_history = prompt_toolkit.history.FileHistory('output/cli_history.txt')
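
# Ollama model tag; the model must be available locally first, e.g. via
# `ollama pull hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q4_K_M`.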
MODEL = 'hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q4_K_M'


def create_raw_model():
    return ChatOllama(model=MODEL)


def create_model():
    available_tools = tools.get_tools()
    logger.info('Available tools:')
    for tool in available_tools:
        logger.info('- %s', tool.name)
    llm = create_raw_model()
    # create_react_agent binds the tools to the model itself; the previous
    # llm.bind_tools(...) call here returned a new runnable that was discarded.
    return create_react_agent(llm, tools=available_tools)


SYSTEM_MESSAGE = """
You are a helpful assistant with access to built-in system tools.
Format responses as markdown.
Provide links when available.
"""


def main():
    # Configure logging before creating anything that might log.
    logging.basicConfig(level='INFO')
    memory_manager = create_memory_manager(
        create_raw_model(),
        instructions='Extract all noteworthy facts, events, and relationships. Indicate their importance.',
        enable_inserts=True,
    )
    messages = [SystemMessage(SYSTEM_MESSAGE)]
    llm = create_model()
    # Index of the first message not yet printed to the terminal.
    prev_idx = 0
    while True:
        user_input = prompt_toolkit.prompt(
            'Human: ',
            history=cli_history,
            auto_suggest=prompt_toolkit.auto_suggest.AutoSuggestFromHistory(),
        )
        if user_input == '/memories':
            # Ask langmem to extract memories from the conversation so far.
            memories = memory_manager.invoke({'messages': messages})
            print(memories)
        else:
            messages.append(HumanMessage(user_input))
            result = llm.invoke({'messages': messages})
            # The agent returns its full state; 'messages' holds the whole
            # conversation, including any tool calls and tool outputs.
            messages = result['messages']
            # Print only the messages appended during this turn.
            for msg in messages[prev_idx:]:
                print(msg.pretty_repr())
            prev_idx = len(messages)


if __name__ == '__main__':
    main()