Testing memories

Jon Michael Aanes 2025-05-11 20:15:42 +02:00
parent e42bb3df0a
commit 60ae842764
2 changed files with 29 additions and 14 deletions

requirements.txt (new file, +3)

@@ -0,0 +1,3 @@
+prompt_toolkit
+langchain[ollama]
+langgraph
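
Note: the commit also imports langmem in the code below, but no matching entry appears here; presumably requirements.txt needs one more line (assuming the PyPI package name matches the import):

    langmem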

@@ -6,6 +6,7 @@ import prompt_toolkit.history
 from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_ollama import ChatOllama
 from langgraph.prebuilt import create_react_agent
+from langmem import create_memory_manager

 logger = logging.getLogger(__name__)
@@ -13,18 +14,19 @@ from . import tools
 cli_history = prompt_toolkit.history.FileHistory('output/cli_history.txt')

-# MODEL = "gemma3:27b"
-# MODEL = "qwen3:latest"
 MODEL = 'hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q4_K_M'


+def create_raw_model():
+    return ChatOllama(model=MODEL)
+
+
 def create_model():
     available_tools = tools.get_tools()
     logger.info('Available tools:')
     for tool in available_tools:
         logger.info('- %s', tool.name)
-    llm = ChatOllama(model=MODEL)
+    llm = create_raw_model()
     llm.bind_tools(tools=available_tools)
     return create_react_agent(llm, tools=available_tools)
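
A note on this hunk: in LangChain, bind_tools returns a new runnable rather than mutating the model, so the unassigned call in create_model has no effect; create_react_agent already binds the tools to the model internally. A minimal sketch of the equivalent behavior, using only the calls already present above:

    def create_model():
        available_tools = tools.get_tools()
        logger.info('Available tools:')
        for tool in available_tools:
            logger.info('- %s', tool.name)
        # create_react_agent binds the tools internally; a separate
        # bind_tools call whose result is discarded does nothing.
        return create_react_agent(create_raw_model(), tools=available_tools)
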
@@ -37,6 +39,12 @@ Provide links when available.
 def main():
+    memory_manager = create_memory_manager(
+        create_raw_model(),
+        instructions="Extract all noteworthy facts, events, and relationships. Indicate their importance.",
+        enable_inserts=True,
+    )
+
     logging.basicConfig(level='INFO')
     messages = [SystemMessage(SYSTEM_MESSAGE)]
     llm = create_model()
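
For context: langmem's create_memory_manager wraps the model in a runnable that extracts structured memories from a message list; enable_inserts=True lets it create new entries rather than only update existing ones. Per langmem's documentation it returns a list of (id, content) pairs whose content defaults to a Memory model with a single content string; a hedged sketch of inspecting the result (field names are taken from the docs and worth verifying against the installed version):

    memories = memory_manager.invoke({'messages': messages})
    for memory_id, memory in memories:
        # memory is a pydantic object; the default Memory schema
        # exposes one string field, `content`.
        print(memory_id, memory.content)
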
@@ -47,18 +55,22 @@ def main():
             history=cli_history,
             auto_suggest=prompt_toolkit.auto_suggest.AutoSuggestFromHistory(),
         )
-        messages.append(HumanMessage(user_input))
-        result = llm.invoke(
-            {
-                'messages': messages,
-            },
-        )
-        messages = result['messages']
+        if user_input == '/memories':
+            memories = memory_manager.invoke({"messages": messages})
+            print(memories)
+        else:
+            messages.append(HumanMessage(user_input))
+            result = llm.invoke(
+                {
+                    'messages': messages,
+                },
+            )
+            messages = result['messages']

         for msg in messages[prev_idx:]:
             print(msg.pretty_repr())
             del msg
         prev_idx = len(messages)


 if __name__ == '__main__':
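
Taken together, the loop now treats /memories as a REPL command: ordinary input still goes to the agent, while /memories runs the extractor over the accumulated transcript and prints the raw result. A hypothetical session (output shape illustrative, not captured from a real run):

    > My cat is named Whiskers.
    > /memories
    [ExtractedMemory(id='...', content=Memory(content='User has a cat named Whiskers'))]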