diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..3f2f18c
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+prompt_toolkit
+langchain[ollama]
+langgraph
+langmem
diff --git a/test_langgraph/__main__.py b/test_langgraph/__main__.py
index 9b58a74..41371cd 100644
--- a/test_langgraph/__main__.py
+++ b/test_langgraph/__main__.py
@@ -6,6 +6,7 @@ import prompt_toolkit.history
 from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_ollama import ChatOllama
 from langgraph.prebuilt import create_react_agent
+from langmem import create_memory_manager
 
 logger = logging.getLogger(__name__)
 
@@ -13,18 +14,21 @@ from . import tools
 
 cli_history = prompt_toolkit.history.FileHistory('output/cli_history.txt')
 
-# MODEL = "gemma3:27b"
-# MODEL = "qwen3:latest"
 MODEL = 'hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q4_K_M'
 
 
+def create_raw_model():
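+    """Bare ChatOllama client, shared by the chat agent and the memory manager."""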
+    return ChatOllama(model=MODEL)
+
 def create_model():
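+    """Build the ReAct agent with every registered tool attached."""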
     available_tools = tools.get_tools()
     logger.info('Available tools:')
     for tool in available_tools:
         logger.info('- %s', tool.name)
 
-    llm = ChatOllama(model=MODEL)
-    llm.bind_tools(tools=available_tools)
+    llm = create_raw_model()
+    # create_react_agent binds the tools itself; the old bind_tools call was a discarded no-op.
     return create_react_agent(llm, tools=available_tools)
 
@@ -37,6 +39,13 @@ Provide links when available.
 
 
 def main():
     logging.basicConfig(level='INFO')
+    # langmem distills the conversation into standalone memories (see the /memories command below).
+    memory_manager = create_memory_manager(
+        create_raw_model(),
+        instructions="Extract all noteworthy facts, events, and relationships. Indicate their importance.",
+        enable_inserts=True,
+    )
+
     messages = [SystemMessage(SYSTEM_MESSAGE)]
     llm = create_model()
@@ -47,18 +55,24 @@ def main():
             history=cli_history,
             auto_suggest=prompt_toolkit.auto_suggest.AutoSuggestFromHistory(),
         )
-        messages.append(HumanMessage(user_input))
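+        # Local command: dump langmem's extracted memories instead of sending a turn to the agent.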
+        if user_input == '/memories':
+            # invoke() returns the list of memories langmem extracted from the transcript.
+            for memory in memory_manager.invoke({"messages": messages}):
+                print(memory)
+        else:
+            messages.append(HumanMessage(user_input))
 
-        result = llm.invoke(
-            {
-                'messages': messages,
-            },
-        )
-        messages = result['messages']
-        for msg in messages[prev_idx:]:
-            print(msg.pretty_repr())
-            del msg
-        prev_idx = len(messages)
+            result = llm.invoke(
+                {
+                    'messages': messages,
+                },
+            )
+            messages = result['messages']
+            # Show everything appended since the last turn (human, tool, and AI messages).
+            for msg in messages[prev_idx:]:
+                print(msg.pretty_repr())
+            prev_idx = len(messages)
 
 
 if __name__ == '__main__':