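"""Interactive command-line chat agent.

Builds a LangGraph ReAct agent around a local Ollama model and the tools
exposed by the sibling ``tools`` module, then runs a read/invoke/print loop
with prompt_toolkit line editing and persistent history.

Because of the relative ``from . import tools``, run this file as a module
inside its package (``python -m <package>.<module>``), not as a bare script.
"""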
import logging
from typing import Annotated

import prompt_toolkit
import prompt_toolkit.auto_suggest
import prompt_toolkit.history
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_ollama import ChatOllama
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, create_react_agent, tools_condition
from typing_extensions import TypedDict

from . import tools

logger = logging.getLogger(__name__)

# Persistent history for the interactive prompt, stored in output/cli_history.txt.
cli_history = prompt_toolkit.history.FileHistory('output/cli_history.txt')

# Ollama model tag used for the agent; alternative models are kept commented out.
#MODEL = "gemma3:27b"
#MODEL = "qwen3:latest"
MODEL = 'hf.co/unsloth/Qwen3-30B-A3B-GGUF:Q4_K_M'

def create_model():
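    """Discover the available tools, log them, and wrap a ChatOllama model
    in LangGraph's prebuilt ReAct agent."""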
    available_tools = tools.get_tools()
    logger.info("Available tools:")
    for tool in available_tools:
        logger.info("- %s", tool.name)

    # create_react_agent binds the tools to the model itself, so no explicit
    # bind_tools() call is needed here.
    llm = ChatOllama(model=MODEL)
    return create_react_agent(llm, tools=available_tools)

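# A minimal sketch (not used by main()) of the equivalent agent wired up by
# hand with StateGraph, ToolNode and tools_condition, which the imports above
# would support; the node names and the _State class are illustrative choices.
class _State(TypedDict):
    messages: Annotated[list, add_messages]


def build_graph_manually():
    """Sketch of a hand-rolled equivalent of create_react_agent()."""
    available_tools = tools.get_tools()
    llm_with_tools = ChatOllama(model=MODEL).bind_tools(available_tools)

    def chatbot(state: _State):
        # Call the tool-aware model on the accumulated message history.
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    graph = StateGraph(_State)
    graph.add_node("chatbot", chatbot)
    graph.add_node("tools", ToolNode(tools=available_tools))
    graph.add_edge(START, "chatbot")
    # Route to the tool node when the last message contains tool calls,
    # otherwise end the turn.
    graph.add_conditional_edges("chatbot", tools_condition)
    graph.add_edge("tools", "chatbot")
    return graph.compile()
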
SYSTEM_MESSAGE = '''
You are a useful assistant with access to built-in system tools.
Format responses as markdown.
Provide links when available.
'''

def main():
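    """Interactive loop: read a user line, invoke the agent on the full
    message history, and pretty-print the messages added in this turn."""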
    logging.basicConfig(level='INFO')

    messages = [SystemMessage(SYSTEM_MESSAGE)]
    llm = create_model()
    prev_idx = 0
    while True:
        user_input = prompt_toolkit.prompt(
            "Human: ",
            history=cli_history,
            auto_suggest=prompt_toolkit.auto_suggest.AutoSuggestFromHistory(),
        )
        messages.append(HumanMessage(user_input))

        result = llm.invoke({
            'messages': messages,
        })
        messages = result['messages']
        # Print only the messages added this turn: the assistant reply plus
        # any intermediate tool calls and tool results.
        for msg in messages[prev_idx:]:
            print(msg.pretty_repr())
        prev_idx = len(messages)

if __name__ == '__main__':
    main()