Welcome to this guide on using LangChain with tools and chat history management. This guide will explain how to set up and use tools with LangChain, manage chat history, and handle tool interactions within your conversation flow.
- Overview
- Setting Up Your Environment
- Tools Overview
- Handling Chat History
- Message Flow with Tools
In this guide, we'll cover:
• Using LangChain's pattern for building chat-based applications
• Setting up and using tools with LangChain
• Managing chat history
• Processing tool calls and their results
Before you begin, make sure you have:
• Required dependencies:
– langchain
– langchain_core
– Your chosen LLM provider's library (e.g., langchain_anthropic, langchain_openai)
pip install langchain langchain-core langchain-anthropic langchain-openai

Tools in LangChain are defined using the Tool class. Here's how to create tools:
from langchain.tools import Tool
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from langchain_core.messages import (
SystemMessage,
HumanMessage,
AIMessage,
ToolMessage
)
def search(query: str) -> str:
    """Placeholder search tool.

    Replace the body with a real web/search-API call; for the guide it
    simply echoes the query back as a canned result string.
    """
    return "Results for: " + query
def write_file(input_dict: dict) -> str:
    """Placeholder file-writing tool.

    Expects a dict with a 'file_path' key (and, per the tool description,
    a 'text' key); replace the body with real file I/O as needed.
    """
    target = input_dict["file_path"]
    return "Wrote to {}".format(target)
def read_file(file_path: str) -> str:
    """Placeholder file-reading tool.

    Replace the body with real file I/O; for the guide it returns a
    canned string naming the requested path.
    """
    return "Content from " + file_path
# Register the placeholder functions as LangChain Tool objects.
# The `description` text is what the LLM reads when deciding whether
# (and how) to call each tool, so keep it accurate and specific.
tools = [
    Tool(
        name="Search",
        func=search,
        description="useful for searching the internet to find answers to questions"
    ),
    Tool(
        name="WriteFile",
        func=write_file,
        description="Write content to a file. Input should be a dictionary with 'file_path' and 'text' keys."
    ),
    Tool(
        name="ReadFile",
        func=read_file,
        description="Read content from a file. Input should be a file path."
    )
]To use tools with your LLM:
# For Anthropic's Claude
llm = ChatAnthropic(model_name="claude-3-sonnet-20240229")
# Or for OpenAI's GPT models
# llm = ChatOpenAI(model_name="gpt-4")
# bind_tools attaches the tool schemas to the model so its responses
# may include structured tool calls instead of plain text.
llm_with_tools = llm.bind_tools(tools)LangChain uses different message types for chat history:
• SystemMessage: Initial instructions/context
• HumanMessage: User inputs
• AIMessage: Model responses
• ToolMessage: Tool execution results
from langchain_core.messages import (
SystemMessage,
HumanMessage,
AIMessage,
ToolMessage
)
# Example conversation history showing the message pattern around a tool
# call: an AIMessage with empty content carrying `tool_calls`, followed
# by a ToolMessage whose `tool_call_id` links back to it, followed by
# the model's final text response.
messages = [
    SystemMessage(content="You are a helpful assistant that can use tools."),
    HumanMessage(content="Hello"),
    AIMessage(content="Hi, how can I help?"),
    HumanMessage(content="Search for 'LangChain'"),
    # AI turn that requests a tool instead of answering directly.
    # NOTE(review): `arguments` is an empty JSON object here — presumably
    # a simplified example; a real call would carry the search query.
    AIMessage(content="", additional_kwargs={"tool_calls": [{
        "id": "call_1",
        "type": "function",
        "function": {
            "name": "Search",
            "arguments": "{}"
        }
    }]}),
    # Tool result, linked to the request above via tool_call_id.
    ToolMessage(
        content="Results for: LangChain",
        tool_call_id="call_1",
        name="Search"
    ),
    AIMessage(content="Here are the results I found...")
]To retrieve chat history in a user-friendly format:
def get_chat_history(messages):
    """Return chat history as a list of dicts, skipping system messages and empty tool calls.

    Each entry has "role" (the message's LangChain type string),
    "content", "sender" ("human" / "assistant" / "tool"), and, for tool
    messages, "tool_name".
    """
    history = []
    for msg in messages[1:]:  # Skip system message
        # Skip empty AI messages (those only containing tool calls)
        if isinstance(msg, AIMessage) and not msg.content and hasattr(msg, 'tool_calls'):
            continue
        message_dict = {
            "role": msg.type,
            "content": msg.content or "",
            "sender": "human" if isinstance(msg, HumanMessage) else (
                "assistant" if isinstance(msg, AIMessage) else "tool"
            )
        }
        if isinstance(msg, ToolMessage):
            # NOTE(review): ToolMessage stores its tool under `name`, not
            # `tool_name`, so this getattr likely always yields "unknown"
            # — confirm against the langchain_core version in use.
            message_dict["tool_name"] = getattr(msg, "tool_name", "unknown")
        # Drop whitespace-only entries so the rendered history stays clean.
        if message_dict["content"].strip():
            history.append(message_dict)
    return historyTo load existing chat history:
def set_chat_history(messages_list, system_prompt="You are a helpful assistant that can use tools."):
    """Load chat history from a list of message dictionaries.

    Rebuilds LangChain message objects from plain dicts with "role" and
    "content" keys (and "tool_name" on tool entries), prepending a
    SystemMessage.  When an assistant entry is immediately followed by a
    tool entry, it is expanded into the three-message tool pattern:
    empty AIMessage with tool_calls -> ToolMessage -> final AIMessage.
    """
    messages = [SystemMessage(content=system_prompt)]
    # Used to synthesize unique tool_call_ids linking requests to results.
    tool_message_counter = 0
    i = 0
    while i < len(messages_list):
        msg = messages_list[i]
        role = msg["role"].lower()
        content = msg["content"]
        if role == "human":
            messages.append(HumanMessage(content=content))
        elif role == "assistant":
            # Check if next message is a tool message
            if (i + 1 < len(messages_list) and
                    messages_list[i + 1]["role"].lower() == "tool"):
                # Get tool info from next message
                tool_msg = messages_list[i + 1]
                tool_name = tool_msg.get("tool_name", "unknown")
                tool_call_id = f"call_{tool_message_counter}"
                # Add AI message with tool use
                # NOTE(review): original tool arguments are not preserved
                # in the dict format, so "{}" is emitted — confirm this is
                # acceptable for replayed histories.
                messages.append(AIMessage(
                    content="",
                    additional_kwargs={"tool_calls": [{
                        "id": tool_call_id,
                        "type": "function",
                        "function": {
                            "name": tool_name,
                            "arguments": "{}"
                        }
                    }]}
                ))
                # Add tool result message
                messages.append(ToolMessage(
                    content=tool_msg["content"],
                    tool_call_id=tool_call_id,
                    name=tool_name
                ))
                # Add AI response if it has content
                if content.strip():
                    messages.append(AIMessage(content=content))
                tool_message_counter += 1
                i += 1  # Skip the next message (tool) since we've handled it
            else:
                # Regular assistant message
                messages.append(AIMessage(content=content))
        # Advance past the current entry (combined with the skip above,
        # a handled assistant+tool pair advances by two).
        i += 1
    return messagesHere's how to handle a new message with potential tool usage:
async def handle_message(messages, llm_with_tools, user_input):
    """Process a user message and return the assistant's response.

    Appends the user input to `messages`, invokes the tool-bound LLM,
    executes any requested tools, re-invokes the LLM with the tool
    results, and appends the final AIMessage.  `messages` is mutated in
    place.  On any failure an error string is returned instead of raising.
    """
    # Add user message
    messages.append(HumanMessage(content=user_input))
    try:
        # Get initial response from LLM
        ai_msg = await llm_with_tools.ainvoke(messages)
        # Check if the AI wants to use tools
        if hasattr(ai_msg, 'tool_calls') and ai_msg.tool_calls:
            # Add AI message with tool calls
            # NOTE(review): the model's real arguments are replaced with
            # "{}" here, so the recorded history loses the call args even
            # though `tool_call.get('args')` is used below — confirm this
            # is intentional.
            messages.append(AIMessage(
                content="",
                additional_kwargs={"tool_calls": [{
                    "id": tool_call["id"],
                    "type": "function",
                    "function": {
                        "name": tool_call["name"],
                        "arguments": "{}"
                    }
                } for tool_call in ai_msg.tool_calls]}
            ))
            # Process each tool call
            for tool_call in ai_msg.tool_calls:
                tool_name = tool_call["name"]
                tool_call_id = tool_call["id"]
                # Find and execute the tool (case-insensitive name match)
                tool = next((t for t in tools if t.name.lower() == tool_name.lower()), None)
                if tool:
                    args = tool_call.get('args', {})
                    # '__arg1' is the key single-input Tools receive their
                    # positional argument under.
                    query = args.get('__arg1', '')
                    result = tool.invoke(query)
                    # Add tool result
                    messages.append(ToolMessage(
                        content=str(result),
                        tool_call_id=tool_call_id,
                        name=tool_name
                    ))
                # NOTE(review): an unknown tool name is silently skipped,
                # leaving the tool call without a result message.
            # Get final response after tool usage
            final_response = await llm_with_tools.ainvoke(messages)
            content = final_response.content
        else:
            # If no tool calls, use the direct response
            content = ai_msg.content
        # Add final response to history
        messages.append(AIMessage(content=content))
        return content
    except Exception as e:
        # Broad catch keeps the chat loop alive; the error is surfaced
        # to the user as the reply text rather than re-raised.
        return f"Error processing message: {str(e)}"• Always include a system message to define the assistant's behavior and available tools
• When using tools, ensure the model knows they're available via the system message
• After a tool call, re-invoke the LLM to incorporate the tool's result
• When loading chat history, watch for the pattern: empty assistant message → tool message → final assistant message
• Keep track of tool call IDs to properly link tool calls with their results