
from typing import Annotated
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import InjectedToolArg
from langgraph.store.base import BaseStore
from langgraph.prebuilt import InjectedState, InjectedStore
# Can be synchronous or asynchronous; the @tool decorator is not required.
async def my_tool(
    # These parameters are filled in by the LLM.
    some_arg: str,
    another_arg: float,
    # config: RunnableConfig is always available in LangChain tool calls.
    # It is not exposed to the LLM.
    config: RunnableConfig,
    # The following parameters are specific to the prebuilt ToolNode
    # (and the `create_react_agent` extension). If you call tools in your
    # own nodes, you must provide these yourself.
    store: Annotated[BaseStore, InjectedStore],
    # This injects the full graph state.
    # NOTE: "State" is a forward reference — the class is defined later in
    # this file, so the annotation must be quoted to avoid a NameError.
    state: Annotated["State", InjectedState],
    # You can also inject individual fields from the state.
    messages: Annotated[list, InjectedState("messages")],
    # You can also hide other parameters from the model with InjectedToolArg.
    # They must then be provided manually, which is useful when you call
    # tools/functions from your own nodes. This is incompatible with
    # create_react_agent or ToolNode:
    # some_other_arg: Annotated["MyPrivateClass", InjectedToolArg],
) -> str:
    """Call my_tool to have an impact on the real world.

    The docstring becomes the tool's description and is passed to the model.

    Parameters:
        some_arg: A very important parameter
        another_arg: Another parameter provided by the LLM
    """
    print(some_arg, another_arg, config, store, state, messages)
    # config, store, state, and messages (and any InjectedToolArg params) are
    # "hidden" from the model when the tool is passed to bind_tools or
    # with_structured_output.
    return "... some response"
from typing import List
# This is the state schema used by the prebuilt create_react_agent we'll be using below
from langgraph.prebuilt.chat_agent_executor import AgentState
from langchain_core.documents import Document
# State schema used by the prebuilt create_react_agent below. It extends the
# default AgentState with a custom `docs` field that tools can read via
# InjectedState.
class State(AgentState):
    docs: List[str]
from typing import List, Tuple
from typing_extensions import Annotated
from langchain_core.messages import ToolMessage
from langchain_core.tools import tool
from langgraph.prebuilt import InjectedState
@tool
def get_context(question: str, state: Annotated[dict, InjectedState]):
    """Get relevant context for answering the question."""
    # `state["docs"]` is already a list of strings, so join it directly.
    return "\n\n".join(state["docs"])
# Inspect the tool's full input schema: the injected `state` parameter is
# still present here, because it must be supplied at invocation time.
print(get_context.get_input_schema().model_json_schema())
# Output of the print above:
{'description': 'Get relevant context for answering the question.', 'properties': {'question': {'title': 'Question', 'type': 'string'}, 'state': {'title': 'State', 'type': 'object'}}, 'required': ['question', 'state'], 'title': 'get_context', 'type': 'object'}
However, if we look at the tool-call schema (i.e., the content passed to the model for tool invocation), the state has been removed because we do not want the LLM to see that it needs to fill these parameters:
# The tool-call schema is what the model sees: `state` has been stripped out,
# leaving only `question` for the LLM to fill in.
print(get_context.tool_call_schema.model_json_schema())
# Output of the print above:
{'description': 'Get relevant context for answering the question.', 'properties': {'question': {'title': 'Question', 'type': 'string'}}, 'required': ['question'], 'title': 'get_context', 'type': 'object'}
from langchain_ollama import ChatOllama
import base_conf
from langgraph.prebuilt import ToolNode, create_react_agent
from langgraph.checkpoint.memory import MemorySaver
# Local Ollama chat model; temperature=0 for deterministic tool calling.
model = ChatOllama(base_url=base_conf.base_url, model=base_conf.model_name, temperature=0)
tools = [get_context]
# ToolNode will automatically take care of injecting state into the tool.
tool_node = ToolNode(tools)
checkpointer = MemorySaver()
# state_schema=State gives the agent the custom `docs` field defined above.
graph = create_react_agent(model, tools, state_schema=State, checkpointer=checkpointer)
docs = [
    "FooBar company just raised 1 Billion dollars!",
    "FooBar company was founded in 2019",
]
inputs = {
    "messages": [{"type": "user", "content": "what's the latest news about FooBar"}],
    "docs": docs,
}
config = {"configurable": {"thread_id": "1"}}
# stream_mode="values" yields the full state after each step; print the
# newest message each time.
for chunk in graph.stream(inputs, config, stream_mode="values"):
    chunk["messages"][-1].pretty_print()
================================ Human Message =================================
what's the latest news about FooBar
================================== Ai Message ==================================
Tool Calls:
  get_context (e1eeaa88-b5f4-45ae-abf3-ed7fd861ce66)
 Call ID: e1eeaa88-b5f4-45ae-abf3-ed7fd861ce66
  Args:
    question: latest news about FooBar
================================= Tool Message =================================
Name: get_context
FooBar company just raised 1 Billion dollars!
FooBar company was founded in 2019
================================== Ai Message ==================================
The latest news about FooBar is that the company has just raised 1 Billion dollars! For reference, FooBar was founded in 2019.
# Populate an in-memory store with per-user documents. The second element of
# each namespace tuple is the user ID, so each user sees only their own docs.
from langgraph.store.memory import InMemoryStore
doc_store = InMemoryStore()
namespace = ("documents", "1") # user ID
doc_store.put(
namespace, "doc_0", {"doc": "FooBar company just raised 1 Billion dollars!"})
namespace = ("documents", "2") # user ID
doc_store.put(namespace, "doc_1", {"doc": "FooBar company was founded in 2019"})
from langgraph.store.base import BaseStore
from langchain_core.runnables import RunnableConfig
from langgraph.prebuilt import InjectedStore
@tool
def get_context(
    question: str,
    # config is always available in LangChain tool calls; not shown to the LLM.
    config: RunnableConfig,
    # The store is injected by ToolNode / create_react_agent. Note that
    # InjectedStore must be instantiated (InjectedStore()), unlike
    # InjectedState, which may be used bare.
    store: Annotated[BaseStore, InjectedStore()],
) -> str:
    """Get relevant context for answering the question."""
    # Scope the search to the calling user's namespace; user_id comes from the
    # per-invocation config ("configurable" dict).
    user_id = config.get("configurable", {}).get("user_id")
    docs = [item.value["doc"] for item in store.search(("documents", user_id))]
    return "\n\n".join(docs)
# The injected config and store are likewise hidden from the tool-call schema:
# the model only ever sees `question`.
print(get_context.tool_call_schema.model_json_schema())
# Output of the print above:
{'description': 'Get relevant context for answering the question.', 'properties': {'question': {'title': 'Question', 'type': 'string'}}, 'required': ['question'], 'title': 'get_context', 'type': 'object'}
tools = [get_context]
# ToolNode will automatically inject the Store into the tools.
tool_node = ToolNode(tools)
checkpointer = MemorySaver()
# Passing store= makes doc_store available to InjectedStore parameters.
graph = create_react_agent(model, tools, checkpointer=checkpointer, store=doc_store)
messages = [{"type": "user", "content": "what's the latest news about FooBar"}]
# user_id "1" scopes the document search to namespace ("documents", "1").
config = {"configurable": {"thread_id": "1", "user_id": "1"}}
for chunk in graph.stream({"messages": messages}, config, stream_mode="values"):
    chunk["messages"][-1].pretty_print()
================================ Human Message =================================
what's the latest news about FooBar
================================== Ai Message ==================================
Tool Calls:
  get_context (89b2d78c-6b6d-4c46-a5a7-cee513bff5cb)
 Call ID: 89b2d78c-6b6d-4c46-a5a7-cee513bff5cb
  Args:
    question: latest news about FooBar
================================= Tool Message =================================
Name: get_context
FooBar company just raised 1 Billion dollars!
================================== Ai Message ==================================
The latest news is that FooBar company has just raised 1 Billion dollars!
# Same question asked as user "2" on a fresh thread: the store lookup now hits
# namespace ("documents", "2") and returns that user's document instead.
messages = [{"type": "user", "content": "what's the latest news about FooBar"}]
config = {"configurable": {"thread_id": "2", "user_id": "2"}}
for chunk in graph.stream({"messages": messages}, config, stream_mode="values"):
    chunk["messages"][-1].pretty_print()
================================ Human Message =================================
what's the latest news about FooBar
================================== Ai Message ==================================
Tool Calls:
  get_context (817da555-c13e-4fa1-8bbe-3854713fc643)
 Call ID: 817da555-c13e-4fa1-8bbe-3854713fc643
  Args:
    question: latest news about FooBar
================================= Tool Message =================================
Name: get_context
FooBar company was founded in 2019
================================== Ai Message ==================================
The information I have currently states that FooBar company was founded in 2019. However, this doesn't provide the latest news. Could you please specify a date range or give me some more time to fetch the most recent updates?