Created
March 11, 2026 23:46
-
-
Save johnymontana/275169ba9ef29aa164124e55daaec8c5 to your computer and use it in GitHub Desktop.
LangFlow Custom Component for Neo4j Agent Memory Integration
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| """ | |
| neo4j-agent-memory + LangFlow Integration | |
| ========================================== | |
| Demonstrates Reasoning Memory: every tool call the agent makes is stored | |
| as a graph node in Neo4j, giving you a full, queryable audit trail. | |
| SETUP | |
| ----- | |
| pip install neo4j-agent-memory langchain langchain-ollama langflow neo4j | |
| Neo4j: use Neo4j Aura free tier (https://console.neo4j.io) | |
| or local: docker run -p7474:7474 -p7687:7687 neo4j:latest | |
| USAGE | |
| ----- | |
| Option A — paste Neo4jMemoryComponent into LangFlow's Custom Component editor | |
| Option B — run the standalone test at the bottom: python neo4j_memory_langflow.py | |
| """ | |
| # ═══════════════════════════════════════════════════════════════════════════════ | |
| # PART 1: LangFlow Custom Component | |
| # Paste this class into LangFlow → New Component → Custom Component | |
| # ═══════════════════════════════════════════════════════════════════════════════ | |
| from langflow.custom import Component | |
| from langflow.inputs import StrInput, SecretStrInput, MessageTextInput | |
| from langflow.outputs import MessageOutput | |
| from langflow.schema.message import Message | |
class Neo4jMemoryComponent(Component):
    """
    LangFlow component that wraps an Ollama agent with neo4j-agent-memory.

    Captures every tool call as a Reasoning Memory node in Neo4j so you
    can query the full decision trail after the conversation.

    Wiring in LangFlow:
        [Chat Input] → [Neo4j Memory Agent] → [Chat Output]
    """

    display_name = "Neo4j Memory Agent"
    description = "Ollama agent with graph-backed reasoning memory via neo4j-agent-memory"
    icon = "database"

    inputs = [
        MessageTextInput(
            name="input_value",
            display_name="User Message",
            info="The message to send to the agent",
        ),
        StrInput(
            name="neo4j_uri",
            display_name="Neo4j URI",
            value="bolt://localhost:7687",
            info="Neo4j connection URI (bolt:// or neo4j+s:// for Aura)",
        ),
        StrInput(
            name="neo4j_user",
            display_name="Neo4j Username",
            value="neo4j",
        ),
        SecretStrInput(
            name="neo4j_password",
            display_name="Neo4j Password",
        ),
        StrInput(
            name="ollama_model",
            display_name="Ollama Model",
            value="llama3.2",
            info="Any model available in your local Ollama instance",
        ),
        StrInput(
            name="conversation_id",
            display_name="Conversation ID",
            value="langflow-session-001",
            info="Used to group messages and reasoning traces in the graph",
        ),
    ]

    outputs = [
        MessageOutput(name="response", display_name="Agent Response"),
    ]

    def build_component(self):
        """Called once when the component is initialised in the flow.

        Builds the memory client, the Ollama LLM, the ReAct agent, and the
        AgentExecutor, caching them on the instance (``_memory_client``,
        ``_executor``) for reuse across calls to :meth:`build`.
        """
        # Imports are local so this class can be pasted into LangFlow's
        # custom-component editor without requiring the deps at module load.
        from neo4j_agent_memory import MemoryClient
        from langchain_ollama import ChatOllama
        from langchain.agents import AgentExecutor, create_react_agent
        from langchain_core.prompts import PromptTemplate
        from langchain_community.tools import DuckDuckGoSearchRun

        # ── Memory client ────────────────────────────────────────────────
        self._memory_client = MemoryClient(
            neo4j_uri=self.neo4j_uri,
            neo4j_user=self.neo4j_user,
            neo4j_password=self.neo4j_password,
            reasoning_memory=True,  # <-- this is the key flag
        )

        # ── LLM (local Ollama) ───────────────────────────────────────────
        # temperature=0 keeps the agent's tool-use decisions deterministic.
        llm = ChatOllama(model=self.ollama_model, temperature=0)

        # ── Tools ────────────────────────────────────────────────────────
        # Replace / extend with domain-specific tools for your use case.
        # Every tool call will be captured as a ReasoningTrace node.
        tools = [DuckDuckGoSearchRun(name="web_search")]

        # ── Agent prompt ─────────────────────────────────────────────────
        prompt = PromptTemplate.from_template(
            "You are a helpful assistant. Use tools when needed.\n\n"
            "Tools available: {tools}\n"
            "Tool names: {tool_names}\n\n"
            "Previous context:\n{memory_context}\n\n"
            "Question: {input}\n"
            "Scratchpad: {agent_scratchpad}"
        )

        # ── Wire memory callback into agent ──────────────────────────────
        # The LangChain callback captures on_tool_start / on_tool_end
        # and writes each call as a (:ReasoningTrace) node in Neo4j.
        memory_callback = self._memory_client.get_langchain_callback(
            conversation_id=self.conversation_id
        )

        agent = create_react_agent(llm=llm, tools=tools, prompt=prompt)
        self._executor = AgentExecutor(
            agent=agent,
            tools=tools,
            callbacks=[memory_callback],
            verbose=True,
            handle_parsing_errors=True,
        )

    def build(self, input_value: str) -> Message:
        """Called on every message in the flow.

        Fetches prior conversation context from Neo4j, runs the agent,
        persists both sides of the turn to short-term memory, and returns
        the agent's answer as a LangFlow ``Message``.
        """
        # Lazily initialise on the first message in case the framework
        # did not call build_component() beforehand.
        if not hasattr(self, "_executor"):
            self.build_component()

        # Pull prior context from the graph for this conversation.
        memory_context = self._memory_client.short_term.get_context(
            conversation_id=self.conversation_id,
            as_string=True,
        )

        result = self._executor.invoke({
            "input": input_value,
            # FIX: fall back to a placeholder when there is no prior
            # context, matching run_standalone_test(); otherwise the prompt
            # could render a literal "None" on the first turn.
            "memory_context": memory_context or "(no prior context)",
        })

        # Persist this turn to short-term memory.
        self._memory_client.short_term.add_messages(
            conversation_id=self.conversation_id,
            messages=[
                {"role": "user", "content": input_value},
                {"role": "assistant", "content": result["output"]},
            ],
        )

        return Message(text=result["output"])
| # ═══════════════════════════════════════════════════════════════════════════════ | |
| # PART 2: Standalone test — run directly to verify the integration works | |
| # before wiring it into LangFlow. | |
| # | |
| # python neo4j_memory_langflow.py | |
| # ═══════════════════════════════════════════════════════════════════════════════ | |
def run_standalone_test():
    """
    Runs three turns of a conversation and then queries Neo4j to show
    the reasoning traces that were captured — no LangFlow required.
    """
    import os

    from neo4j_agent_memory import MemoryClient
    from langchain_ollama import ChatOllama
    from langchain.agents import AgentExecutor, create_react_agent
    from langchain_core.prompts import PromptTemplate
    from langchain_community.tools import DuckDuckGoSearchRun

    # ── Config — override with env vars or edit directly ──────────────
    uri = os.getenv("NEO4J_URI", "bolt://localhost:7687")
    username = os.getenv("NEO4J_USER", "neo4j")
    password = os.getenv("NEO4J_PASSWORD", "password")
    model_name = os.getenv("OLLAMA_MODEL", "llama3.2")
    conversation_id = "standalone-test-001"

    banner = "═" * 60

    print("\n" + banner)
    print(" neo4j-agent-memory × LangFlow — standalone test")
    print(banner)

    # ── 1. Initialise memory client with reasoning memory enabled ──────
    print("\n[1/4] Connecting to Neo4j and initialising memory client...")
    client = MemoryClient(
        neo4j_uri=uri,
        neo4j_user=username,
        neo4j_password=password,
        reasoning_memory=True,
    )
    print(" ✓ MemoryClient ready")

    # ── 2. Build a LangChain ReAct agent backed by local Ollama ────────
    print("\n[2/4] Building ReAct agent with Ollama...")
    chat_model = ChatOllama(model=model_name, temperature=0)
    toolset = [DuckDuckGoSearchRun(name="web_search")]
    react_prompt = PromptTemplate.from_template(
        "You are a helpful assistant for satellite operations.\n"
        "Always explain your reasoning before using a tool.\n\n"
        "Tools: {tools}\n"
        "Tool names: {tool_names}\n\n"
        "Prior context from memory:\n{memory_context}\n\n"
        "Question: {input}\n"
        "Scratchpad: {agent_scratchpad}"
    )

    # The memory callback is the integration point:
    # it intercepts on_tool_start / on_tool_end / on_llm_end
    # and writes structured nodes to Neo4j automatically.
    trace_callback = client.get_langchain_callback(
        conversation_id=conversation_id
    )

    react_agent = create_react_agent(
        llm=chat_model, tools=toolset, prompt=react_prompt
    )
    runner = AgentExecutor(
        agent=react_agent,
        tools=toolset,
        callbacks=[trace_callback],
        verbose=True,
        handle_parsing_errors=True,
        max_iterations=3,
    )
    print(" ✓ Agent ready")

    # ── 3. Run a short multi-turn conversation ─────────────────────────
    print("\n[3/4] Running conversation turns...\n")
    questions = [
        "What is the current solar weather forecast and how might it affect LEO satellites?",
        "Based on that, what frequency bands are most at risk?",
        "Can you summarise what we discussed and what decisions we made?",
    ]
    for turn_number, question in enumerate(questions, 1):
        print(f"── Turn {turn_number} " + "─" * 50)
        print(f"User: {question}\n")

        # Fetch accumulated memory context before each turn.
        prior_context = client.short_term.get_context(
            conversation_id=conversation_id,
            as_string=True,
        )
        reply = runner.invoke({
            "input": question,
            "memory_context": prior_context or "(no prior context)",
        })

        # Persist both sides of the turn to short-term memory.
        client.short_term.add_messages(
            conversation_id=conversation_id,
            messages=[
                {"role": "user", "content": question},
                {"role": "assistant", "content": reply["output"]},
            ],
        )
        print(f"\nAssistant: {reply['output']}\n")

    # ── 4. Query the reasoning traces from Neo4j ───────────────────────
    print("\n[4/4] Querying reasoning traces from Neo4j...")
    print(" (This is the explainability payoff — every tool call is a graph node)\n")
    traces = client.reasoning.get_traces(conversation_id=conversation_id)
    if not traces:
        print(" No traces found — the agent may not have called any tools.")
    else:
        print(f" Found {len(traces)} reasoning trace(s):\n")
        for trace in traces:
            # Property names differ by library version: try camelCase
            # first, then snake_case.
            tool_name = trace.get('toolName', trace.get('tool_name', 'unknown'))
            tool_input = str(trace.get('toolInput', trace.get('tool_input', '')))[:80]
            tool_output = str(trace.get('toolOutput', trace.get('tool_output', '')))[:80]
            created_at = trace.get('createdAt', trace.get('timestamp', ''))
            print(" ┌─ ReasoningTrace ──────────────────────────────")
            print(f" │ tool_name : {tool_name}")
            print(f" │ tool_input : {tool_input}…")
            print(f" │ tool_output : {tool_output}…")
            print(f" │ timestamp : {created_at}")
            print(" └───────────────────────────────────────────────\n")

    # ── 5. Show equivalent Cypher for direct Neo4j Browser inspection ──
    print("\n" + banner)
    print(" Open Neo4j Browser and run this Cypher to explore the graph:")
    print(banner)
    print(f"""
// All reasoning traces for this conversation
MATCH (c:Conversation {{id: '{conversation_id}'}})-[:HAS_TRACE]->(t:ReasoningTrace)
RETURN c.id, t.toolName, t.toolInput, t.createdAt
ORDER BY t.createdAt
// Full provenance: conversation → messages → traces
MATCH path = (c:Conversation {{id: '{conversation_id}'}})-[*1..3]->(n)
RETURN path
""")
    print(banner + "\n")
# Entry point: run the standalone integration test (no LangFlow required).
if __name__ == "__main__":
    run_standalone_test()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment