{
 "cells": [
  { "cell_type": "markdown", "metadata": {}, "source": [ "# 使用LangGraph构建智能代理\n\n本示例演示了如何使用LangGraph创建一个AI代理。\n基于LangGraph文档中的示例:\nhttps://langchain-ai.github.io/langgraph/.\n\n首先安装依赖项:\n" ] },
  { "cell_type": "code", "execution_count": 1, "metadata": { "vscode": { "languageId": "shellscript" } }, "outputs": [], "source": [ "# pip install langgraph langchain-openai azure-identity" ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "导入所需模块。\n" ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "from dataclasses import dataclass\n",
   "from typing import Any, Callable, List, Literal\n",
   "\n",
   "from autogen_core import AgentId, MessageContext, RoutedAgent, SingleThreadedAgentRuntime, message_handler\n",
   "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
   "from langchain_core.messages import HumanMessage, SystemMessage\n",
   "from langchain_core.tools import tool # pyright: ignore\n",
   "from langchain_openai import AzureChatOpenAI, ChatOpenAI\n",
   "from langgraph.graph import END, MessagesState, StateGraph\n",
   "from langgraph.prebuilt import ToolNode"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "定义用于与代理通信的消息类型。\n" ] },
  { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [
   "@dataclass\n",
   "class Message:\n",
   "    \"\"\"A plain-text message exchanged with the agent.\"\"\"\n",
   "\n",
   "    content: str"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "定义代理将使用的工具。\n" ] },
  { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [
   "@tool # pyright: ignore\n",
   "def get_weather(location: str) -> str:\n",
   "    \"\"\"Get the current weather for a location.\"\"\"\n",
   "    # This is a placeholder, but don't tell the LLM that...\n",
   "    if \"sf\" in location.lower() or \"san francisco\" in location.lower():\n",
   "        return \"It's 60 degrees and foggy.\"\n",
   "    return \"It's 90 degrees and sunny.\""
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "使用LangGraph的API定义代理。\n" ] },
  { "cell_type": "code", "execution_count": 5, "metadata":
{}, "outputs": [], "source": [
   "class LangGraphToolUseAgent(RoutedAgent):\n",
   "    \"\"\"A routed agent that answers messages by running a LangGraph tool-use workflow.\"\"\"\n",
   "\n",
   "    def __init__(self, description: str, model: ChatOpenAI, tools: List[Callable[..., Any]]) -> None: # pyright: ignore\n",
   "        super().__init__(description)\n",
   "        self._model = model.bind_tools(tools) # pyright: ignore\n",
   "\n",
   "        # Define the function that determines whether to continue or not.\n",
   "        def should_continue(state: MessagesState) -> Literal[\"tools\", END]: # type: ignore\n",
   "            messages = state[\"messages\"]\n",
   "            last_message = messages[-1]\n",
   "            # If the LLM made a tool call, route to the \"tools\" node.\n",
   "            if last_message.tool_calls: # type: ignore\n",
   "                return \"tools\"\n",
   "            # Otherwise, we stop (reply to the user).\n",
   "            return END\n",
   "\n",
   "        # Define the function that calls the model.\n",
   "        async def call_model(state: MessagesState): # type: ignore\n",
   "            messages = state[\"messages\"]\n",
   "            response = await self._model.ainvoke(messages)\n",
   "            # We return a list, because this will get appended to the existing message list.\n",
   "            return {\"messages\": [response]}\n",
   "\n",
   "        tool_node = ToolNode(tools) # pyright: ignore\n",
   "\n",
   "        # Define a new graph.\n",
   "        self._workflow = StateGraph(MessagesState)\n",
   "\n",
   "        # Define the two nodes we will cycle between.\n",
   "        self._workflow.add_node(\"agent\", call_model) # pyright: ignore\n",
   "        self._workflow.add_node(\"tools\", tool_node) # pyright: ignore\n",
   "\n",
   "        # Set the entrypoint as `agent`;\n",
   "        # this means that this node is the first one called.\n",
   "        self._workflow.set_entry_point(\"agent\")\n",
   "\n",
   "        # We now add a conditional edge.\n",
   "        self._workflow.add_conditional_edges(\n",
   "            # First, we define the start node. We use `agent`,\n",
   "            # which means these edges are taken after the `agent` node is called.\n",
   "            \"agent\",\n",
   "            # Next, we pass in the function that determines which node is called next.\n",
   "            should_continue, # type: ignore\n",
   "        )\n",
   "\n",
   "        # We now add a normal edge from `tools` to `agent`,\n",
   "        # which means that after `tools` is called, the `agent` node is called next.\n",
   "        self._workflow.add_edge(\"tools\", \"agent\")\n",
   "\n",
   "        # Finally, we compile it! This turns the graph into a LangChain Runnable,\n",
   "        # meaning you can use it as you would any other runnable.\n",
   "        # NOTE(review): a checkpointer (memory) could optionally be passed to compile();\n",
   "        # none is passed here, so no conversation state persists between invocations.\n",
   "        self._app = self._workflow.compile()\n",
   "\n",
   "    @message_handler\n",
   "    async def handle_user_message(self, message: Message, ctx: MessageContext) -> Message:\n",
   "        \"\"\"Run the compiled LangGraph app on the user's message and return the final reply.\"\"\"\n",
   "        # Use the Runnable.\n",
   "        final_state = await self._app.ainvoke(\n",
   "            {\n",
   "                \"messages\": [\n",
   "                    SystemMessage(\n",
   "                        content=\"You are a helpful AI assistant. You can use tools to help answer questions.\"\n",
   "                    ),\n",
   "                    HumanMessage(content=message.content),\n",
   "                ]\n",
   "            },\n",
   "            # The thread_id is only meaningful when a checkpointer is configured at compile time.\n",
   "            config={\"configurable\": {\"thread_id\": 42}},\n",
   "        )\n",
   "        response = Message(content=final_state[\"messages\"][-1].content)\n",
   "        return response"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "现在让我们测试这个代理。首先需要创建一个代理运行时环境并\n注册代理,通过提供代理名称和一个\n用于创建代理的工厂函数。\n" ] },
  { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [
   "runtime = SingleThreadedAgentRuntime()\n",
   "await LangGraphToolUseAgent.register(\n",
   "    runtime,\n",
   "    \"langgraph_tool_use_agent\",\n",
   "    lambda: LangGraphToolUseAgent(\n",
   "        \"Tool use agent\",\n",
   "        ChatOpenAI(\n",
   "            model=\"gpt-4o\",\n",
   "            # api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
   "        ),\n",
   "        # To use Azure OpenAI instead, replace the ChatOpenAI client above with:\n",
   "        # AzureChatOpenAI(\n",
   "        #     azure_deployment=os.getenv(\"AZURE_OPENAI_DEPLOYMENT\"),\n",
   "        #     azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n",
   "        #     api_version=os.getenv(\"AZURE_OPENAI_API_VERSION\"),\n",
   "        #     # Using Azure Active Directory authentication:\n",
   "        #     azure_ad_token_provider=get_bearer_token_provider(DefaultAzureCredential()),\n",
   "        #     # Or using an API key:\n",
   "        #     # api_key=os.getenv(\"AZURE_OPENAI_API_KEY\"),\n",
   "        # ),\n",
   "        [get_weather],\n",
   "    ),\n",
   ")\n",
   "agent = AgentId(\"langgraph_tool_use_agent\", key=\"default\")"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "启动代理运行时。\n" ] },
  { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "runtime.start()" ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "向代理发送直接消息,并打印响应。\n" ] },
  { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "The current weather in San Francisco is 60 degrees and foggy.\n" ] } ], "source": [
   "response = await runtime.send_message(Message(\"What's the weather in SF?\"), agent)\n",
   "print(response.content)"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "停止代理运行时。\n" ] },
  { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "await runtime.stop()" ] }
 ],
 "metadata": { "kernelspec": { "display_name": "autogen_core", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.9" } },
 "nbformat": 4,
 "nbformat_minor": 2
}