autogen_ext.agents.azure#
- class AzureAIAgent(name: str, description: str, project_client: AIProjectClient, deployment_name: str, instructions: str, tools: Iterable[Literal['file_search', 'code_interpreter', 'bing_grounding', 'azure_ai_search', 'azure_function'] | BingGroundingToolDefinition | CodeInterpreterToolDefinition | AzureAISearchToolDefinition | FileSearchToolDefinition | AzureFunctionToolDefinition | Tool | Callable[[...], Any] | Callable[[...], Awaitable[Any]]] | None = None, agent_id: str | None = None, thread_id: str | None = None, metadata: Dict[str, str] | None = None, response_format: AgentsResponseFormat | None = None, temperature: float | None = None, tool_resources: ToolResources | None = None, top_p: float | None = None)[source]#
An Azure AI assistant agent for AutoGen.
Installation
pip install "autogen-ext[azure]" # For Azure AI Foundry Agent Service
This agent leverages the Azure AI Assistant API to create AI assistants with capabilities such as:
Code interpretation and execution
Grounded information retrieval with Bing search
File handling and search
Custom function calling
Multi-turn conversations
This agent integrates with AutoGen's messaging system, providing a seamless way to use Azure AI capabilities within the AutoGen framework. It supports tools such as the code interpreter, file search, and various grounded retrieval mechanisms.
- The agent name must be a valid Python identifier (a quick check is sketched after this list):
It must start with a letter (A-Z, a-z) or an underscore (_).
It can only contain letters, digits (0-9), or underscores.
It cannot be a Python keyword.
It cannot contain spaces or special characters.
It cannot start with a digit.
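As a quick illustration, the rules above amount to the name being a non-keyword Python identifier. The helper below is hypothetical and not part of the library:

import keyword


def is_valid_agent_name(name: str) -> bool:
    # Accept names that are Python identifiers and not reserved keywords.
    return name.isidentifier() and not keyword.iskeyword(name)


assert is_valid_agent_name("bing_agent")  # letters, digits, underscores are fine
assert not is_valid_agent_name("2nd_agent")  # cannot start with a digit
assert not is_valid_agent_name("class")  # Python keywords are rejected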
For information on how to create a new secure agent with a user-managed identity, see: https://learn.microsoft.com/en-us/azure/ai-services/agents/how-to/virtual-networks
Examples
Use the AzureAIAgent to create an agent grounded with Bing:
import asyncio
import os

import dotenv
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure import AzureAIAgent
from azure.ai.agents.models import BingGroundingTool
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential


async def bing_example():
    async with DefaultAzureCredential() as credential:
        async with AIProjectClient(  # type: ignore
            credential=credential, endpoint=os.getenv("AZURE_PROJECT_ENDPOINT", "")
        ) as project_client:
            conn = await project_client.connections.get(name=os.getenv("BING_CONNECTION_NAME", ""))

            bing_tool = BingGroundingTool(conn.id)
            agent_with_bing_grounding = AzureAIAgent(
                name="bing_agent",
                description="An AI assistant with Bing grounding",
                project_client=project_client,
                deployment_name="gpt-4o",
                instructions="You are a helpful assistant.",
                tools=bing_tool.definitions,
                metadata={"source": "AzureAIAgent"},
            )

            # For the Bing grounding tool to return citations, the message must instruct
            # the model to include them, e.g. "Please provide citations for the answers".
            result = await agent_with_bing_grounding.on_messages(
                messages=[
                    TextMessage(
                        content="What is Microsoft's annual leave policy? Provide citations for your answers.",
                        source="user",
                    )
                ],
                cancellation_token=CancellationToken(),
                message_limit=5,
            )
            print(result)


if __name__ == "__main__":
    dotenv.load_dotenv()
    asyncio.run(bing_example())
Use the AzureAIAgent to create an agent with file search capability:
import asyncio
import os
import tempfile
import urllib.request

import dotenv
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure import AzureAIAgent
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential


async def file_search_example():
    # Download README.md from GitHub
    readme_url = "https://raw.githubusercontent.com/microsoft/autogen/refs/heads/main/README.md"
    temp_file = None

    try:
        # Create a temporary file to store the downloaded README
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".md")
        urllib.request.urlretrieve(readme_url, temp_file.name)
        print(f"Downloaded README.md to {temp_file.name}")

        async with DefaultAzureCredential() as credential:
            async with AIProjectClient(  # type: ignore
                credential=credential, endpoint=os.getenv("AZURE_PROJECT_ENDPOINT", "")
            ) as project_client:
                agent_with_file_search = AzureAIAgent(
                    name="file_search_agent",
                    description="An AI assistant with file search capabilities",
                    project_client=project_client,
                    deployment_name="gpt-4.1-mini",
                    instructions="You are a helpful assistant.",
                    tools=["file_search"],
                    metadata={"source": "AzureAIAgent"},
                )

                ct: CancellationToken = CancellationToken()

                # Use the downloaded README file for file search
                await agent_with_file_search.on_upload_for_file_search(
                    file_paths=[temp_file.name],
                    vector_store_name="file_upload_index",
                    vector_store_metadata={"source": "AzureAIAgent"},
                    cancellation_token=ct,
                    vector_store_polling_interval=60,
                )
                result = await agent_with_file_search.on_messages(
                    messages=[
                        TextMessage(
                            content="Hello, what is AutoGen and what capabilities does it have?", source="user"
                        )
                    ],
                    cancellation_token=ct,
                    message_limit=5,
                )
                print(result)
    finally:
        # Clean up the temporary file
        if temp_file and os.path.exists(temp_file.name):
            os.unlink(temp_file.name)
            print(f"Removed temporary file {temp_file.name}")


if __name__ == "__main__":
    dotenv.load_dotenv()
    asyncio.run(file_search_example())
Use the AzureAIAgent to create an agent with the code interpreter capability:
import asyncio
import os

import dotenv
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure import AzureAIAgent
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential


async def code_interpreter_example():
    async with DefaultAzureCredential() as credential:
        async with AIProjectClient(  # type: ignore
            credential=credential, endpoint=os.getenv("AZURE_PROJECT_ENDPOINT", "")
        ) as project_client:
            agent_with_code_interpreter = AzureAIAgent(
                name="code_interpreter_agent",
                description="An AI assistant with code interpreter capabilities",
                project_client=project_client,
                deployment_name="gpt-4.1-mini",
                instructions="You are a helpful assistant.",
                tools=["code_interpreter"],
                metadata={"source": "AzureAIAgent"},
            )

            await agent_with_code_interpreter.on_upload_for_code_interpreter(
                file_paths="/workspaces/autogen/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/data/nifty_500_quarterly_results.csv",
                cancellation_token=CancellationToken(),
                polling_interval=5,
            )

            result = await agent_with_code_interpreter.on_messages(
                messages=[
                    TextMessage(
                        content="Aggregate the number of stocks per industry and give me a markdown table as a result?",
                        source="user",
                    )
                ],
                cancellation_token=CancellationToken(),
            )

            print(result)


if __name__ == "__main__":
    dotenv.load_dotenv()
    asyncio.run(code_interpreter_example())
- property produced_message_types: Sequence[type[Annotated[TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]]]#
The types of messages that the assistant agent produces.
- async on_messages(messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken | None = None, message_limit: int = 1) → Response [source]#
Process incoming messages and return a response from the Azure AI agent.
This method is the main entry point for interacting with the agent. It delegates to on_messages_stream and returns the final response.
- Parameters:
messages (Sequence[BaseChatMessage]) – The messages to process
cancellation_token (CancellationToken) – Token used to cancel the operation
message_limit (int, optional) – Maximum number of messages to retrieve from the thread
- Returns:
Response – The agent's response, including the chat message and any inner events
- Raises:
AssertionError – If the stream does not return a final result
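A minimal sketch of a direct call, assuming agent is an AzureAIAgent already constructed as in the examples above:

from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure import AzureAIAgent


async def ask(agent: AzureAIAgent) -> None:
    # Send one user message and print the agent's final chat message.
    response = await agent.on_messages(
        messages=[TextMessage(content="Summarize our findings so far.", source="user")],
        cancellation_token=CancellationToken(),
        message_limit=5,  # retrieve at most 5 messages from the thread
    )
    print(response.chat_message)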
- async on_messages_stream(messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken | None = None, message_limit: int = 1, polling_interval: float = 0.5) → AsyncGenerator[Annotated[ToolCallRequestEvent | ToolCallExecutionEvent | MemoryQueryEvent | UserInputRequestedEvent | ModelClientStreamingChunkEvent | ThoughtEvent | SelectSpeakerEvent | CodeGenerationEvent | CodeExecutionEvent, FieldInfo(annotation=NoneType, required=True, discriminator='type')] | Annotated[TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')] | Response, None] [source]#
Process incoming messages and generate a streaming response from the Azure AI agent.
This method handles the complete interaction flow with the Azure AI agent:
1. Process the input messages
2. Create and monitor a run
3. Handle tool calls and their results
4. Retrieve and return the agent's final response
It yields events during processing (for example, tool calls) and finally yields the complete response containing the agent's message.
- Parameters:
messages (Sequence[BaseChatMessage]) – The messages to process
cancellation_token (CancellationToken) – Token used to cancel the operation
message_limit (int, optional) – Maximum number of messages to retrieve from the thread
polling_interval (float, optional) – Time to sleep between polls of the run status
- Yields:
AgentEvent | ChatMessage | Response – Events during processing and the final response
- Raises:
ValueError – If the run fails or no message is received from the assistant
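A minimal streaming sketch, assuming agent is an AzureAIAgent constructed as in the examples above; intermediate events are printed as they arrive and the final Response is returned:

from autogen_agentchat.base import Response
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure import AzureAIAgent


async def stream_once(agent: AzureAIAgent) -> Response | None:
    final: Response | None = None
    async for item in agent.on_messages_stream(
        messages=[TextMessage(content="Which tools can you use?", source="user")],
        cancellation_token=CancellationToken(),
        message_limit=5,
        polling_interval=0.5,  # how often the run status is polled
    ):
        if isinstance(item, Response):
            final = item  # the last item yielded is the complete response
        else:
            print(f"event: {item}")  # tool-call and other intermediate events
    return final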
- async handle_text_message(content: str, cancellation_token: CancellationToken | None = None) → None [source]#
Handle a text message by adding it to the conversation thread.
- Parameters:
content (str) – The text content of the message
cancellation_token (CancellationToken) – Token used to cancel the operation
- Returns:
None
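A small sketch, assuming an existing agent: the text is appended to the current thread without starting a run, so it can serve as extra context for a later on_messages call:

from autogen_core import CancellationToken
from autogen_ext.agents.azure import AzureAIAgent


async def add_context(agent: AzureAIAgent) -> None:
    # Append background text to the conversation thread; nothing is returned.
    await agent.handle_text_message(
        content="Background: the user is evaluating AutoGen for an internal project.",
        cancellation_token=CancellationToken(),
    )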
- async on_reset(cancellation_token: CancellationToken) → None #
Reset the agent's conversation by creating a new thread.
This method allows the conversation to be reset without losing the agent's definition or capabilities. It creates a new thread for a fresh conversation.
Note: The Azure AI Agent API does not currently support deleting messages, so a new thread is created instead.
- Parameters:
cancellation_token (CancellationToken) – Token used to cancel the operation
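A small usage sketch, assuming an existing agent: resetting between two unrelated requests so the second one starts on a fresh thread:

from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure import AzureAIAgent


async def two_independent_requests(agent: AzureAIAgent) -> None:
    token = CancellationToken()
    await agent.on_messages(
        messages=[TextMessage(content="Plan a three-day trip to Kyoto.", source="user")],
        cancellation_token=token,
    )
    # Start a new thread; the agent definition and its tools are kept.
    await agent.on_reset(cancellation_token=token)
    await agent.on_messages(
        messages=[TextMessage(content="Now draft a short project status update.", source="user")],
        cancellation_token=token,
    )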
- async save_state() → Mapping[str, Any] [source]#
Save the agent's current state for future restoration.
This method serializes the agent's state, including the IDs of the agent, thread, and messages, as well as related resources such as vector stores and uploaded files.
- Returns:
Mapping[str, Any] – A dictionary containing the serialized state data
- async load_state(state: Mapping[str, Any]) → None [source]#
Load a previously saved state into this agent.
This method deserializes and restores a previously saved agent state, setting the agent up to continue a previous conversation or session.
- Parameters:
state (Mapping[str, Any]) – The previously saved state dictionary
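A sketch of a save-and-restore round trip, assuming both agents are built as in the examples above and point at the same Azure AI project. Persisting the state to a JSON file is illustrative; the saved mapping mainly holds identifiers (agent, thread, vector store, and file IDs), so it is assumed here to be JSON-serializable:

import json

from autogen_ext.agents.azure import AzureAIAgent


async def checkpoint_and_restore(agent: AzureAIAgent, restored_agent: AzureAIAgent) -> None:
    # Capture the identifiers needed to resume this conversation later.
    state = await agent.save_state()
    with open("azure_agent_state.json", "w") as f:
        json.dump(dict(state), f)

    # Later (possibly in another process), load the state into a freshly
    # constructed agent so it continues the same thread.
    with open("azure_agent_state.json") as f:
        saved = json.load(f)
    await restored_agent.load_state(saved)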
- async on_upload_for_code_interpreter(file_paths: str | Iterable[str], cancellation_token: CancellationToken | None = None, polling_interval: float = 0.5) → None [source]#
Upload files to be used with the code interpreter tool.
This method uploads files for the agent's code interpreter tool and updates the thread's tool resources to include them.
- Parameters:
file_paths (str | Iterable[str]) – Path(s) of the file(s) to upload
cancellation_token (Optional[CancellationToken]) – Token used to cancel the operation
polling_interval (float) – Time to sleep between polls of the file status
- Raises:
ValueError – If the file upload fails or the agent does not have code interpreter capability
- async on_upload_for_file_search(file_paths: str | Iterable[str], cancellation_token: CancellationToken, vector_store_name: str | None = None, data_sources: List[VectorStoreDataSource] | None = None, expires_after: VectorStoreExpirationPolicy | None = None, chunking_strategy: VectorStoreChunkingStrategyRequest | None = None, vector_store_metadata: Dict[str, str] | None = None, vector_store_polling_interval: float = 1) → None [source]#
Upload files to be used with the file search tool.
This method handles uploading files for the file search capability, creating a vector store if needed, and updating the agent's configuration to use it.
- Parameters:
file_paths (str | Iterable[str]) – Path(s) of the file(s) to upload
cancellation_token (CancellationToken) – Token used to cancel the operation
vector_store_name (Optional[str]) – Name to assign to the vector store if a new one is created
data_sources (Optional[List[VectorStoreDataSource]]) – Additional data sources for the vector store
expires_after (Optional[VectorStoreExpirationPolicy]) – Expiration policy for the vector store contents
chunking_strategy (Optional[VectorStoreChunkingStrategyRequest]) – Strategy used to chunk the file contents
vector_store_metadata (Optional[Dict[str, str]]) – Additional metadata for the vector store
vector_store_polling_interval (float) – Time to sleep between polls of the vector store status
- Raises:
ValueError – If file search is not enabled for this agent or if the file upload fails