autogen_ext.agents.azure#
- class AzureAIAgent(name: str, description: str, project_client: AIProjectClient, deployment_name: str, instructions: str, tools: Iterable[Literal['file_search', 'code_interpreter', 'bing_grounding', 'azure_ai_search', 'azure_function', 'sharepoint_grounding'] | BingGroundingToolDefinition | CodeInterpreterToolDefinition | SharepointToolDefinition | AzureAISearchToolDefinition | FileSearchToolDefinition | AzureFunctionToolDefinition | Tool | Callable[[...], Any] | Callable[[...], Awaitable[Any]]] | None = None, agent_id: str | None = None, thread_id: str | None = None, metadata: Dict[str, str] | None = None, response_format: _types.AgentsApiResponseFormatOption | None = None, temperature: float | None = None, tool_resources: models.ToolResources | None = None, top_p: float | None = None)[source]#
Bases:
BaseChatAgent
Azure AI Assistant agent for AutoGen.
Installation
pip install "autogen-ext[azure]" # For Azure AI Foundry Agent Service
This agent leverages the Azure AI Assistant API to create AI assistants with capabilities such as:
Code interpretation and execution
Grounding with Bing search
File handling and search
Custom function calling
Multi-turn conversations
The agent integrates with AutoGen's messaging system, providing a seamless way to use Azure AI capabilities within the AutoGen framework. It supports tools such as the code interpreter, file search, and various grounding mechanisms.
- The agent name must be a valid Python identifier:
It must start with a letter (A-Z, a-z) or an underscore (_).
It can only contain letters, digits (0-9), or underscores.
It cannot be a Python keyword.
It cannot contain spaces or special characters.
It cannot start with a digit.
For information on how to create a new secure agent with a user-managed identity, see: https://learn.microsoft.com/en-us/azure/ai-services/agents/how-to/virtual-networks
Examples
Use the AzureAIAgent to create an agent grounded with Bing search:
import asyncio
import os

from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure._azure_ai_agent import AzureAIAgent
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential

import azure.ai.projects.models as models
import dotenv


async def bing_example():
    credential = DefaultAzureCredential()
    async with AIProjectClient.from_connection_string(  # type: ignore
        credential=credential, conn_str=os.getenv("AI_PROJECT_CONNECTION_STRING", "")
    ) as project_client:
        conn = await project_client.connections.get(connection_name=os.getenv("BING_CONNECTION_NAME", ""))

        bing_tool = models.BingGroundingTool(conn.id)
        agent_with_bing_grounding = AzureAIAgent(
            name="bing_agent",
            description="An AI assistant with Bing grounding",
            project_client=project_client,
            deployment_name="gpt-4o",
            instructions="You are a helpful assistant.",
            tools=bing_tool.definitions,
            metadata={"source": "AzureAIAgent"},
        )

        # For the Bing grounding tool to return citations, the message must instruct the model to include them,
        # for example: "Please provide citations for the answers."
        result = await agent_with_bing_grounding.on_messages(
            messages=[
                TextMessage(
                    content="What is Microsoft's annual leave policy? Provide citations for your answers.",
                    source="user",
                )
            ],
            cancellation_token=CancellationToken(),
            message_limit=5,
        )
        print(result)


if __name__ == "__main__":
    dotenv.load_dotenv()
    asyncio.run(bing_example())
Use the AzureAIAgent to create an agent with file search capability:
import asyncio
import os
import tempfile
import urllib.request

import dotenv
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure._azure_ai_agent import AzureAIAgent
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential


async def file_search_example():
    # Download README.md from GitHub
    readme_url = "https://raw.githubusercontent.com/microsoft/autogen/refs/heads/main/README.md"
    temp_file = None

    try:
        # Create a temporary file to store the downloaded README
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".md")
        urllib.request.urlretrieve(readme_url, temp_file.name)
        print(f"Downloaded README.md to {temp_file.name}")

        credential = DefaultAzureCredential()
        async with AIProjectClient.from_connection_string(  # type: ignore
            credential=credential, conn_str=os.getenv("AI_PROJECT_CONNECTION_STRING", "")
        ) as project_client:
            agent_with_file_search = AzureAIAgent(
                name="file_search_agent",
                description="An AI assistant with file search capabilities",
                project_client=project_client,
                deployment_name="gpt-4o",
                instructions="You are a helpful assistant.",
                tools=["file_search"],
                metadata={"source": "AzureAIAgent"},
            )

            ct: CancellationToken = CancellationToken()

            # Use the downloaded README file for file search
            await agent_with_file_search.on_upload_for_file_search(
                file_paths=[temp_file.name],
                vector_store_name="file_upload_index",
                vector_store_metadata={"source": "AzureAIAgent"},
                cancellation_token=ct,
            )
            result = await agent_with_file_search.on_messages(
                messages=[
                    TextMessage(content="Hello, what is AutoGen and what capabilities does it have?", source="user")
                ],
                cancellation_token=ct,
                message_limit=5,
            )
            print(result)
    finally:
        # Clean up the temporary file
        if temp_file and os.path.exists(temp_file.name):
            os.unlink(temp_file.name)
            print(f"Removed temporary file {temp_file.name}")


if __name__ == "__main__":
    dotenv.load_dotenv()
    asyncio.run(file_search_example())
Use the AzureAIAgent to create an agent with code interpreter capability:
import asyncio
import os

import dotenv
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken
from autogen_ext.agents.azure._azure_ai_agent import AzureAIAgent
from azure.ai.projects.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential


async def code_interpreter_example():
    credential = DefaultAzureCredential()
    async with AIProjectClient.from_connection_string(  # type: ignore
        credential=credential, conn_str=os.getenv("AI_PROJECT_CONNECTION_STRING", "")
    ) as project_client:
        agent_with_code_interpreter = AzureAIAgent(
            name="code_interpreter_agent",
            description="An AI assistant with code interpreter capabilities",
            project_client=project_client,
            deployment_name="gpt-4o",
            instructions="You are a helpful assistant.",
            tools=["code_interpreter"],
            metadata={"source": "AzureAIAgent"},
        )

        await agent_with_code_interpreter.on_upload_for_code_interpreter(
            file_paths="/workspaces/autogen/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/data/nifty_500_quarterly_results.csv",
            cancellation_token=CancellationToken(),
        )

        result = await agent_with_code_interpreter.on_messages(
            messages=[
                TextMessage(
                    content="Aggregate the number of stocks per industry and give me a markdown table as a result?",
                    source="user",
                )
            ],
            cancellation_token=CancellationToken(),
        )
        print(result)


if __name__ == "__main__":
    dotenv.load_dotenv()
    asyncio.run(code_interpreter_example())
- async handle_text_message(content: str, cancellation_token: CancellationToken | None = None) None [source]#
Handle a text message by adding it to the agent's conversation thread. A usage sketch follows the parameter list below.
- Parameters:
content (str) – The text content of the message
cancellation_token (CancellationToken) – Token used to cancel the operation
- Returns:
None
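A minimal sketch of handle_text_message, assuming `agent` is an AzureAIAgent created as in the examples above; it simply adds a user message to the agent's conversation thread.

from autogen_core import CancellationToken

# Sketch (run inside an async function; assumes `agent` is an AzureAIAgent
# created as in the examples above). Adds a text message to the agent's thread.
await agent.handle_text_message(
    content="Here is some background information to keep in mind.",
    cancellation_token=CancellationToken(),
)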
- async load_state(state: Mapping[str, Any]) None [source]#
Load a previously saved state into this agent.
This method deserializes and restores a previously saved agent state, setting the agent up to continue a previous conversation or session. A save/restore sketch follows the parameter list below.
- Parameters:
state (Mapping[str, Any]) – The previously saved state dictionary
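A minimal save/restore sketch, assuming `agent` and `restored_agent` are AzureAIAgent instances configured as in the examples above; save_state() is inherited from BaseChatAgent.

# Sketch (run inside an async function; assumes `agent` and `restored_agent`
# are AzureAIAgent instances configured as in the examples above).
state = await agent.save_state()        # save_state() is inherited from BaseChatAgent
await restored_agent.load_state(state)  # continue the earlier conversation or session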
- async on_messages(messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken | None = None, message_limit: int = 1) Response [source]#
Process incoming messages and return a response from the Azure AI agent.
This method is the primary entry point for interacting with the agent. It delegates to on_messages_stream and returns the final response. A multi-turn sketch follows the parameter list below.
- Parameters:
messages (Sequence[ChatMessage]) – The messages to process
cancellation_token (CancellationToken) – Token used to cancel the operation
message_limit (int, optional) – Maximum number of messages to retrieve from the thread
- Returns:
Response – The agent's response, including a chat message and any inner events
- Raises:
AssertionError – If the stream does not return a final result
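Because the agent keeps its conversation thread between calls, multi-turn usage amounts to calling on_messages repeatedly. A minimal sketch, assuming `agent` is an AzureAIAgent created as in the examples above:

from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken

# Sketch (run inside an async function; assumes `agent` is an AzureAIAgent
# created as in the examples above).
first = await agent.on_messages(
    messages=[TextMessage(content="Summarize AutoGen in two sentences.", source="user")],
    cancellation_token=CancellationToken(),
)
print(first.chat_message)

# The same thread is reused, so a follow-up question can rely on earlier turns.
follow_up = await agent.on_messages(
    messages=[TextMessage(content="Now list three things it can do.", source="user")],
    cancellation_token=CancellationToken(),
)
print(follow_up.chat_message)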
- async on_messages_stream(messages: Sequence[BaseChatMessage], cancellation_token: CancellationToken | None = None, message_limit: int = 1, sleep_interval: float = 0.5) AsyncGenerator[Annotated[ToolCallRequestEvent | ToolCallExecutionEvent | MemoryQueryEvent | UserInputRequestedEvent | ModelClientStreamingChunkEvent | ThoughtEvent | SelectSpeakerEvent | CodeGenerationEvent | CodeExecutionEvent, FieldInfo(annotation=NoneType, required=True, discriminator='type')] | Annotated[TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')] | Response, None] [source]#
Process incoming messages and generate a streaming response from the Azure AI agent.
This method handles the complete interaction flow with the Azure AI agent:
1. Processing the input messages
2. Creating and monitoring a run
3. Handling tool calls and their results
4. Retrieving and returning the agent's final response
The method yields events (such as tool calls) as they occur during processing and finally yields the complete response containing the agent's message. A consumption sketch follows the parameter list below.
- Parameters:
messages (Sequence[ChatMessage]) – The messages to process
cancellation_token (CancellationToken) – Token used to cancel the operation
message_limit (int, optional) – Maximum number of messages to retrieve from the thread
sleep_interval (float, optional) – Time to wait between polls of the run status
- Yields:
AgentEvent | ChatMessage | Response – Events produced during processing and the final response
- Raises:
ValueError – If the run fails or no message is received from the assistant
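A minimal consumption sketch, assuming `agent` is an AzureAIAgent created as in the examples above; intermediate events are yielded first, followed by the final Response.

from autogen_agentchat.base import Response
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken

# Sketch (run inside an async function; assumes `agent` is an AzureAIAgent
# created as in the examples above).
async for item in agent.on_messages_stream(
    messages=[TextMessage(content="What are the key points in the uploaded file?", source="user")],
    cancellation_token=CancellationToken(),
):
    if isinstance(item, Response):
        print("Final response:", item.chat_message)
    else:
        print("Event:", type(item).__name__)  # e.g. tool call request/execution events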
- async on_reset(cancellation_token: CancellationToken) None [source]#
Reset the agent's conversation by creating a new thread.
This method allows the conversation to be reset without losing the agent definition or its capabilities. It creates a new thread that is used for the fresh conversation; a usage sketch follows the parameter list below.
Note: the Azure AI Agent API does not currently support deleting messages, so a new thread is created instead.
- Parameters:
cancellation_token (CancellationToken) – Token used to cancel the operation
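A minimal sketch, assuming `agent` is an AzureAIAgent created as in the examples above; after the reset, subsequent on_messages calls run on a fresh thread.

from autogen_core import CancellationToken

# Sketch (run inside an async function; assumes `agent` is an AzureAIAgent
# created as in the examples above).
await agent.on_reset(cancellation_token=CancellationToken())
# The agent definition and tools are unchanged; only the conversation thread is new.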
- async on_upload_for_code_interpreter(file_paths: str | Iterable[str], cancellation_token: CancellationToken | None = None, sleep_interval: float = 0.5) None [source]#
Upload files to be used by the code interpreter tool.
This method uploads files for the agent's code interpreter tool and updates the thread's tool resources to include them.
- Parameters:
file_paths (str | Iterable[str]) – Path(s) of the file(s) to upload
cancellation_token (Optional[CancellationToken]) – Token used to cancel the operation
sleep_interval (float) – Time to wait between polls of the file upload status
- Raises:
ValueError – If the file upload fails or the agent does not have code interpreter capability.
- async on_upload_for_file_search(file_paths: str | Iterable[str], cancellation_token: CancellationToken, vector_store_name: str | None = None, data_sources: List[VectorStoreDataSource] | None = None, expires_after: VectorStoreExpirationPolicy | None = None, chunking_strategy: VectorStoreChunkingStrategyRequest | None = None, vector_store_metadata: Dict[str, str] | None = None, vector_store_polling_sleep_interval: float = 1) None [source]#
Upload files to be used with the file search tool.
This method handles uploading files for file search capabilities, creating a vector store if necessary and updating the agent's configuration to use that vector store.
- Parameters:
file_paths (str | Iterable[str]) – Path(s) of the file(s) to upload
cancellation_token (CancellationToken) – Token used to cancel the operation
vector_store_name (Optional[str]) – Name to assign to the vector store if a new one is created
data_sources (Optional[List[models.VectorStoreDataSource]]) – Additional data sources for the vector store
expires_after (Optional[models.VectorStoreExpirationPolicy]) – Expiration policy for the vector store contents
chunking_strategy (Optional[models.VectorStoreChunkingStrategyRequest]) – Strategy used to chunk the file contents
vector_store_metadata (Optional[Dict[str, str]]) – Metadata to associate with the vector store
vector_store_polling_sleep_interval (float) – Time to wait between polls of the vector store status
- Raises:
ValueError – If file search is not enabled for this agent or the file upload fails
- property produced_message_types: Sequence[type[Annotated[TextMessage | MultiModalMessage | StopMessage | ToolCallSummaryMessage | HandoffMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]]]#
The types of messages that the assistant agent produces.
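A minimal sketch, assuming `agent` is an AzureAIAgent created as in the examples above:

# Sketch (assumes `agent` is an AzureAIAgent created as in the examples above).
for message_type in agent.produced_message_types:
    print(message_type.__name__)  # e.g. TextMessage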