autogen_ext.models.replay#

class ReplayChatCompletionClient(chat_completions: Sequence[str | CreateResult], model_info: ModelInfo | None = None)[source]#

Bases: ChatCompletionClient, Component[ReplayChatCompletionClientConfig]

A mock chat completion client that replays predefined responses using an index-based approach.

This class simulates a chat completion client by replaying a predefined list of responses. It supports both single completions and streaming responses. The responses can be either strings or CreateResult objects. The client uses an index-based approach to access the responses, which allows the state to be reset.

Note

The responses can be either strings or CreateResult objects.

Parameters:

chat_completions (Sequence[Union[str, CreateResult]]) -- The list of predefined responses to replay.

Raises:

ValueError("No more mock responses available") -- If the list of provided outputs is exhausted.

Examples:

Simple chat completion client to return pre-defined responses.

from autogen_core.models import UserMessage
from autogen_ext.models.replay import ReplayChatCompletionClient


async def example():
    chat_completions = [
        "Hello, how can I assist you today?",
        "I'm happy to help with any questions you have.",
        "Is there anything else I can assist you with?",
    ]
    client = ReplayChatCompletionClient(chat_completions)
    messages = [UserMessage(content="What can you do?", source="user")]
    response = await client.create(messages)
    print(response.content)  # Output: "Hello, how can I assist you today?"

Simple streaming chat completion client to return pre-defined responses.

import asyncio
from autogen_core.models import UserMessage
from autogen_ext.models.replay import ReplayChatCompletionClient


async def example():
    chat_completions = [
        "Hello, how can I assist you today?",
        "I'm happy to help with any questions you have.",
        "Is there anything else I can assist you with?",
    ]
    client = ReplayChatCompletionClient(chat_completions)
    messages = [UserMessage(content="What can you do?", source="user")]

    async for token in client.create_stream(messages):
        print(token, end="")  # Output: "Hello, how can I assist you today?"

    async for token in client.create_stream(messages):
        print(token, end="")  # Output: "I'm happy to help with any questions you have."


asyncio.run(example())

Using `.reset` to reset the chat client state.

import asyncio
from autogen_core.models import UserMessage
from autogen_ext.models.replay import ReplayChatCompletionClient


async def example():
    chat_completions = [
        "Hello, how can I assist you today?",
    ]
    client = ReplayChatCompletionClient(chat_completions)
    messages = [UserMessage(content="What can you do?", source="user")]
    response = await client.create(messages)
    print(response.content)  # Output: "Hello, how can I assist you today?"

    response = await client.create(messages)  # Raises ValueError("No more mock responses available")

    client.reset()  # Reset the client state (current index of message and token usages)
    response = await client.create(messages)
    print(response.content)  # Output: "Hello, how can I assist you today?" again


asyncio.run(example())
classmethod _from_config(config: ReplayChatCompletionClientConfig) Self[source]#

Create a new instance of the component from a configuration object.

Parameters:

config (T) -- The configuration object.

Returns:

Self -- The new instance of the component.

_to_config() ReplayChatCompletionClientConfig[source]#

Dump the configuration of the current component instance, which can be used to create a new instance of the component with the same configuration.

Returns:

T -- The configuration of the component.
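
A minimal sketch of a configuration round-trip using the _to_config() and _from_config() hooks documented above. In typical usage these are invoked indirectly through the component serialization helpers rather than called directly.

from autogen_ext.models.replay import ReplayChatCompletionClient

client = ReplayChatCompletionClient(["Hello, how can I assist you today?"])

config = client._to_config()  # dump the current configuration
clone = ReplayChatCompletionClient._from_config(config)  # rebuild an equivalent client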

actual_usage() RequestUsage[source]#
property capabilities: ModelCapabilities#

Return mock capabilities.

async close() None[source]#
component_config_schema#

Alias of ReplayChatCompletionClientConfig

component_provider_override: ClassVar[str | None] = 'autogen_ext.models.replay.ReplayChatCompletionClient'#

Override the provider string for the component. This should be used to prevent internal module names from being part of the module name.

component_type: ClassVar[ComponentType] = 'replay_chat_completion_client'#

The logical type of the component.

count_tokens(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = []) int[source]#
async create(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = [], json_output: bool | type[BaseModel] | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) CreateResult[source]#

Return the next completion from the list.

property create_calls: List[Dict[str, Any]]#

Return the arguments of the calls made to the create method.
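
A short sketch of inspecting the recorded call arguments, e.g. to assert what an agent actually sent to the model in a test (the exact keys of each recorded dictionary are not assumed here):

import asyncio

from autogen_core.models import UserMessage
from autogen_ext.models.replay import ReplayChatCompletionClient


async def example():
    client = ReplayChatCompletionClient(["Sure, I can help with that."])
    messages = [UserMessage(content="Can you help me?", source="user")]
    await client.create(messages)

    # One dictionary of call arguments per create() invocation.
    print(client.create_calls)


asyncio.run(example())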

async create_stream(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = [], json_output: bool | type[BaseModel] | None = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: CancellationToken | None = None) AsyncGenerator[str | CreateResult, None][source]#

Return the next completion as a stream.

property model_info: ModelInfo#
remaining_tokens(messages: Sequence[Annotated[SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage, FieldInfo(annotation=NoneType, required=True, discriminator='type')]], *, tools: Sequence[Tool | ToolSchema] = []) int[source]#
reset() None[source]#

Reset the client state and usage to its initial state.

set_cached_bool_value(value: bool) None[source]#
total_usage() RequestUsage[source]#
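
A minimal sketch of inspecting the mock token usage reported by the client. RequestUsage exposes prompt_tokens and completion_tokens; how the replay client fills these in is an assumption here, so the example only prints them.

import asyncio

from autogen_core.models import UserMessage
from autogen_ext.models.replay import ReplayChatCompletionClient


async def example():
    client = ReplayChatCompletionClient(["Hello, how can I assist you today?"])
    messages = [UserMessage(content="What can you do?", source="user")]
    await client.create(messages)

    usage = client.total_usage()  # RequestUsage reported for the calls made so far (mock values)
    print(usage.prompt_tokens, usage.completion_tokens)


asyncio.run(example())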