Modify the ChatComplete event implementation
parent 83bb6ad213
commit cc283b1618
@@ -0,0 +1,21 @@
from __future__ import annotations
from typing import Any

from agentkit.context import ConversationContext


class BaseAgent:
    def __init__(self, config: dict[str, Any]):
        self.config = config

    async def __aenter__(self) -> BaseAgent:
        return self

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        return None

    async def count_tokens(self, text: str) -> int:
        return len(text.split())

    async def chat_complete(self, context: ConversationContext, stream: bool = False):
        pass
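For orientation, a minimal sketch of a concrete agent built on this base class; the EchoAgent name, its config keys, and its trivial chat_complete body are illustrative assumptions, not part of this commit:

import asyncio

from agentkit.base.agent import BaseAgent
from agentkit.context import ConversationContext


# Hypothetical subclass for illustration only; a real agent would call a model here.
class EchoAgent(BaseAgent):
    async def chat_complete(self, context: ConversationContext, stream: bool = False):
        return {"message": "hello", "finish_reason": "stop"}


async def demo() -> None:
    # The async context manager protocol comes from BaseAgent.__aenter__/__aexit__.
    async with EchoAgent({"model": "demo"}) as agent:
        print(await agent.count_tokens("how many tokens is this"))


asyncio.run(demo())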
@@ -0,0 +1,31 @@
from __future__ import annotations
from typing import Any

from agentkit.base.agent import BaseAgent


class BaseAgentFactory:
    def __init__(self, props: dict):
        pass

    async def __aenter__(self) -> BaseAgentFactory:
        return self

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        return None

    async def new_from_id(self, agent_id: str) -> BaseAgent | None:
        """
        Create agent from agent id.
        """
        return None

    async def new_from_config(self, agent_type: str, config: dict) -> BaseAgent | None:
        """
        Create agent from agent type and config.
        """
        return None
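A rough sketch of how a concrete factory could implement this interface; SimpleAgentFactory and its agent_types mapping are assumed names for illustration only:

# Module path for BaseAgentFactory is not shown in this diff; import path assumed.
# from agentkit.base.agent_factory import BaseAgentFactory
from agentkit.base.agent import BaseAgent


class SimpleAgentFactory(BaseAgentFactory):
    """Hypothetical factory that maps an agent_type string to an agent class."""

    def __init__(self, props: dict):
        super().__init__(props)
        self.agent_types: dict[str, type[BaseAgent]] = {}  # filled by the application

    async def new_from_config(self, agent_type: str, config: dict) -> BaseAgent | None:
        agent_cls = self.agent_types.get(agent_type)
        return agent_cls(config) if agent_cls else None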
@@ -0,0 +1,126 @@
import logging

from agentkit.context import ConversationContext
from agentkit.types import AgentKitFlowStep


class BreakLoopInterrupt(Exception):
    pass


class AgentKitFlowExecutor:
    def __init__(self, flow_script: list[AgentKitFlowStep]):
        self.flow = flow_script
        # Variables produced by previous steps (via output_map) are stored here.
        self.variables: dict = {}

    def _resolve_value(self, value):
        """
        Resolve a config value: a value that names a variable produced by an
        earlier step's output_map is replaced by that variable; anything else
        is returned unchanged.
        """
        if isinstance(value, str) and value in self.variables:
            return self.variables[value]
        return value

    def _execute_step(self, step: AgentKitFlowStep, context: ConversationContext):
        """
        Execute a single flow step.

        Args:
            step (dict): The step to execute.

        Returns:
            Any: The result of the step execution.
        """
        step_type = step["type"]
        if step_type == "call":
            return self._execute_call(step, context)
        elif step_type == "if_else":
            return self._execute_if_else(step, context)
        elif step_type == "loop":
            return self._execute_loop(step, context)
        elif step_type == "break_loop":
            return self._execute_break_loop(step)
        else:
            raise ValueError(f"Unsupported step type: {step_type}")

    def _execute_call(self, step: dict, context: ConversationContext):
        """
        Execute a 'call' step.

        Args:
            step (dict): The call step configuration.

        Returns:
            Any: The result of the call.
        """
        func_id = step.get("id")
        config = {k: self._resolve_value(v) for k, v in step.get("config", {}).items()}
        output_map = step.get("output_map", {})

        # Simulate calling a function (replace with actual function calls if needed)
        logging.info(f"Calling function {func_id} with config: {config}")
        result = {key: f"mocked_value_{key}" for key in output_map.keys()}  # Mocked output

        # Map outputs to variables
        for key, var_name in output_map.items():
            self.variables[var_name] = result.get(key)
        return result

    def _execute_if_else(self, step: dict, context: ConversationContext):
        """
        Execute an 'if_else' step.

        Args:
            step (dict): The if_else step configuration.

        Returns:
            Any: The result of the executed branch.
        """
        condition = step.get("condition")
        condition_input = {k: self._resolve_value(v) for k, v in step.get("condition_input", {}).items()}

        # Evaluate the condition
        condition_result = eval(condition, {}, condition_input)

        # Execute the appropriate branch
        branch = step.get("true_branch", []) if condition_result else step.get("false_branch", [])
        for sub_step in branch:
            self._execute_step(sub_step, context)

    def _execute_loop(self, step: dict, context: ConversationContext):
        """
        Execute a 'loop' step.

        Args:
            step (dict): The loop step configuration.

        Returns:
            Any: The result of the loop execution.
        """
        loop_num = step.get("loop_num", 0)
        index_var = step.get("index_var")
        loop_body = step.get("loop_body", [])

        for i in range(loop_num):
            if index_var:
                context.state[index_var] = i

            try:
                for sub_step in loop_body:
                    self._execute_step(sub_step, context)
            except BreakLoopInterrupt:
                # A 'break_loop' step inside the body ends this loop only.
                break

    def _execute_break_loop(self, step: dict):
        """
        Execute a 'break_loop' step.

        Args:
            step (dict): The break_loop step configuration.

        Returns:
            None
        """
        raise BreakLoopInterrupt()

    def execute(self, context: ConversationContext):
        """
        Execute the entire flow.

        Returns:
            None
        """
        try:
            for step in self.flow:
                self._execute_step(step, context)
        except BreakLoopInterrupt:
            pass
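A hedged usage sketch of the executor above, assuming it is importable and that ConversationContext only needs a state dict for this flow; the function id and literal values are made up purely to show the step schema (call, if_else, loop, break_loop):

from types import SimpleNamespace

# Module path for the executor is not shown in this diff; import path assumed.
# from agentkit.flow import AgentKitFlowExecutor

# Stand-in for ConversationContext: only `context.state` is touched by this flow.
context = SimpleNamespace(state={})

flow_script = [
    {
        "type": "loop",
        "loop_num": 3,
        "index_var": "i",
        "loop_body": [
            {
                "type": "call",
                "id": "fetch_page",  # made-up function id
                "config": {"page_size": 10},
                "output_map": {"text": "page_text"},
            },
            {
                # Literal condition inputs keep the demo independent of earlier steps.
                "type": "if_else",
                "condition": "count >= threshold",
                "condition_input": {"count": 2, "threshold": 1},
                "true_branch": [{"type": "break_loop"}],
                "false_branch": [],
            },
        ],
    },
]

AgentKitFlowExecutor(flow_script).execute(context)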
@@ -0,0 +1,31 @@
from __future__ import annotations
from typing import Callable
from type_defs.chat_complete_task import ChatCompleteServiceResponse
from utils.program import run_listeners


class ChatCompleteEvent:
    def __init__(self):
        self.on_tool_running: list[Callable[[str, str], None]] = []
        self.on_tool_output: list[Callable[[str], None]] = []
        self.on_message_output: list[Callable[[str], None]] = []

    async def emit_tool_running(self, tool_name: str, running_state: str = "") -> None:
        await run_listeners(self.on_tool_running, tool_name, running_state)

    async def emit_tool_output(self, output: str) -> None:
        await run_listeners(self.on_tool_output, output)

    async def emit_message_output(self, output: str) -> None:
        await run_listeners(self.on_message_output, output)


class ChatCompleteTaskEvent(ChatCompleteEvent):
    def __init__(self):
        super().__init__()
        self.on_finished: list[Callable[[ChatCompleteServiceResponse | None], None]] = []
        self.on_error: list[Callable[[Exception], None]] = []

    async def emit_finished(self, result: ChatCompleteServiceResponse | None) -> None:
        await run_listeners(self.on_finished, result)

    async def emit_error(self, ex: Exception) -> None:
        await run_listeners(self.on_error, ex)
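A small usage sketch for these event classes, assuming ChatCompleteTaskEvent is importable; the listener names and printed strings are illustrative only:

import asyncio

# Module path for the event classes is not shown in this diff; import path assumed.
# from chat_complete_events import ChatCompleteTaskEvent


async def demo_events() -> None:
    events = ChatCompleteTaskEvent()

    # Sync and async listeners are both accepted; run_listeners awaits coroutines.
    events.on_tool_running.append(lambda name, state: print(f"tool {name}: {state}"))

    async def log_message(chunk: str) -> None:
        print("message:", chunk)

    events.on_message_output.append(log_message)

    await events.emit_tool_running("search", "started")
    await events.emit_message_output("partial answer")


asyncio.run(demo_events())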
@ -1,3 +0,0 @@
|
||||
text2vec>=1.2.9
|
||||
--index-url https://download.pytorch.org/whl/cpu
|
||||
torch
|
@@ -0,0 +1,25 @@
from __future__ import annotations
from typing_extensions import TypedDict


class ChatCompleteQuestionTooLongException(Exception):
    def __init__(self, tokens_limit: int, tokens_current: int):
        super().__init__(f"Question too long: {tokens_current} > {tokens_limit}")
        self.tokens_limit = tokens_limit
        self.tokens_current = tokens_current


class ChatCompleteServicePrepareResponse(TypedDict):
    extract_doc: list
    question_tokens: int
    conversation_id: int
    chunk_id: int
    api_id: str


class ChatCompleteServiceResponse(TypedDict):
    message: str
    message_tokens: int
    total_tokens: int
    finish_reason: str
    question_message_id: str
    response_message_id: str
    delta_data: dict
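For illustration, a guard function and a literal response showing how these types are intended to be used; check_question_length and the sample field values are assumptions, not part of this commit:

from type_defs.chat_complete_task import (
    ChatCompleteQuestionTooLongException,
    ChatCompleteServiceResponse,
)


def check_question_length(question_tokens: int, tokens_limit: int) -> None:
    # Hypothetical guard showing how the exception is meant to be raised.
    if question_tokens > tokens_limit:
        raise ChatCompleteQuestionTooLongException(tokens_limit, question_tokens)


response: ChatCompleteServiceResponse = {
    "message": "Hello!",
    "message_tokens": 2,
    "total_tokens": 10,
    "finish_reason": "stop",
    "question_message_id": "q-1",
    "response_message_id": "r-1",
    "delta_data": {},
}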
@@ -0,0 +1,19 @@
import asyncio
import sys
import traceback


async def run_listeners(listeners: list, *args, **kwargs) -> None:
    for listener in listeners:
        try:
            res = listener(*args, **kwargs)
            if asyncio.iscoroutine(res):
                await res
        except Exception as ex:
            print(
                "Error while processing callback: %s" % ex,
                file=sys.stderr,
            )
            traceback.print_exc()
    return None
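A short sketch of run_listeners' error isolation: a listener that raises is reported to stderr while the remaining listeners still run. The listener names here are illustrative:

import asyncio

from utils.program import run_listeners


def bad_listener(msg: str) -> None:
    raise RuntimeError("listener failed")


async def good_listener(msg: str) -> None:
    print("got:", msg)


# The failing listener is reported to stderr, but the remaining listeners still run.
asyncio.run(run_listeners([bad_listener, good_listener], "hello"))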