Primera fase context

This commit is contained in:
Jordan Diaz
2026-04-09 18:27:36 +00:00
parent 993e7d3000
commit 4c73d848bb
8 changed files with 424 additions and 40 deletions

View File

@@ -10,6 +10,7 @@ import uuid
from typing import Any, AsyncIterator
from ...adapters.base import ModelAdapter, ModelConfig, StreamChunk
from ...config import settings
from ...context.engine import ContextEngine
from ...mcp.manager import MCPManager
from ...memory.store import MemoryStore
@@ -202,7 +203,10 @@ class BaseAgent:
conversation.append({
"role": "tool",
"tool_call_id": tc["id"],
"content": f"[DUPLICADO] Ya ejecutada con mismos argumentos. Resultado: {prev_exec.raw_output[:2000]}",
"content": (
"[DUPLICADO] Ya ejecutada con mismos argumentos. Resultado: "
f"{prev_exec.raw_output[:settings.tool_raw_output_max_chars]}"
),
})
logger.warning("Duplicate tool call skipped: %s (fingerprint: %s)", tc["name"], fp[:8])
continue
@@ -221,7 +225,11 @@ class BaseAgent:
conversation.append({
"role": "tool",
"tool_call_id": tc["id"],
"content": tool_exec.raw_output[:8000] if tool_exec.raw_output else tool_exec.result_summary,
"content": (
tool_exec.raw_output[:settings.tool_raw_output_max_chars]
if tool_exec.raw_output
else tool_exec.result_summary
),
})
# Loop detection: if ALL tool calls in this step were duplicates
@@ -304,7 +312,7 @@ class BaseAgent:
tool_exec.status = ToolExecutionStatus.COMPLETED
tool_exec.result_summary = artifact.summary
tool_exec.raw_output = raw_output[:8000]
tool_exec.raw_output = raw_output[:settings.tool_raw_output_max_chars]
tool_exec.duration_ms = duration
await self.sse.emit(
@@ -313,7 +321,7 @@ class BaseAgent:
"tool": tool_name,
"status": "completed",
"summary": artifact.summary[:200],
"raw_output": raw_output[:4000],
"raw_output": raw_output[:min(4000, settings.tool_raw_output_max_chars)],
"tool_call_id": tool_call_id,
},
session_id=session.session_id,

View File

@@ -13,6 +13,7 @@ from typing import Any
from ..adapters.base import ModelAdapter
from ..config import settings
from ..context.engine import ContextEngine
from ..context.compactor import estimate_tokens
from ..mcp.manager import MCPManager
from ..memory.store import MemoryStore
from ..models.agent import AgentProfile
@@ -132,21 +133,19 @@ class OrchestratorEngine:
usage = result.get("usage", {"input_tokens": 0, "output_tokens": 0})
key_data = self._extract_key_data_from_results([result])
session.task_history.append({
"task_id": task.task_id,
"objective": message,
"agent_id": session.agent_id,
"status": "completed",
"steps": 1,
"facts": task.facts_extracted[-10:],
"key_data": key_data,
"tools_used": [te.tool_name for te in result.get("tool_executions", [])],
"artifacts_count": len(result.get("artifacts", [])),
"summary": f"User: {message[:150]} → Agent: {content[:150]}",
"review": "",
})
if len(session.task_history) > 20:
session.task_history = session.task_history[-20:]
session.task_history.append(
self._build_task_history_entry(
task_id=task.task_id,
message=message,
content=content,
agent_id=session.agent_id,
facts=task.facts_extracted,
key_data=key_data,
tool_executions=result.get("tool_executions", []),
artifacts_count=len(result.get("artifacts", [])),
)
)
session.task_history = self._trim_task_history(session.task_history)
# Clean old artifacts
artifacts = await self.memory.list_artifacts(session.session_id)
@@ -252,3 +251,70 @@ class OrchestratorEngine:
if seen_modules:
key_data["modules"] = seen_modules[:20]
return key_data
@staticmethod
def _build_task_history_entry(
task_id: str,
message: str,
content: str,
agent_id: str,
facts: list[str],
key_data: dict[str, Any],
tool_executions: list[Any],
artifacts_count: int,
) -> dict[str, Any]:
message_summary = " ".join(message.strip().split())[:120]
content_summary = " ".join(content.strip().split())[:160]
if content_summary:
summary = f"User: {message_summary} → Agent: {content_summary}"
else:
summary = f"User: {message_summary}"
tools_used: list[str] = []
for tool_exec in tool_executions:
tool_name = getattr(tool_exec, "tool_name", "")
if tool_name and tool_name not in tools_used:
tools_used.append(tool_name)
return {
"task_id": task_id,
"objective": message[:200],
"agent_id": agent_id,
"status": "completed",
"steps": 1,
"facts": facts[-5:],
"key_data": key_data,
"tools_used": tools_used[:8],
"artifacts_count": artifacts_count,
"summary": summary,
"review": "",
}
@staticmethod
def _trim_task_history(history: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Bound the task history by entry count, then by estimated token budget.

    Keeps at most ``settings.task_history_max_entries`` most-recent entries,
    then walks newest-to-oldest accumulating per-entry token estimates until
    ``settings.task_history_max_tokens`` would be exceeded. The newest entry
    is always retained, even if it alone exceeds the budget. The result is
    returned in the original (oldest-first) order.
    """
    if not history:
        return []
    recent = history[-settings.task_history_max_entries:]
    budget = settings.task_history_max_tokens
    selected: list[dict[str, Any]] = []
    used = 0
    # Newest entries first; stop at the first one that would blow the budget.
    for candidate in recent[::-1]:
        cost = OrchestratorEngine._estimate_task_history_entry_tokens(candidate)
        if selected and used + cost > budget:
            break
        selected.append(candidate)
        used += cost
    selected.reverse()
    return selected
@staticmethod
def _estimate_task_history_entry_tokens(entry: dict[str, Any]) -> int:
    """Approximate the token cost of one task-history entry.

    Joins the entry's textual fields — objective, summary, up to five
    facts, up to five tool names, and the stringified ``key_data`` —
    skipping empty pieces, and delegates the estimate to
    ``estimate_tokens``.
    """
    facts_text = " ".join(entry.get("facts", [])[:5])
    tools_text = " ".join(entry.get("tools_used", [])[:5])
    fields = (
        entry.get("objective", ""),
        entry.get("summary", ""),
        facts_text,
        tools_text,
        str(entry.get("key_data", {})),
    )
    text = "\n".join(field for field in fields if field)
    return estimate_tokens(text)