# test_engine_contract_models.py
from services.shared.models import EngineContext, EngineExecutionError, EngineResult


def test_engine_result_from_raw_normalizes_success_payload():
    """A successful raw payload normalizes to status=completed with its
    summary, artifacts, and metrics carried through to the runtime payload."""
    raw = {
        "success": True,
        "message": "query completed",
        "artifacts": {"report": "query.md"},
        "metrics": {"duration_seconds": 1.25},
    }

    normalized = EngineResult.from_raw(
        engine_name="query",
        payload=raw,
    ).to_runtime_payload()

    assert normalized["engine_name"] == "query"
    assert normalized["status"] == "completed"
    assert normalized["success"] is True
    assert normalized["summary"] == "query completed"
    assert normalized["artifacts"]["report"] == "query.md"
    assert normalized["metrics"]["duration_seconds"] == 1.25


def test_engine_result_from_raw_normalizes_error_payload():
    """A structured error payload keeps its failed status and surfaces the
    error message as the summary, preserving code/retryable/details."""
    error_body = {
        "code": "engine_timeout",
        "message": "engine timed out",
        "retryable": True,
        "details": {"timeout_seconds": 30},
    }
    raw = {"success": False, "status": "failed", "error": error_body}

    normalized = EngineResult.from_raw(
        engine_name="insight",
        payload=raw,
    ).to_runtime_payload()

    assert normalized["engine_name"] == "insight"
    assert normalized["status"] == "failed"
    assert normalized["success"] is False
    assert normalized["summary"] == "engine timed out"

    error_info = normalized["error"]
    assert error_info["code"] == "engine_timeout"
    assert error_info["retryable"] is True
    assert error_info["details"]["timeout_seconds"] == 30


def test_engine_result_from_raw_infers_artifacts_metrics_and_string_error():
    """Loose payload fields are classified: list-like fields become artifacts,
    count fields become metrics, and a bare string error is wrapped with the
    default engine_execution_failed code."""
    normalized = EngineResult.from_raw(
        engine_name="insight",
        payload={
            "success": False,
            "message": "analysis failed",
            "error": "timeout while calling provider",
            "themes": ["service", "queueing"],
            "results": [{"title": "sample"}],
            "success_count": 1,
            "failed_count": 2,
        },
    ).to_runtime_payload()

    assert normalized["status"] == "failed"
    assert normalized["summary"] == "analysis failed"

    artifacts = normalized["artifacts"]
    assert artifacts["themes"] == ["service", "queueing"]
    assert artifacts["results"] == [{"title": "sample"}]

    metrics = normalized["metrics"]
    assert metrics["success_count"] == 1
    assert metrics["failed_count"] == 2

    error_info = normalized["error"]
    assert error_info["code"] == "engine_execution_failed"
    assert error_info["message"] == "timeout while calling provider"


def test_engine_context_and_error_models_keep_minimal_contract():
    """EngineContext and EngineExecutionError expose their constructor fields
    unchanged, and the error converts to an error-info view."""
    ctx = EngineContext(
        engine_name="media",
        research_task_id="task-123",
        query="museum review",
        trace_id="trace-1",
        metadata={"locale": "zh-CN"},
    )
    assert ctx.engine_name == "media"
    assert ctx.research_task_id == "task-123"
    assert ctx.metadata["locale"] == "zh-CN"

    exec_error = EngineExecutionError(
        code="upstream_error",
        message="upstream failed",
        retryable=False,
        details={"provider": "mock"},
    )
    assert exec_error.to_error_info().message == "upstream failed"