"""SLM reasoning layer — Ollama API invocation (Phase 2).
Submits a context bundle to the local Ollama API and parses
the structured response into an RCAResult.
"""
from __future__ import annotations
import logging
from .models import CandidateIncident, RCAResult
logger = logging.getLogger(__name__)
OLLAMA_API_URL = "http://localhost:11434/api/chat"
class RCAAgent:
    """Invokes the local SLM via Ollama and returns a structured RCAResult.

    The SLM is NOT a streaming log consumer — it is an on-demand reasoner
    activated only when Layer 2 fires a trigger.
    """

    def __init__(self, model: str = "phi4", ollama_url: str = OLLAMA_API_URL) -> None:
        """Store the agent's Ollama configuration.

        The constructor only records configuration; no network connection is
        made here, so the agent is cheap to construct at startup.

        Args:
            model: Ollama model name (e.g. "phi4", "mistral").
            ollama_url: Base URL for the Ollama API.
        """
        self.model = model
        self.ollama_url = ollama_url

    def analyze(self, incident: CandidateIncident, context_bundle: dict) -> RCAResult:
        """Submit the context bundle to the SLM and return a parsed RCAResult.

        Args:
            incident: The candidate incident that triggered this analysis.
            context_bundle: Pre-assembled context (logs/metadata) to send to
                the model via the Ollama chat endpoint.

        Returns:
            RCAResult: Structured root-cause-analysis output parsed from the
            model response.

        Raises:
            NotImplementedError: Phase 2 scaffold — SLM invocation and
            response parsing are not implemented yet.
        """
        raise NotImplementedError
# --- Perforce/Swarm changelist metadata (preserved as a comment; raw markdown
# --- here was a Python syntax error and broke module import) ---
# | # | Change | User | Description | Committed |
# |---|--------|------|-------------|-----------|
# | #1 | 32636 | bot_Claude_Anthropic |
# Scaffold p4-rca-agent repo: directory structure, data models, layer stubs,
# test fixtures, config, docs. Covers briefing tasks 2 and 3.
# #review-32637 @robert_cowham @tom_tyler