Coverage for src/tracekit/quality/explainer.py: 17%
89 statements
coverage.py v7.13.1, created at 2026-01-11 23:04 +0000
"""Result explainability for analysis outputs.

Generates human-readable explanations for why analysis results
are reliable or unreliable.

Example:
    >>> from tracekit.quality.explainer import explain_result
    >>> from tracekit.quality.scoring import calculate_quality_score
    >>> score = calculate_quality_score(0.9, 0.8, 0.85)
    >>> explanation = explain_result("frequency", 10.5e6, score, "fft")
    >>> print(explanation)

References:
    - Result explainability for analysis outputs
"""
17from __future__ import annotations
19from dataclasses import dataclass, field
20from typing import TYPE_CHECKING, Any, ClassVar
22if TYPE_CHECKING:
23 from tracekit.quality.scoring import AnalysisQualityScore
@dataclass
class ResultExplanation:
    """Detailed explanation for an analysis result.

    Attributes:
        result_name: Name of the result.
        result_value: Value of the result.
        quality: Quality score for the result.
        why_reliable: Reasons why the result is reliable.
        why_unreliable: Reasons why the result may be unreliable.
        recommendations: Actionable recommendations.
        alternative_methods: Alternative analysis methods.

    Example:
        >>> explanation = ResultExplanation(
        ...     result_name="frequency",
        ...     result_value=10.5e6,
        ...     quality=score,
        ... )
        >>> print(explanation.to_narrative())
    """

    result_name: str
    result_value: Any
    quality: AnalysisQualityScore

    # Detailed reasoning lists, typically populated by ResultExplainer.
    why_reliable: list[str] = field(default_factory=list)
    why_unreliable: list[str] = field(default_factory=list)
    recommendations: list[str] = field(default_factory=list)
    alternative_methods: list[str] = field(default_factory=list)

    def to_narrative(self) -> str:
        """Generate a narrative explanation of this result.

        Returns:
            Multi-line human-readable string: a header with the formatted
            value and confidence, followed by any populated sections for
            reliability reasons, concerns, recommendations, and
            alternative approaches.
        """
        # Header: formatted value plus confidence percentage and category.
        out: list[str] = [
            f"Result: {self.result_name} = {self._format_value(self.result_value)}",
            f"Confidence: {self.quality.confidence:.1%} ({self.quality.category.value})",
            "",
        ]

        if self.why_reliable:
            out.append("Why this result is reliable:")
            out.extend(f"  ✓ {item}" for item in self.why_reliable)
            out.append("")

        if self.why_unreliable:
            out.append("Concerns affecting reliability:")
            out.extend(f"  ⚠ {item}" for item in self.why_unreliable)
            out.append("")

        if self.recommendations:
            out.append("Recommendations:")
            out.extend(f"  → {item}" for item in self.recommendations)
            out.append("")

        # Alternatives are the final section, so no trailing blank line.
        if self.alternative_methods:
            out.append("Alternative approaches:")
            out.extend(f"  • {item}" for item in self.alternative_methods)

        return "\n".join(out)

    def _format_value(self, value: Any) -> str:
        """Format a result value for display.

        Floats far from unity (|v| < 0.001 or |v| > 10000) use scientific
        notation; other floats get four decimal places; non-floats fall
        back to str().
        """
        if not isinstance(value, float):
            return str(value)
        magnitude = abs(value)
        if magnitude < 0.001 or magnitude > 10000:
            return f"{value:.3e}"
        return f"{value:.4f}"
class ResultExplainer:
    """Generate explanations for analysis results.

    Provides method-specific explanations for common analysis
    techniques used in signal processing.

    Example:
        >>> explainer = ResultExplainer()
        >>> explanation = explainer.explain(
        ...     result_name="frequency",
        ...     result_value=10.5e6,
        ...     quality=score,
        ...     method_name="fft",
        ... )
        >>> print(explanation.to_narrative())
    """

    # Per-method explanation templates: canned "reliable" reasons,
    # named concerns, and alternative techniques to suggest.
    METHOD_EXPLANATIONS: ClassVar[dict[str, Any]] = {
        "fft": {
            "reliable": [
                "FFT is mathematically exact for periodic signals",
                "Sufficient frequency resolution for analysis",
            ],
            "concerns": {
                "low_snr": "Low SNR may cause spurious frequency peaks",
                "short_duration": "Short capture limits frequency resolution",
                "windowing": "Spectral leakage may affect amplitude accuracy",
            },
            "alternatives": ["Welch method for noisy signals", "Zero-padding for interpolation"],
        },
        "edge_detection": {
            "reliable": [
                "Multiple consistent edges detected",
                "Clear transition between logic levels",
            ],
            "concerns": {
                "noise": "Noise may cause false edge detections",
                "ringing": "Signal ringing may create multiple edge crossings",
            },
            "alternatives": ["Interpolated edge timing", "Histogram-based threshold"],
        },
        "statistics": {
            "reliable": [
                "Statistical methods are mathematically well-defined",
                "Large sample size provides reliable estimates",
            ],
            "concerns": {
                "outliers": "Outliers may skew mean and variance",
                "non_normal": "Non-normal distribution affects some statistics",
            },
            "alternatives": ["Robust statistics (median, MAD)", "Trimmed mean"],
        },
    }

    def explain(
        self,
        result_name: str,
        result_value: Any,
        quality: AnalysisQualityScore,
        method_name: str | None = None,
    ) -> ResultExplanation:
        """Generate an explanation for an analysis result.

        Args:
            result_name: Name of the result.
            result_value: The result value.
            quality: Quality score for the result.
            method_name: Optional method name for specific explanations.

        Returns:
            ResultExplanation with detailed reasoning.

        Example:
            >>> explanation = explainer.explain(
            ...     "frequency",
            ...     10.5e6,
            ...     score,
            ...     "fft"
            ... )
            >>> print(explanation.to_narrative())
        """
        explanation = ResultExplanation(
            result_name=result_name,
            result_value=result_value,
            quality=quality,
        )

        # Fall back to the result name when no method name was supplied.
        key = self._get_method_key(method_name or result_name)
        templates = self.METHOD_EXPLANATIONS.get(key, {})

        # Reliability reasons are only produced for reliable results;
        # concerns, recommendations, and alternatives are always filled.
        if quality.is_reliable:
            explanation.why_reliable = self._generate_reliable_reasons(quality, templates)
        explanation.why_unreliable = self._generate_unreliable_reasons(quality, templates)
        explanation.recommendations = quality.get_recommendations()
        explanation.alternative_methods = list(templates.get("alternatives", []))

        return explanation

    def _get_method_key(self, name: str) -> str:
        """Map a method or result name onto a template key.

        Args:
            name: Method or result name.

        Returns:
            Standardized method key ("fft", "edge_detection",
            "statistics", or "generic").
        """
        lowered = name.lower()
        if any(token in lowered for token in ("fft", "spectral")):
            return "fft"
        if "edge" in lowered:
            return "edge_detection"
        if any(token in lowered for token in ("stat", "mean", "std")):
            return "statistics"
        return "generic"

    def _generate_reliable_reasons(
        self,
        quality: AnalysisQualityScore,
        templates: dict[str, Any],
    ) -> list[str]:
        """Generate reasons why the result is reliable.

        Args:
            quality: Quality score.
            templates: Method-specific explanation templates.

        Returns:
            Up to four reasons: template reasons first, then any
            factor-based reasons whose threshold is met.
        """
        reasons: list[str] = list(templates.get("reliable", []))

        factor_checks = (
            (quality.data_quality_factor >= 0.8, "High quality input data"),
            (quality.sample_sufficiency >= 0.9, "Sufficient sample count for analysis"),
            (quality.method_reliability >= 0.85, "Analysis method has high inherent reliability"),
        )
        reasons.extend(text for met, text in factor_checks if met)

        # Cap at the top four to keep the narrative short.
        return reasons[:4]

    def _generate_unreliable_reasons(
        self,
        quality: AnalysisQualityScore,
        templates: dict[str, Any],
    ) -> list[str]:
        """Generate reasons why the result may be unreliable.

        Args:
            quality: Quality score.
            templates: Method-specific explanation templates.

        Returns:
            List of concerns: quality warnings first, then any
            factor-based concerns whose threshold is crossed.
        """
        concerns: list[str] = list(quality.warnings)

        factor_checks = (
            (quality.data_quality_factor < 0.5, "Input data quality is poor"),
            (quality.sample_sufficiency < 0.5, "Insufficient samples for reliable estimate"),
            (quality.method_reliability < 0.6, "Analysis method has lower reliability"),
        )
        concerns.extend(text for hit, text in factor_checks if hit)

        return concerns
def explain_result(
    result_name: str,
    result_value: Any,
    quality: AnalysisQualityScore,
    method_name: str | None = None,
) -> str:
    """Convenience function to get a result explanation as text.

    Args:
        result_name: Name of the result.
        result_value: The result value.
        quality: Quality score.
        method_name: Optional method name.

    Returns:
        Human-readable explanation string.

    Example:
        >>> explanation = explain_result("frequency", 10.5e6, score, "fft")
        >>> print(explanation)
        Result: frequency = 1.050e+07
        Confidence: 85.0% (high)
        ...
    """
    # Delegate to a throwaway explainer and render the narrative directly.
    return (
        ResultExplainer()
        .explain(result_name, result_value, quality, method_name)
        .to_narrative()
    )
# Public API of this module.
__all__ = ["ResultExplainer", "ResultExplanation", "explain_result"]