Coverage for session_buddy / tools / crackerjack_tools.py: 47.18%

561 statements  

« prev     ^ index     » next       coverage.py v7.13.1, created at 2026-01-04 00:43 -0800

1#!/usr/bin/env python3 

2"""Crackerjack integration tools for session-mgmt-mcp. 

3 

4Following crackerjack architecture patterns for quality monitoring, 

5code analysis, and development workflow integration. 

6""" 

7 

8from __future__ import annotations 

9 

10import logging 

11import operator 

12import typing as t 

13from pathlib import Path 

14from typing import TYPE_CHECKING, Any 

15 

16from session_buddy.utils.instance_managers import ( 

17 get_reflection_database as resolve_reflection_database, 

18) 

19from session_buddy.utils.logging import get_session_logger 

20 

21logger = logging.getLogger(__name__) 

22 

23if TYPE_CHECKING: 

24 from session_buddy.crackerjack_integration import CrackerjackResult 

25 

26 

27# Extracted tool functions to reduce register_crackerjack_tools complexity 

async def execute_crackerjack_command(
    command: str,
    args: str = "",
    working_directory: str = ".",
    timeout: int = 300,
    ai_agent_mode: bool = False,
) -> str:
    """Execute a Crackerjack command with enhanced AI integration.

    Validates that ``command`` is a semantic name (not a CLI flag) and that
    ``args`` does not smuggle in ``--ai-fix`` (callers must use
    ``ai_agent_mode=True`` instead), then delegates to the implementation.

    Args:
        command: Semantic command name (test, lint, check, format, ...).
        args: Extra arguments; must NOT contain --ai-fix.
        working_directory: Directory the command runs in.
        timeout: Timeout in seconds.
        ai_agent_mode: Enable AI-powered auto-fix (replaces --ai-fix).

    Returns:
        Formatted execution results, or a validation-error message when the
        inputs are invalid.
    """
    valid_commands = {
        "test",
        "lint",
        "check",
        "format",
        "typecheck",
        "security",
        "complexity",
        "analyze",
        "build",
        "clean",
        "all",
        "run",
    }
    allowed = ", ".join(sorted(valid_commands))

    # Reject flag-style input outright (e.g. command="--ai-fix -t").
    if command.startswith("--"):
        return (
            f"❌ **Invalid Command**: {command!r}\n\n"
            "**Error**: Commands should be semantic names, not flags.\n\n"
            f"**Valid commands**: {allowed}\n\n"
            "**Correct usage**:\n"
            "```python\n"
            "execute_crackerjack_command(command='test', ai_agent_mode=True)\n"
            "```\n\n"
            "**Not**:\n"
            "```python\n"
            "execute_crackerjack_command(command='--ai-fix -t') # Wrong!\n"
            "```"
        )

    # Unknown (but non-flag) command: suggest the closest valid name.
    if command not in valid_commands:
        suggested = _suggest_command(command, valid_commands)
        return (
            f"❌ **Unknown Command**: {command!r}\n\n"
            f"**Valid commands**: {allowed}\n\n"
            f"**Did you mean**: `{suggested}`"
        )

    # --ai-fix is superseded by the ai_agent_mode parameter.
    if "--ai-fix" in args:
        return (
            "❌ **Invalid Args**: Found '--ai-fix' in args parameter\n\n"
            "**Use instead**: Set `ai_agent_mode=True` parameter\n\n"
            "**Correct**:\n"
            "```python\n"
            f"execute_crackerjack_command(command='{command}', ai_agent_mode=True)\n"
            "```"
        )

    # All inputs validated: hand off to the implementation helper.
    return await _execute_crackerjack_command_impl(
        command,
        args,
        working_directory,
        timeout,
        ai_agent_mode,
    )

114 

115 

async def crackerjack_run(
    command: str,
    args: str = "",
    working_directory: str = ".",
    timeout: int = 300,
    ai_agent_mode: bool = False,
) -> str:
    """Run crackerjack with enhanced analytics.

    Performs the same validation as execute_crackerjack_command (semantic
    command names only, no --ai-fix in ``args``) against a smaller command
    set, then delegates to the analytics implementation.

    Args:
        command: Semantic command name (test, lint, check, format, security,
            complexity, all).
        args: Extra arguments; must NOT contain --ai-fix.
        working_directory: Directory the command runs in.
        timeout: Timeout in seconds.
        ai_agent_mode: Enable AI-powered auto-fix (replaces --ai-fix).

    Returns:
        Formatted execution results, or a validation-error message when the
        inputs are invalid.
    """
    valid_commands = {
        "test",
        "lint",
        "check",
        "format",
        "security",
        "complexity",
        "all",
    }
    allowed = ", ".join(sorted(valid_commands))

    # Flags are not commands.
    if command.startswith("--"):
        return (
            f"❌ **Invalid Command**: {command!r}\n\n"
            "**Error**: Commands should be semantic names, not flags.\n\n"
            f"**Valid commands**: {allowed}\n\n"
            "**Correct usage**:\n"
            "```python\n"
            "crackerjack_run(command='test', ai_agent_mode=True)\n"
            "```\n\n"
            "**Not**:\n"
            "```python\n"
            "crackerjack_run(command='--ai-fix -t') # Wrong!\n"
            "```"
        )

    # Unknown command: offer the closest match.
    if command not in valid_commands:
        suggested = _suggest_command(command, valid_commands)
        return (
            f"❌ **Unknown Command**: {command!r}\n\n"
            f"**Valid commands**: {allowed}\n\n"
            f"**Did you mean**: `{suggested}`"
        )

    # --ai-fix belongs in ai_agent_mode, not in args.
    if "--ai-fix" in args:
        return (
            "❌ **Invalid Args**: Found '--ai-fix' in args parameter\n\n"
            "**Use instead**: Set `ai_agent_mode=True` parameter\n\n"
            "**Correct**:\n"
            "```python\n"
            f"crackerjack_run(command='{command}', ai_agent_mode=True)\n"
            "```"
        )

    # Validated: delegate to the analytics implementation.
    return await _crackerjack_run_impl(
        command,
        args,
        working_directory,
        timeout,
        ai_agent_mode,
    )

197 

198 

async def crackerjack_history(
    command_filter: str = "",
    days: int = 7,
    working_directory: str = ".",
) -> str:
    """Report crackerjack execution history, trends and patterns.

    Thin delegating wrapper around the shared history implementation.
    """
    report = await _crackerjack_history_impl(command_filter, days, working_directory)
    return report

206 

207 

async def crackerjack_metrics(working_directory: str = ".", days: int = 30) -> str:
    """Summarize quality-metric trends from crackerjack execution history.

    Thin delegating wrapper around the shared metrics implementation.
    """
    report = await _crackerjack_metrics_impl(working_directory, days)
    return report

211 

212 

async def crackerjack_patterns(days: int = 7, working_directory: str = ".") -> str:
    """Analyze test-failure patterns and trends.

    Thin delegating wrapper around the shared pattern-analysis implementation.
    """
    report = await _crackerjack_patterns_impl(days, working_directory)
    return report

216 

217 

async def crackerjack_help() -> str:
    """Return comprehensive guidance for choosing crackerjack commands.

    Thin delegating wrapper around the shared help implementation.
    """
    text = await _crackerjack_help_impl()
    return text

221 

222 

async def get_crackerjack_results_history(
    command_filter: str = "",
    days: int = 7,
    working_directory: str = ".",
) -> str:
    """Return recent Crackerjack command execution history.

    Backward-compatible alias delegating to the shared history implementation.
    """
    report = await _crackerjack_history_impl(command_filter, days, working_directory)
    return report

230 

231 

async def get_crackerjack_quality_metrics(
    days: int = 30,
    working_directory: str = ".",
) -> str:
    """Return quality-metric trends from Crackerjack execution history.

    Backward-compatible alias; note the implementation takes
    (working_directory, days), i.e. the reverse of this signature's order.
    """
    report = await _crackerjack_metrics_impl(working_directory, days)
    return report

238 

239 

async def analyze_crackerjack_test_patterns(
    days: int = 7,
    working_directory: str = ".",
) -> str:
    """Analyze test-failure patterns for debugging insights.

    Backward-compatible alias delegating to the shared pattern analysis.
    """
    report = await _crackerjack_patterns_impl(days, working_directory)
    return report

246 

247 

async def crackerjack_quality_trends(
    days: int = 30,
    working_directory: str = ".",
) -> str:
    """Analyze quality trends over time with actionable insights.

    Thin delegating wrapper around the quality-trends implementation.
    """
    report = await _crackerjack_quality_trends_impl(days, working_directory)
    return report

254 

255 

async def crackerjack_health_check() -> str:
    """Check Crackerjack integration health and report diagnostics.

    Thin delegating wrapper around the health-check implementation.
    """
    report = await _crackerjack_health_check_impl()
    return report

259 

260 

261# Alias for backward compatibility 

async def quality_monitor() -> str:
    """Proactive quality monitoring with an early-warning system (Phase 3).

    Backward-compatible alias for the health-check implementation.
    """
    report = await _crackerjack_health_check_impl()
    return report

265 

266 

def _get_logger() -> t.Any:
    """Return the session logger, falling back to the module logger.

    The broad except is deliberate: logging must never break the caller.
    """
    try:
        session_logger = get_session_logger()
    except Exception:
        return logger
    return session_logger

273 

274 

async def _get_reflection_db() -> Any | None:
    """Resolve the reflection database via the DI helper.

    Returns None (after logging a warning) when no database is available.
    """
    database = await resolve_reflection_database()
    if database is not None:
        return database
    _get_logger().warning(
        "Reflection database not available for crackerjack operations.",
    )
    return None

283 

284 

def _format_execution_status(result: CrackerjackResult) -> str:
    """Render an execution-status line (plus failure details) as markdown.

    A non-zero exit code is treated as failure; hook names parsed from
    stdout enrich both the failure and success messages.
    """
    passed_hooks, failed_hooks = _parse_crackerjack_output(result.stdout)

    if result.exit_code != 0:
        pieces = [f"❌ **Status**: Failed (exit code: {result.exit_code})\n"]
        if failed_hooks:
            pieces.append(f"**Failed Hooks**: {', '.join(failed_hooks)}\n")
        # Surface stderr only when it looks error-related (truncated to 500).
        if result.stderr and "error" in result.stderr.lower():
            pieces.append(f"**Error Details**:\n```\n{result.stderr[:500]}...\n```\n")
        return "".join(pieces)

    if passed_hooks:
        return f"✅ **Status**: Success ({len(passed_hooks)} hooks passed)\n"

    return "✅ **Status**: Success\n"

309 

310 

def _parse_crackerjack_output(output: str) -> tuple[list[str], list[str]]:
    """Parse crackerjack output into (passed_hooks, failed_hooks).

    Tries the structured hook parser first and falls back to a lenient
    line-by-line scan when the output cannot be parsed structurally.
    """
    # Only ParseError is needed here; the structured parser function itself
    # is imported by _parse_with_structured_results. (Previously this line
    # also imported parse_hook_output, which was unused in this scope.)
    from .hook_parser import ParseError

    try:
        return _parse_with_structured_results(output)
    except ParseError:
        return _parse_with_line_scanner(output)

319 

320 

def _parse_with_structured_results(output: str) -> tuple[list[str], list[str]]:
    """Split structured hook results into (passed, failed) hook-name lists."""
    from .hook_parser import parse_hook_output

    passed: list[str] = []
    failed: list[str] = []

    for hook_result in parse_hook_output(output):
        bucket = passed if hook_result.passed else failed
        bucket.append(hook_result.hook_name)

    return passed, failed

332 

333 

def _parse_with_line_scanner(output: str) -> tuple[list[str], list[str]]:
    """Fallback parser: scan output line-by-line for hook pass/fail markers."""
    passed: list[str] = []
    failed: list[str] = []

    for raw_line in output.split("\n"):
        if _should_parse_line(raw_line):
            name = _extract_hook_name(raw_line)
            if name:
                _categorize_hook(name, raw_line, passed, failed)

    return passed, failed

348 

349 

350def _should_parse_line(line: str) -> bool: 

351 """Check if a line should be parsed for hook results.""" 

352 return "..." in line and ( 

353 "✅" in line or "❌" in line or "Passed" in line or "Failed" in line 

354 ) 

355 

356 

357def _extract_hook_name(line: str) -> str | None: 

358 """Extract hook name from a line.""" 

359 parts = line.split("...") 

360 if parts: 

361 hook_name = parts[0].strip() 

362 return hook_name if hook_name and not hook_name.startswith("-") else None 

363 return None 

364 

365 

366def _categorize_hook( 

367 hook_name: str, 

368 line: str, 

369 passed_hooks: list[str], 

370 failed_hooks: list[str], 

371) -> None: 

372 """Categorize hook as passed or failed based on the line content.""" 

373 if "❌" in line or "Failed" in line: 

374 failed_hooks.append(hook_name) 

375 elif "✅" in line or "Passed" in line: 

376 passed_hooks.append(hook_name) 

377 

378 

def _parse_hook_results_table(output: str) -> str:
    """Extract the first contiguous hook-results table from output.

    Collection starts at a results-section header and stops at the first
    line that is neither table content nor another header.
    """
    collected: list[str] = []
    collecting = False

    for line in output.split("\n"):
        if _is_results_section_header(line):
            collecting = True
            collected.append(line)
        elif collecting:
            if _is_new_section_start(line) or not _should_add_to_results(line):
                break
            collected.append(line)

    return "\n".join(collected) if collected else ""

402 

403 

404def _is_results_section_header(line: str) -> bool: 

405 """Determine whether the line marks the start of a results section.""" 

406 return "Fast Hook Results:" in line or "Comprehensive Hook Results:" in line 

407 

408 

def _parse_hook_stage_results(output: str) -> str:
    """Extract hook-results tables for every stage found in the output.

    Scans for 'Fast Hook Results:' / 'Comprehensive Hook Results:' headers
    and collects each stage's table via _extract_single_stage_results.

    Fix: the previous version advanced the index by the extracted length AND
    then by one more, so the first line after a table was never examined —
    an immediately adjacent stage header would have been silently skipped.
    """
    lines = output.split("\n")
    all_stage_results: list[str] = []

    i = 0
    while i < len(lines):
        if "Fast Hook Results:" in lines[i] or "Comprehensive Hook Results:" in lines[i]:
            stage_results = _extract_single_stage_results(lines, i)
            all_stage_results.extend(stage_results)
            # Resume exactly at the first line the extraction did not consume.
            i += len(stage_results)
            continue
        i += 1

    return "\n".join(all_stage_results) if all_stage_results else ""

428 

429 

def _extract_single_stage_results(lines: list[str], start_index: int) -> list[str]:
    """Collect one stage's results table starting at its header line.

    Always includes the header at start_index, then consumes following
    lines while they qualify as table content; any other line (including a
    new-section marker) terminates collection.
    """
    collected = [lines[start_index]]

    for current in lines[start_index + 1:]:
        if not _should_add_to_results(current):
            break
        collected.append(current)

    return collected

447 

448 

def _should_add_to_results(line: str) -> bool:
    """Return True for lines that belong in an in-progress results table.

    Blank lines, '::'-delimited rows and dash separators all qualify.
    """
    stripped = line.strip()
    if not stripped:
        return True
    return "::" in line or _is_separator_line(stripped)

453 

454 

455def _is_separator_line(line: str) -> bool: 

456 """Check if line is a separator line (contains only dashes and spaces).""" 

457 return ( 

458 "-" in line and len(line.strip()) > 10 and all(c in "- " for c in line.strip()) 

459 ) 

460 

461 

462def _is_new_section_start(line: str) -> bool: 

463 """Check if line indicates a new section start.""" 

464 return "⏳ Started:" in line or "Workflow" in line or "Building" in line 

465 

466 

467def _format_output_sections(result: CrackerjackResult) -> str: 

468 """Format stdout and stderr sections.""" 

469 output = "" 

470 if result.stdout.strip(): 

471 output += f"\n**Output**:\n```\n{result.stdout}\n```\n" 

472 if result.stderr.strip(): 

473 output += f"\n**Errors**:\n```\n{result.stderr}\n```\n" 

474 return output 

475 

476 

477def _format_metrics_section(result: CrackerjackResult) -> str: 

478 """Format metrics and insights sections.""" 

479 output = "\n📊 **Metrics**:\n" 

480 output += f"- Execution time: {result.execution_time:.2f}s\n" 

481 output += f"- Exit code: {result.exit_code}\n" 

482 

483 if result.quality_metrics: 

484 output += "\n📈 **Quality Metrics**:\n" 

485 for metric, value in result.quality_metrics.items(): 

486 output += f"- {metric.replace('_', ' ').title()}: {value:.1f}\n" 

487 

488 if result.memory_insights: 

489 output += "\n🧠 **Insights**:\n" 

490 for insight in result.memory_insights[:5]: # Limit to top 5 

491 output += f"- {insight}\n" 

492 

493 return output 

494 

495 

496# Implementation functions (extracted from registration function) 

497async def _execute_crackerjack_command_impl( 

498 command: str, 

499 args: str = "", 

500 working_directory: str = ".", 

501 timeout: int = 300, 

502 ai_agent_mode: bool = False, 

503) -> str: 

504 """Execute a Crackerjack command with enhanced AI integration.""" 

505 try: 

506 from session_buddy.crackerjack_integration import CrackerjackIntegration 

507 

508 integration = CrackerjackIntegration() 

509 result = await integration.execute_crackerjack_command( 

510 command, 

511 args.split() if args else None, 

512 working_directory, 

513 timeout, 

514 ai_agent_mode, 

515 ) 

516 

517 # Format response 

518 output = f"🔧 **Crackerjack {command}** executed\n\n" 

519 output += _format_execution_status(result) 

520 output += _format_output_sections(result) 

521 output += _format_metrics_section(result) 

522 

523 return output 

524 

525 except ImportError: 

526 _get_logger().warning("Crackerjack integration not available") 

527 return "❌ Crackerjack integration not available. Install crackerjack package" 

528 except Exception as e: 

529 _get_logger().exception(f"Crackerjack execution failed: {e}") 

530 return f"❌ Crackerjack execution failed: {e!s}" 

531 

532 

def _format_basic_result(result: Any, command: str) -> str:
    """Build the baseline markdown report for one execution.

    Includes status (from exit code), hook names parsed from stdout, any
    detailed hook-results tables, raw stdout, and stderr (labelled as
    structured logging output).
    """
    parts: list[str] = [f"🔧 **Crackerjack {command}** executed\n\n"]

    passed_hooks, failed_hooks = _parse_crackerjack_output(result.stdout)
    tables = _parse_hook_stage_results(result.stdout)

    if result.exit_code != 0:
        parts.append(f"❌ **Status**: Failed (exit code: {result.exit_code})\n")
        if failed_hooks:
            parts.append(f"**Failed Hooks**: {', '.join(failed_hooks)}\n")
        if passed_hooks:
            parts.append(f"**Passed Hooks**: {', '.join(passed_hooks)}\n")
    else:
        parts.append("✅ **Status**: Success\n")
        if passed_hooks:
            parts.append(f"**Passed Hooks**: {', '.join(passed_hooks)}\n")

    if tables.strip():
        parts.append(f"\n**Hook Results**:\n```\n{tables}\n```\n")

    if result.stdout.strip():
        parts.append(f"\n**Output**:\n```\n{result.stdout}\n```\n")

    # stderr carries the structured logging stream, not necessarily errors.
    if result.stderr.strip():
        parts.append(f"\n**Structured Logging**:\n```\n{result.stderr}\n```\n")

    return "".join(parts)

569 

570 

async def _get_ai_recommendations_with_history(
    result: Any,
    working_directory: str,
) -> tuple[str, list[Any], dict[str, Any]]:
    """Get AI recommendations adjusted by historical effectiveness.

    Args:
        result: Execution result providing stdout, stderr and exit_code.
        working_directory: Project directory; its basename keys the history.

    Returns:
        Tuple of (formatted markdown output, recommendation list,
        history-analysis dict). The dict stays empty when no reflection
        database is available.
    """
    from .agent_analyzer import AgentAnalyzer
    from .recommendation_engine import RecommendationEngine

    # Base recommendations derived purely from this run's output.
    recommendations = AgentAnalyzer.analyze(
        result.stdout,
        result.stderr,
        result.exit_code,
    )

    # Adjust confidence using the last 30 days of stored executions,
    # when a reflection database is available.
    db = await _get_reflection_db()
    history_analysis: dict[str, Any] = {}

    if db:
        async with db:
            history_analysis = await RecommendationEngine.analyze_history(
                db,
                Path(working_directory).name,
                days=30,
            )

        # NOTE(review): direct key access — assumes analyze_history always
        # returns an "agent_effectiveness" key; confirm against its contract.
        if history_analysis["agent_effectiveness"]:
            recommendations = RecommendationEngine.adjust_confidence(
                recommendations,
                history_analysis["agent_effectiveness"],
            )

    output = AgentAnalyzer.format_recommendations(recommendations)

    if history_analysis.get("insights"):
        output += "\n💡 **Historical Insights**:\n"
        # Only the top three insights are surfaced.
        for insight in history_analysis["insights"][:3]:
            output += f" {insight}\n"

    return output, recommendations, history_analysis

612 

613 

614def _build_execution_metadata( 

615 working_directory: str, 

616 result: Any, 

617 metrics: Any, 

618 recommendations: list[Any] | None = None, 

619 history_analysis: dict[str, Any] | None = None, 

620) -> dict[str, Any]: 

621 """Build metadata dictionary for execution storage.""" 

622 metadata = { 

623 "project": Path(working_directory).name, 

624 "exit_code": result.exit_code, 

625 "execution_time": result.execution_time, 

626 "metrics": metrics.to_dict(), 

627 } 

628 

629 if recommendations: 629 ↛ 630line 629 didn't jump to line 630 because the condition on line 629 was never true

630 metadata["agent_recommendations"] = [ 

631 { 

632 "agent": rec.agent.value, 

633 "confidence": rec.confidence, 

634 "reason": rec.reason, 

635 "quick_fix": rec.quick_fix_command, 

636 } 

637 for rec in recommendations 

638 ] 

639 

640 if history_analysis: 640 ↛ 641line 640 didn't jump to line 641 because the condition on line 640 was never true

641 metadata["pattern_analysis"] = { 

642 "total_patterns": len(history_analysis["patterns"]), 

643 "total_executions": history_analysis["total_executions"], 

644 "insights": history_analysis["insights"][:3], 

645 } 

646 

647 return metadata 

648 

649 

async def _store_execution_result(
    command: str,
    formatted_result: str,
    result: Any,
    metrics: Any,
    working_directory: str,
    ai_agent_mode: bool,
    recommendations: list[Any] | None = None,
    history_analysis: dict[str, Any] | None = None,
    db: Any | None = None,
) -> str:
    """Store execution result in history (best-effort).

    Returns a short confirmation string on success, or "" when no database
    is available or storage fails — failures are only logged at debug level
    so persistence never breaks the caller's run.
    """
    try:
        metadata = _build_execution_metadata(
            working_directory,
            result,
            metrics,
            recommendations,
            history_analysis,
        )

        # Only the first 500 chars of the formatted report are persisted.
        content = f"Crackerjack {command} execution: {formatted_result[:500]}..."

        active_db = db
        # NOTE(review): this branch stores WITHOUT entering `async with`,
        # unlike the fallback below — presumably a caller-supplied db is
        # already an open context; confirm before unifying the two paths.
        if ai_agent_mode and result.exit_code != 0 and active_db:
            await active_db.store_conversation(content=content, metadata=metadata)
        else:
            if active_db is None:
                active_db = await _get_reflection_db()
            if not active_db:
                return ""
            async with active_db:
                await active_db.store_conversation(content=content, metadata=metadata)

        return "📝 Execution stored in session history\n"

    except Exception as e:
        # Best-effort persistence: swallow and log, never raise to caller.
        _get_logger().debug(f"Failed to store crackerjack execution: {e}")
        return ""

689 

690 

691def _suggest_command(invalid: str, valid: set[str]) -> str: 

692 """Suggest closest valid command using fuzzy matching. 

693 

694 Args: 

695 invalid: Invalid command that was provided 

696 valid: Set of valid command names 

697 

698 Returns: 

699 Suggested command name or "check" as fallback 

700 

701 """ 

702 from difflib import get_close_matches 

703 

704 matches = get_close_matches(invalid, valid, n=1, cutoff=0.6) 

705 return matches[0] if matches else "check" 

706 

707 

708def _build_error_troubleshooting( 

709 error: Exception, 

710 timeout: int, 

711 working_directory: str, 

712) -> str: 

713 """Build error-specific troubleshooting steps.""" 

714 if isinstance(error, ImportError): 

715 return ( 

716 "1. Verify crackerjack is installed: `uv pip list | grep crackerjack`\n" 

717 "2. Reinstall if needed: `uv pip install crackerjack`\n" 

718 "3. Check Python environment: `which python`\n" 

719 ) 

720 if isinstance(error, FileNotFoundError): 

721 return ( 

722 f"1. Verify working directory exists: `ls -la {working_directory}`\n" 

723 "2. Check if directory is a git repository: `git status`\n" 

724 "3. Ensure you're in the correct project directory\n" 

725 ) 

726 if isinstance(error, TimeoutError) or "timeout" in str(error).lower(): 

727 return ( 

728 f"1. Command exceeded {timeout}s timeout\n" 

729 "2. Try increasing timeout or use `--skip-hooks` for faster iteration\n" 

730 "3. Check for infinite loops or hanging processes\n" 

731 ) 

732 if isinstance(error, (OSError, PermissionError)): 

733 return ( 

734 "1. Check file permissions in working directory\n" 

735 "2. Ensure you have write access to project files\n" 

736 "3. Verify no files are locked by other processes\n" 

737 ) 

738 return ( 

739 "1. Try running command directly: `python -m crackerjack`\n" 

740 "2. Check crackerjack logs for detailed errors\n" 

741 "3. Use `--ai-debug` for deeper analysis\n" 

742 ) 

743 

744 

async def _crackerjack_run_impl(
    command: str,
    args: str = "",
    working_directory: str = ".",
    timeout: int = 300,
    ai_agent_mode: bool = False,
) -> str:
    """Run crackerjack with enhanced analytics (replaces /crackerjack:run).

    Args:
        command: Semantic command name, pre-validated by the caller.
        args: Extra CLI arguments (whitespace-split before execution).
        working_directory: Directory the command runs in.
        timeout: Timeout in seconds passed to the integration layer.
        ai_agent_mode: When True and the run failed, AI recommendations are
            appended to the report.

    Returns:
        Markdown report of the run, or a context-rich troubleshooting
        message when anything raises.
    """
    try:
        from session_buddy.crackerjack_integration import CrackerjackIntegration

        from .quality_metrics import QualityMetricsExtractor

        # Execute crackerjack command
        integration = CrackerjackIntegration()
        result = await integration.execute_crackerjack_command(
            command,
            args.split() if args else None,
            working_directory,
            timeout,
            ai_agent_mode,
        )

        # Format basic result
        formatted_result = _format_basic_result(result, command)

        # Extract and display quality metrics
        metrics = QualityMetricsExtractor.extract(result.stdout, result.stderr)
        formatted_result += metrics.format_for_display()

        # AI recommendations with learning (only on failures)
        recommendations = None
        history_analysis = None
        # NOTE(review): db stays None here, so _store_execution_result
        # always re-resolves the database itself — confirm intended.
        db = None

        if ai_agent_mode and result.exit_code != 0:
            (
                ai_output,
                recommendations,
                history_analysis,
            ) = await _get_ai_recommendations_with_history(result, working_directory)
            formatted_result += ai_output

        # Build final output
        output = f"🔧 **Enhanced Crackerjack Run**\n\n{formatted_result}\n"

        # Store execution in history (best-effort; yields "" on failure)
        storage_msg = await _store_execution_result(
            command,
            formatted_result,
            result,
            metrics,
            working_directory,
            ai_agent_mode,
            recommendations,
            history_analysis,
            db,
        )
        output += storage_msg

        return output

    except Exception as e:
        # Never raise to the tool caller: render a context-rich, actionable
        # troubleshooting message instead.
        error_type = type(e).__name__
        error_msg = f"❌ **Enhanced crackerjack run failed**: {error_type}\n\n"
        error_msg += f"**Error Details**: {e!s}\n\n"
        error_msg += "**Context**:\n"
        error_msg += f"- Command: `{command} {args}`\n"
        error_msg += f"- Working Directory: `{working_directory}`\n"
        error_msg += f"- Timeout: {timeout}s\n"
        error_msg += f"- AI Mode: {'Enabled' if ai_agent_mode else 'Disabled'}\n\n"
        error_msg += "**Troubleshooting Steps**:\n"
        error_msg += _build_error_troubleshooting(e, timeout, working_directory)
        error_msg += "\n**Quick Fix**: Run `python -m crackerjack --help` to verify installation\n"

        _get_logger().exception(
            "Crackerjack execution failed",
            extra={
                "command": command,
                "cmd_args": args,
                "working_dir": working_directory,
                "ai_mode": ai_agent_mode,
                "error_type": error_type,
            },
        )

        return error_msg

832 

833 

def _extract_crackerjack_commands(
    results: list[dict[str, Any]],
) -> dict[str, list[Any]]:
    """Group stored results by the crackerjack command they mention.

    Results whose content does not mention "crackerjack" are skipped; when
    the command cannot be extracted the result is grouped under "unknown".

    Fixes: the SAFE_PATTERNS import and pattern lookup were previously
    executed inside the loop for every matching result — both are
    loop-invariant and are now hoisted; manual key-initialization is
    replaced with dict.setdefault.
    """
    from session_buddy.utils.regex_patterns import SAFE_PATTERNS

    crackerjack_cmd_pattern = SAFE_PATTERNS["crackerjack_command"]
    commands: dict[str, list[Any]] = {}

    for result in results:
        content = result.get("content", "")
        if "crackerjack" not in content.lower():
            continue

        match = crackerjack_cmd_pattern.search(content.lower())
        cmd = match.group(1) if match else "unknown"
        commands.setdefault(cmd, []).append(result)

    return commands

857 

858 

859def _format_recent_executions(results: list[dict[str, Any]]) -> str: 

860 """Format recent executions for output.""" 

861 output = "**Recent Executions**:\n" 

862 

863 for i, result in enumerate(results[:10], 1): 

864 timestamp = result.get("timestamp", "Unknown") 

865 content = result.get("content", "")[:100] 

866 output += f"{i}. ({timestamp}) {content}...\n" 

867 

868 return output 

869 

870 

871def _parse_result_timestamp(result: dict[str, Any]) -> Any | None: 

872 """Parse timestamp from result dict.""" 

873 from datetime import datetime 

874 

875 timestamp_str = result.get("timestamp") 

876 if not timestamp_str: 

877 return None 

878 

879 try: 

880 if isinstance(timestamp_str, str): 

881 return datetime.fromisoformat(timestamp_str) 

882 return timestamp_str 

883 except (ValueError, AttributeError): 

884 return None 

885 

886 

def _filter_results_by_date(
    results: list[dict[str, Any]],
    start_date: Any,
) -> list[dict[str, Any]]:
    """Keep results at/after start_date; undated results are always kept."""

    def _in_range(entry: dict[str, Any]) -> bool:
        stamp = _parse_result_timestamp(entry)
        # NOTE(review): assumes stamp and start_date are comparable
        # (both naive or both aware datetimes) — confirm at the caller.
        return stamp is None or stamp >= start_date

    return [entry for entry in results if _in_range(entry)]

901 

902 

def _format_history_output(filtered_results: list[dict[str, Any]], days: int) -> str:
    """Render the history report: header, per-command summary, recent runs."""
    commands = _extract_crackerjack_commands(filtered_results)

    header = f"📊 **Crackerjack History** (last {days} days)\n\n"
    summary = (
        f"**Total Executions**: {len(filtered_results)}\n"
        f"**Commands Used**: {', '.join(commands.keys())}\n\n"
    )

    return header + summary + _format_recent_executions(filtered_results)

918 

919 

async def _crackerjack_history_impl(
    command_filter: str = "",
    days: int = 7,
    working_directory: str = ".",
) -> str:
    """Build the crackerjack execution-history report for one project.

    Searches stored conversations for crackerjack runs (optionally filtered
    by command), keeps those within the last ``days`` days, and formats
    them; errors are reported inline rather than raised.
    """
    try:
        from datetime import datetime, timedelta

        db = await _get_reflection_db()
        if not db:
            return "❌ Reflection database not available for crackerjack history"

        cutoff = datetime.now() - timedelta(days=days)

        async with db:
            stored = await db.search_conversations(
                query=f"crackerjack {command_filter}".strip(),
                project=Path(working_directory).name,
                limit=50,
            )

            recent = _filter_results_by_date(stored, cutoff)
            if not recent:
                return f"📊 No crackerjack executions found in last {days} days"
            return _format_history_output(recent, days)

    except Exception as e:
        _get_logger().exception(f"Crackerjack history failed: {e}")
        return f"❌ History retrieval failed: {e!s}"

953 

954 

955def _calculate_execution_summary(results: list[dict[str, Any]]) -> dict[str, Any]: 

956 """Calculate basic execution summary statistics.""" 

957 success_count = sum(1 for r in results if "success" in r.get("content", "").lower()) 

958 failure_count = len(results) - success_count 

959 return { 

960 "total": len(results), 

961 "success": success_count, 

962 "failure": failure_count, 

963 "success_rate": (success_count / len(results) * 100) if results else 0, 

964 } 

965 

966 

967def _extract_quality_keywords(results: list[dict[str, Any]]) -> dict[str, int]: 

968 """Extract quality keyword counts from results.""" 

969 quality_keywords = ["lint", "test", "security", "complexity", "coverage"] 

970 keyword_counts: dict[str, int] = {} 

971 

972 for result in results: 

973 content = result.get("content", "").lower() 

974 for keyword in quality_keywords: 

975 if keyword in content: 

976 keyword_counts[keyword] = keyword_counts.get(keyword, 0) + 1 

977 

978 return keyword_counts 

979 

980 

981def _format_quality_metrics_output( 

982 days: int, 

983 summary: dict[str, Any], 

984 keywords: dict[str, int], 

985) -> str: 

986 """Format quality metrics output.""" 

987 output = f"📊 **Crackerjack Quality Metrics** (last {days} days)\n\n" 

988 output += "**Execution Summary**:\n" 

989 output += f"- Total runs: {summary['total']}\n" 

990 output += f"- Successful: {summary['success']}\n" 

991 output += f"- Failed: {summary['failure']}\n" 

992 output += f"- Success rate: {summary['success_rate']:.1f}%\n\n" 

993 

994 if keywords: 

995 output += "**Quality Focus Areas**:\n" 

996 for keyword, count in sorted( 

997 keywords.items(), 

998 key=operator.itemgetter(1), 

999 reverse=True, 

1000 ): 

1001 output += f"- {keyword.title()}: {count} mentions\n" 

1002 

1003 output += "\n💡 Use `crackerjack analyze` for detailed quality analysis" 

1004 return output 

1005 

1006 

async def _crackerjack_metrics_impl(
    working_directory: str = ".",
    days: int = 30,
) -> str:
    """Get quality metrics trends from crackerjack execution history."""
    try:
        db = await _get_reflection_db()
        if not db:
            return "❌ Reflection database not available for quality metrics"

        async with db:
            history = await db.search_conversations(
                query="crackerjack metrics quality",
                project=Path(working_directory).name,
                limit=100,
            )

            if not history:
                return (
                    f"📊 **Crackerjack Quality Metrics** (last {days} days)\n\n"
                    "No quality metrics data available\n💡 Run `crackerjack analyze` to generate metrics\n"
                )

            # Summary stats first, then keyword counts, then format the report.
            return _format_quality_metrics_output(
                days,
                _calculate_execution_summary(history),
                _extract_quality_keywords(history),
            )

    except Exception as e:
        _get_logger().exception(f"Metrics analysis failed: {e}")
        return f"❌ Metrics analysis failed: {e!s}"

1037 

1038 

1039def _find_keyword_matches(content: str, keyword: str) -> list[tuple[int, int]]: 

1040 """Find all occurrences of a keyword in content.""" 

1041 matches = [] 

1042 start_pos = 0 

1043 while True: 

1044 pos = content.find(keyword, start_pos) 

1045 if pos == -1: 

1046 break 

1047 matches.append((pos, pos + len(keyword))) 

1048 start_pos = pos + 1 

1049 return matches 

1050 

1051 

def _extract_context_around_keyword(
    content: str,
    keyword: str,
    context_size: int = 30,
) -> list[str]:
    """Collect stripped snippets of *content* surrounding each *keyword* hit."""
    return [
        content[max(0, begin - context_size) : min(len(content), finish + context_size)].strip()
        for begin, finish in _find_keyword_matches(content, keyword)
    ]

1068 

1069 

def _extract_failure_patterns(
    results: list[dict[str, Any]],
    failure_keywords: list[str],
) -> dict[str, int]:
    """Tally context snippets around failure keywords across all results."""
    tallies: dict[str, int] = {}

    for entry in results:
        text = entry.get("content", "").lower()
        for word in failure_keywords:
            if word not in text:
                continue
            # Identical snippets across results accumulate into one count.
            for snippet in _extract_context_around_keyword(text, word):
                tallies[snippet] = tallies.get(snippet, 0) + 1

    return tallies

1086 

1087 

1088def _format_failure_patterns(patterns: dict[str, int]) -> str: 

1089 """Format failure patterns for output.""" 

1090 output = "" 

1091 

1092 if patterns: 

1093 output += "**Common Failure Patterns**:\n" 

1094 sorted_patterns = sorted( 

1095 patterns.items(), key=operator.itemgetter(1), reverse=True 

1096 ) 

1097 

1098 for i, (pattern, count) in enumerate(sorted_patterns[:10], 1): 

1099 output += f"{i}. ({count}x) {pattern}...\n" 

1100 

1101 output += f"\n📊 Total unique patterns: {len(patterns)}\n" 

1102 output += f"📊 Total failure mentions: {sum(patterns.values())}\n" 

1103 else: 

1104 output += "No clear failure patterns identified\n" 

1105 

1106 return output 

1107 

1108 

1109def _get_failure_keywords() -> list[str]: 

1110 """Get list of keywords to identify failure patterns.""" 

1111 return [ 

1112 "failed", 

1113 "error", 

1114 "exception", 

1115 "assertion", 

1116 "timeout", 

1117 ] 

1118 

1119 

async def _get_failure_pattern_results(
    working_directory: str,
    limit: int = 50,
) -> list[dict[str, Any]]:
    """Fetch stored conversations likely describing test failures."""
    db = await _get_reflection_db()
    if not db:
        # No database configured: report an empty history rather than erroring.
        return []

    project_name = Path(working_directory).name
    async with db:
        found = await db.search_conversations(  # type: ignore[no-any-return]
            query="test failure error pattern",
            project=project_name,
            limit=limit,
        )
    return found

1135 

1136 

1137def _format_patterns_header(days: int, results_count: int) -> str: 

1138 """Format the header for the patterns output.""" 

1139 output = f"🔍 **Test Failure Patterns** (last {days} days)\n\n" 

1140 

1141 if not results_count: 1141 ↛ 1145line 1141 didn't jump to line 1145 because the condition on line 1141 was always true

1142 output += "No test failure patterns found\n" 

1143 output += "✅ This might indicate good code quality!\n" 

1144 

1145 return output 

1146 

1147 

async def _crackerjack_patterns_impl(
    days: int = 7,
    working_directory: str = ".",
) -> str:
    """Analyze test failure patterns and trends."""
    try:
        history = await _get_failure_pattern_results(working_directory)
        report = _format_patterns_header(days, len(history))

        if not history:
            # Header already contains the "no patterns found" note.
            return report

        patterns = _extract_failure_patterns(history, _get_failure_keywords())
        return report + _format_failure_patterns(patterns)

    except Exception as e:
        _get_logger().exception(f"Pattern analysis failed: {e}")
        return f"❌ Pattern analysis failed: {e!s}"

1171 

1172 

1173async def _crackerjack_help_impl() -> str: 

1174 """Get comprehensive help for choosing the right crackerjack commands.""" 

1175 return """🔧 **Crackerjack Command Guide** 

1176 

1177**Quick Quality Checks**: 

1178- `crackerjack` - Fast lint and format 

1179- `crackerjack -t` - Include tests 

1180- `crackerjack --ai-fix -t` - AI-powered autonomous fixing 

1181 

1182**Analysis Commands**: 

1183- `crackerjack analyze` - Code quality analysis 

1184- `crackerjack security` - Security scanning 

1185- `crackerjack complexity` - Complexity analysis 

1186- `crackerjack typecheck` - Type checking 

1187 

1188**Development Workflow**: 

1189- `crackerjack lint` - Code formatting and linting 

1190- `crackerjack test` - Run test suite 

1191- `crackerjack check` - Comprehensive quality checks 

1192- `crackerjack clean` - Clean temporary files 

1193 

1194**Advanced Features**: 

1195- `--ai-fix` - Enable autonomous AI fixing 

1196- `--verbose` - Detailed output 

1197- `--fix` - Automatically fix issues where possible 

1198 

1199**MCP Integration**: 

1200- Use `execute_crackerjack_command` for any crackerjack command 

1201- Use `crackerjack_run` for enhanced analytics and history 

1202- Use `crackerjack_metrics` for quality trends 

1203 

1204💡 **Pro Tips**: 

1205- Always run `crackerjack -t` before commits 

1206- Use `--ai-fix` for complex quality issues 

1207- Check `crackerjack_history` to learn from past runs 

1208- Monitor trends with `crackerjack_metrics` 

1209""" 

1210 

1211 

async def _crackerjack_quality_trends_impl(
    days: int = 30,
    working_directory: str = ".",
) -> str:
    """Analyze quality trends over time with actionable insights."""
    try:
        db = await _get_reflection_db()
        if not db:
            return "❌ Reflection database not available for trend analysis"

        async with db:
            history = await db.search_conversations(
                query="crackerjack quality success failed",
                project=Path(working_directory).name,
                limit=200,
            )

            report = f"📈 **Quality Trends Analysis** (last {days} days)\n\n"

            if len(history) < 5:
                # Too few runs to infer a meaningful trend.
                return _format_insufficient_trend_data(report)

            successes, failures = _analyze_quality_trend_results(history)
            rate = _calculate_trend_success_rate(successes, failures)

            report += _format_trend_overview(successes, failures, rate)
            report += _format_trend_quality_insights(rate)
            report += _format_trend_recommendations(rate)
            return report

    except Exception as e:
        _get_logger().exception(f"Trend analysis failed: {e}")
        return f"❌ Trend analysis failed: {e!s}"

1246 

1247 

1248def _format_insufficient_trend_data(output: str) -> str: 

1249 """Format output when insufficient trend data is available.""" 

1250 output += "Insufficient data for trend analysis\n" 

1251 output += "💡 Run more crackerjack commands to build trend history\n" 

1252 return output 

1253 

1254 

1255def _analyze_quality_trend_results( 

1256 results: list[dict[str, Any]], 

1257) -> tuple[list[str], list[str]]: 

1258 """Analyze results to categorize success and failure trends.""" 

1259 success_trend = [] 

1260 failure_trend = [] 

1261 

1262 for result in results: 

1263 content = result.get("content", "").lower() 

1264 timestamp = result.get("timestamp", "") 

1265 

1266 if "success" in content or "✅" in content: 

1267 success_trend.append(timestamp) 

1268 elif "failed" in content or "error" in content or "❌" in content: 

1269 failure_trend.append(timestamp) 

1270 

1271 return success_trend, failure_trend 

1272 

1273 

1274def _calculate_trend_success_rate( 

1275 success_trend: list[str], 

1276 failure_trend: list[str], 

1277) -> float: 

1278 """Calculate success rate from trend data.""" 

1279 total_runs = len(success_trend) + len(failure_trend) 

1280 return (len(success_trend) / total_runs * 100) if total_runs > 0 else 0 

1281 

1282 

1283def _format_trend_overview( 

1284 success_trend: list[str], 

1285 failure_trend: list[str], 

1286 success_rate: float, 

1287) -> str: 

1288 """Format overall trends section.""" 

1289 total_runs = len(success_trend) + len(failure_trend) 

1290 output = "**Overall Trends**:\n" 

1291 output += f"- Total quality runs: {total_runs}\n" 

1292 output += f"- Success rate: {success_rate:.1f}%\n" 

1293 output += f"- Success trend: {len(success_trend)} passes\n" 

1294 output += f"- Failure trend: {len(failure_trend)} issues\n\n" 

1295 return output 

1296 

1297 

1298def _format_trend_quality_insights(success_rate: float) -> str: 

1299 """Format quality insights based on success rate.""" 

1300 if success_rate > 80: 

1301 return ( 

1302 "🎉 **Excellent quality trend!** Your code quality is consistently high.\n" 

1303 ) 

1304 if success_rate > 60: 

1305 return "✅ **Good quality trend.** Room for improvement in consistency.\n" 

1306 return "⚠️ **Quality attention needed.** Consider more frequent quality checks.\n" 

1307 

1308 

1309def _format_trend_recommendations(success_rate: float) -> str: 

1310 """Format quality recommendations based on success rate.""" 

1311 output = "\n**Recommendations**:\n" 

1312 if success_rate < 70: 

1313 output += "- Run `crackerjack --ai-fix -t` for automated fixing\n" 

1314 output += "- Increase frequency of quality checks\n" 

1315 output += "- Focus on test coverage improvement\n" 

1316 else: 

1317 output += "- Maintain current quality practices\n" 

1318 output += "- Consider adding complexity monitoring\n" 

1319 return output 

1320 

1321 

async def _crackerjack_health_check_impl() -> str:
    """Check Crackerjack integration health and provide diagnostics."""
    import os
    import subprocess  # nosec B404

    sections = ["🔧 **Crackerjack Health Check**\n\n"]

    # 1. Probe the crackerjack CLI via `python -m crackerjack --version`.
    try:
        proc = subprocess.run(
            ["python", "-m", "crackerjack", "--version"],
            check=False,
            capture_output=True,
            text=True,
            timeout=10,
            env=os.environ.copy(),
        )

        if proc.returncode == 0:
            sections.append("✅ **Crackerjack Installation**: Available\n")
            sections.append(f" Version: {proc.stdout.strip()}\n")
        else:
            sections.append("❌ **Crackerjack Installation**: Not working properly\n")
            sections.append(f" Error: {proc.stderr}\n")

    except subprocess.TimeoutExpired:
        sections.append("⏰ **Crackerjack Installation**: Timeout (slow system?)\n")
    except FileNotFoundError:
        sections.append("❌ **Crackerjack Installation**: Not found\n")
        sections.append(" 💡 Install with: `uv add crackerjack`\n")
    except Exception as e:
        sections.append(f"❌ **Crackerjack Installation**: Error - {e!s}\n")

    # 2. Verify the integration module imports cleanly (availability probe only).
    try:
        import session_buddy.crackerjack_integration  # noqa: F401

        sections.append("✅ **Integration Module**: Available\n")
    except ImportError:
        sections.append("❌ **Integration Module**: Not available\n")

    # 3. Confirm the reflection database is reachable for history storage.
    try:
        db = await _get_reflection_db()
        if db:
            async with db:
                stats = await db.get_stats()
                sections.append("✅ **History Storage**: Available\n")
                sections.append(f" Conversations: {stats.get('conversation_count', 0)}\n")
        else:
            sections.append("⚠️ **History Storage**: Reflection database unavailable\n")
    except Exception as e:
        sections.append(f"⚠️ **History Storage**: Limited - {e!s}\n")

    sections.append("\n**Recommendations**:\n")
    sections.append("- Run `crackerjack -t` to test full functionality\n")
    sections.append("- Use `crackerjack_run` for enhanced analytics\n")
    sections.append("- Check `crackerjack_history` for execution patterns\n")

    return "".join(sections)

1384 

1385 

def register_crackerjack_tools(mcp: Any) -> None:
    """Register all crackerjack integration MCP tools.

    Args:
        mcp: FastMCP server instance

    """
    # Registration order matches the original one-call-per-tool sequence.
    tools = (
        execute_crackerjack_command,
        crackerjack_run,
        crackerjack_history,
        crackerjack_metrics,
        crackerjack_patterns,
        crackerjack_help,
        get_crackerjack_results_history,
        get_crackerjack_quality_metrics,
        analyze_crackerjack_test_patterns,
        crackerjack_quality_trends,
        crackerjack_health_check,
        quality_monitor,
    )
    for tool_fn in tools:
        mcp.tool()(tool_fn)