Coverage for session_buddy / quality_engine.py: 65.15%
385 statements
« prev ^ index » next coverage.py v7.13.1, created at 2026-01-04 00:43 -0800
1#!/usr/bin/env python3
2"""Quality Engine for session-mgmt-mcp.
4This module contains all quality-related functions extracted from server.py
5following Phase 2.3 of the decomposition plan. These functions handle:
6- Quality score calculation and recommendations
7- Context compaction analysis and optimization
8- Token usage and conversation flow analysis
9- Memory patterns and workflow intelligence
10- Proactive quality monitoring
12All functions maintain their original signatures and behavior for backward compatibility.
13"""
15from __future__ import annotations
17import asyncio
18import os
19from contextlib import suppress
20from datetime import datetime
21from pathlib import Path
22from typing import TYPE_CHECKING, Any
24if TYPE_CHECKING:
25 from session_buddy.reflection_tools import ReflectionDatabase
27# Import utility functions
28from session_buddy.utils.file_utils import (
29 _cleanup_session_logs,
30 _cleanup_temp_files,
31 _cleanup_uv_cache,
32)
33from session_buddy.utils.git_utils import _optimize_git_repository
34from session_buddy.utils.quality import (
35 check_git_activity as _check_git_activity,
36)
37from session_buddy.utils.quality import (
38 count_significant_files as _count_significant_files,
39)
40from session_buddy.utils.quality import (
41 create_empty_summary as _create_empty_summary,
42)
43from session_buddy.utils.quality import (
44 ensure_summary_defaults as _ensure_summary_defaults,
45)
46from session_buddy.utils.quality import (
47 evaluate_git_activity_heuristic as _evaluate_git_activity_heuristic,
48)
49from session_buddy.utils.quality import (
50 evaluate_large_project_heuristic as _evaluate_large_project_heuristic,
51)
52from session_buddy.utils.quality import (
53 evaluate_python_project_heuristic as _evaluate_python_project_heuristic,
54)
55from session_buddy.utils.quality import (
56 get_default_compaction_reason as _get_default_compaction_reason,
57)
58from session_buddy.utils.quality import (
59 get_error_summary as _get_error_summary,
60)
61from session_buddy.utils.quality import (
62 get_fallback_compaction_reason as _get_fallback_compaction_reason,
63)
64from session_buddy.utils.quality import (
65 get_fallback_summary as _get_fallback_summary,
66)
67from session_buddy.utils.quality import (
68 process_recent_reflections as _process_recent_reflections,
69)
70from session_buddy.utils.quality_utils import (
71 _analyze_quality_trend,
72 _extract_quality_scores,
73 _generate_quality_trend_recommendations,
74 _get_intelligence_error_result,
75 _get_time_based_recommendations,
76)
77from session_buddy.utils.quality_utils_v2 import calculate_quality_score_v2
78from session_buddy.utils.server_helpers import _add_current_session_context
80# Extracted functions (compaction, recommendations, summary helpers) have been moved
81# to session_buddy.utils.quality module for better modularity and reusability.
83# ======================
84# Main Quality Functions (5)
85# ======================
def should_suggest_compact() -> tuple[bool, str]:
    """Decide whether context compaction would help and explain why.

    Heuristics are evaluated lazily, in priority order (project size,
    git activity, Python-project complexity); the first one that fires
    determines the answer.

    Returns:
        Tuple of ``(should_compact, reason)``.
    """
    try:
        project_root = Path(os.environ.get("PWD", Path.cwd()))

        # Lazy callables so later (more expensive) checks only run when
        # the earlier ones did not trigger — same order as before.
        heuristic_checks = (
            lambda: _evaluate_large_project_heuristic(
                _count_significant_files(project_root),
            ),
            lambda: _evaluate_git_activity_heuristic(
                _check_git_activity(project_root),
            ),
            lambda: _evaluate_python_project_heuristic(project_root),
        )

        for check in heuristic_checks:
            triggered, why = check()
            if triggered:
                return triggered, why

        # No clear indicator — do not suggest compaction by default.
        return False, _get_default_compaction_reason()
    except Exception:
        # Analysis failed entirely; err on the side of suggesting
        # compaction for safety.
        return True, _get_fallback_compaction_reason()
131async def _optimize_reflection_database() -> str:
132 """Optimize the reflection database."""
133 try:
134 from session_buddy.reflection_tools import get_reflection_database
136 db = await get_reflection_database()
137 await db.get_stats()
138 db_size_before = (
139 Path(db.db_path).stat().st_size if Path(db.db_path).exists() else 0
140 )
142 if db.conn: 142 ↛ 155line 142 didn't jump to line 155 because the condition on line 142 was always true
143 conn = (
144 db.conn
145 ) # Capture the connection to help mypy understand it's not None
146 await asyncio.get_event_loop().run_in_executor(
147 None,
148 lambda: conn.execute("VACUUM"),
149 )
150 await asyncio.get_event_loop().run_in_executor(
151 None,
152 lambda: conn.execute("ANALYZE"),
153 )
155 db_size_after = (
156 Path(db.db_path).stat().st_size if Path(db.db_path).exists() else 0
157 )
158 space_saved = db_size_before - db_size_after
160 return f"🗄️ Database: {'Optimized reflection DB, saved ' + str(space_saved) + ' bytes' if space_saved > 0 else 'Reflection DB already optimized'}"
162 except ImportError:
163 return "ℹ️ Database: Reflection tools not available"
164 except Exception as e:
165 return f"⚠️ Database: Optimization skipped - {str(e)[:50]}"
async def _analyze_context_compaction() -> list[str]:
    """Build a human-readable report on whether to compact the context."""
    report: list[str] = []
    try:
        compact_needed, reason = should_suggest_compact()
        report.append("\n🔍 Context Compaction Analysis")
        report.append(f"📊 {reason}")

        if compact_needed:
            report += [
                "",
                "🔄 RECOMMENDATION: Run /compact to optimize context",
                "📝 Benefits of compaction:",
                " • Improved response speed and accuracy",
                " • Better focus on current development context",
                " • Reduced memory usage for complex sessions",
                " • Cleaner conversation flow",
                "",
                "💡 WORKFLOW: After this checkpoint completes, run: /compact",
                "🔄 Context compaction should be applied for optimal performance",
            ]
    except Exception as e:
        report.append(f"⚠️ Compaction analysis skipped: {str(e)[:50]}")

    return report
198async def _store_context_summary(conversation_summary: dict[str, Any]) -> None:
199 """Store conversation summary for future context retrieval."""
200 with suppress(ImportError, RuntimeError, OSError, ValueError, AttributeError):
201 from session_buddy.reflection_tools import get_reflection_database
203 db = await get_reflection_database()
204 summary_text = f"Session context: {', '.join(conversation_summary.get('key_topics', [])[:3])}"
205 if conversation_summary.get("decisions_made"):
206 summary_text += (
207 f". Key decisions: {conversation_summary['decisions_made'][0]}"
208 )
210 await db.store_reflection(summary_text, ["context-summary", "compaction"])
async def perform_strategic_compaction() -> list[str]:
    """Run all strategic optimization tasks and summarize the outcome."""
    project_root = Path(os.environ.get("PWD", Path.cwd()))

    report: list[str] = []
    report.append(await _optimize_reflection_database())  # Database
    report.append(_cleanup_session_logs())  # Session logs
    report.append(_cleanup_temp_files(project_root))  # Temp files
    report.extend(_optimize_git_repository(project_root))  # Git repo
    report.append(_cleanup_uv_cache())  # UV cache
    report.extend(await _analyze_context_compaction())  # Compaction analysis

    # Entries prefixed with info/warning/timing markers were skipped,
    # so they do not count as performed operations.
    performed = sum(
        1 for entry in report if not entry.startswith(("ℹ️", "⚠️", "⏱️"))
    )
    report += [
        f"\n📊 Strategic compaction complete: {performed} optimization tasks performed",
        "🎯 Recommendation: Conversation context should be compacted automatically",
    ]
    return report
async def monitor_proactive_quality() -> dict[str, Any]:
    """Phase 3: proactive quality monitoring with an early-warning system.

    Returns a dict with ``quality_trend``, ``alerts``,
    ``recommend_checkpoint`` and ``monitoring_active``; on unexpected
    failure an error payload is returned instead.
    """
    try:
        trend = "stable"
        alerts: list[str] = []
        checkpoint_needed = False

        try:
            trend, alerts, checkpoint_needed = await _perform_quality_analysis()
        except ImportError:
            # Reflection tooling missing — keep the neutral defaults.
            alerts.append("Reflection tools not available")

        return {
            "quality_trend": trend,
            "alerts": alerts,
            "recommend_checkpoint": checkpoint_needed,
            "monitoring_active": True,
        }
    except Exception as e:
        return _get_quality_error_result(e)
276# ======================
277# Context Analysis Functions (4)
278# ======================
281async def _generate_basic_insights(
282 quality_score: float,
283 conversation_summary: dict[str, Any],
284) -> list[str]:
285 """Generate basic session insights from quality score and conversation summary."""
286 insights = []
288 insights.append(
289 f"Session checkpoint completed with quality score: {quality_score}/100",
290 )
292 if conversation_summary["key_topics"]:
293 insights.append(
294 f"Key discussion topics: {', '.join(conversation_summary['key_topics'][:3])}",
295 )
297 if conversation_summary["decisions_made"]:
298 insights.append(
299 f"Important decisions: {conversation_summary['decisions_made'][0]}",
300 )
302 if conversation_summary["next_steps"]:
303 insights.append(
304 f"Next steps identified: {conversation_summary['next_steps'][0]}",
305 )
307 return insights
async def _add_project_context_insights(insights: list[str]) -> None:
    """Append active project-context info (if any) to *insights* in place."""
    from session_buddy.utils.project_analysis import analyze_project_context

    project_root = Path(os.environ.get("PWD", Path.cwd()))
    context = await analyze_project_context(project_root)

    # Only truthy context entries count as "active".
    active = [name for name, present in context.items() if present]
    if active:
        insights.append(f"Active project context: {', '.join(active)}")
def _generate_session_tags(quality_score: float) -> list[str]:
    """Build the tag list for storing a checkpoint reflection."""
    # Imported here to avoid a circular dependency at module load time.
    from session_buddy.reflection_tools import get_current_project

    project = get_current_project() or "unknown-project"
    tags = ["checkpoint", "session-summary", project]

    # Flag exceptionally good or poor sessions for later retrieval;
    # mid-range scores (60-79) get no extra tag.
    if quality_score >= 80:
        tags.append("excellent-session")
    elif quality_score < 60:
        tags.append("needs-attention")
    return tags
async def summarize_current_conversation() -> dict[str, Any]:
    """Phase 3: AI-powered conversation summarization.

    Builds a summary from recent reflections when the reflection system
    is available; otherwise returns a generic fallback summary.
    """
    try:
        summary = _create_empty_summary()
        try:
            from session_buddy.reflection_tools import get_reflection_database

            db = await get_reflection_database()
            await _process_recent_reflections(db, summary)
            _add_current_session_context(summary)
            _ensure_summary_defaults(summary)
        except Exception:
            # Covers both missing reflection tooling (ImportError) and
            # any processing failure — the fallback is identical.
            summary = _get_fallback_summary()

        return summary
    except Exception as e:
        return _get_error_summary(e)
359# ======================
360# Token & Conversation Analysis Functions (5)
361# ======================
async def analyze_token_usage_patterns() -> dict[str, Any]:
    """Phase 3A: intelligent token-usage analysis with smart triggers."""
    try:
        stats = await _get_conversation_statistics()
        return _finalize_token_analysis(_analyze_context_usage_patterns(stats))
    except Exception as error:
        # Failure payload mirrors the success shape plus an "error" key.
        return {
            "needs_attention": False,
            "status": "analysis_failed",
            "estimated_length": "unknown",
            "recommend_compact": False,
            "recommend_clear": False,
            "error": str(error),
        }
382async def _get_conversation_statistics() -> dict[str, int]:
383 """Get conversation statistics from memory system."""
384 conv_stats: dict[str, int] = {
385 "total_conversations": 0,
386 "recent_activity": 0,
387 }
389 with suppress(ImportError, RuntimeError, OSError, ValueError, AttributeError):
390 from session_buddy.reflection_tools import get_reflection_database
392 db = await get_reflection_database()
393 stats = await db.get_stats()
394 conv_stats["total_conversations"] = stats.get("conversations_count", 0)
396 return conv_stats
399def _analyze_context_usage_patterns(conv_stats: dict[str, int]) -> dict[str, Any]:
400 """Analyze context usage patterns and generate recommendations."""
401 estimated_length = "moderate"
402 needs_attention = False
403 recommend_compact = False
404 recommend_clear = False
406 total_conversations = conv_stats["total_conversations"]
408 # Progressive thresholds based on conversation count
409 if total_conversations > 3: 409 ↛ 410line 409 didn't jump to line 410 because the condition on line 409 was never true
410 estimated_length = "extensive"
411 needs_attention = True
412 recommend_compact = True
414 if total_conversations > 10: 414 ↛ 415line 414 didn't jump to line 415 because the condition on line 414 was never true
415 estimated_length = "very long"
416 needs_attention = True
417 recommend_compact = True
419 if total_conversations > 20: 419 ↛ 420line 419 didn't jump to line 420 because the condition on line 419 was never true
420 estimated_length = "extremely long"
421 needs_attention = True
422 recommend_compact = True
423 recommend_clear = True
425 return {
426 "estimated_length": estimated_length,
427 "needs_attention": needs_attention,
428 "recommend_compact": recommend_compact,
429 "recommend_clear": recommend_clear,
430 }
433def _finalize_token_analysis(analysis: dict[str, Any]) -> dict[str, Any]:
434 """Finalize token analysis with checkpoint override."""
435 # Override: ALWAYS recommend compaction during checkpoints
436 analysis["recommend_compact"] = True
437 analysis["needs_attention"] = True
439 if analysis["estimated_length"] == "moderate": 439 ↛ 442line 439 didn't jump to line 442 because the condition on line 439 was always true
440 analysis["estimated_length"] = "checkpoint-session"
442 analysis["status"] = (
443 "optimal" if not analysis["needs_attention"] else "needs optimization"
444 )
446 return analysis
def _classify_reflection_pattern(
    recent_reflections: list[dict[str, Any]],
) -> tuple[str, list[str]]:
    """Classify session flow from recent checkpoint reflections.

    Keyword signals in the reflection contents decide the pattern:
    "excellent" → productive, "attention" → needs optimization,
    otherwise steady progress.

    Returns:
        Tuple of (pattern_type, recommendations).
    """
    contents = [r["content"].lower() for r in recent_reflections]

    if any("excellent" in content for content in contents):
        return "productive_development", [
            "Continue current productive workflow",
            "Consider documenting successful patterns",
            "Maintain current checkpoint frequency",
        ]
    if any("attention" in content for content in contents):
        return "optimization_needed", [
            "Review recent workflow changes",
            "Consider more frequent checkpoints",
            "Use search tools to find successful patterns",
        ]
    return "steady_progress", [
        "Maintain current workflow patterns",
        "Consider periodic workflow evaluation",
    ]


async def analyze_conversation_flow() -> dict[str, Any]:
    """Phase 3A: analyze conversation patterns and flow.

    Inspects recent "session checkpoint" reflections to infer the
    session's flow pattern and matching workflow recommendations.

    Returns:
        Dict with ``pattern_type``, ``recommendations`` and
        ``confidence``; on total failure an ``error`` key replaces
        ``confidence``.
    """
    try:
        try:
            from session_buddy.reflection_tools import get_reflection_database

            db = await get_reflection_database()
            recent_reflections = await db.search_reflections(
                "session checkpoint",
                limit=5,
            )

            if recent_reflections:
                pattern_type, recommendations = _classify_reflection_pattern(
                    recent_reflections,
                )
            else:
                pattern_type = "new_session"
                recommendations = [
                    "Establish workflow patterns through regular checkpoints",
                ]

        except ImportError:
            # Reflection tooling not installed at all.
            pattern_type = "basic_session"
            recommendations = ["Enable reflection tools for advanced flow analysis"]
        except Exception:
            # Tooling present but the query failed.
            pattern_type = "analysis_unavailable"
            recommendations = [
                "Use regular checkpoints to establish workflow patterns",
            ]

        return {
            "pattern_type": pattern_type,
            "recommendations": recommendations,
            "confidence": "pattern_based",
        }
    except Exception as e:
        return {
            "pattern_type": "analysis_failed",
            "recommendations": ["Use basic workflow patterns"],
            "error": str(e),
        }
517# ======================
518# Memory & Workflow Functions (4)
519# ======================
async def analyze_memory_patterns(db: Any, conv_count: int) -> dict[str, Any]:
    """Phase 3A: advanced memory pattern analysis.

    Chooses a summary/suggestions tier purely from ``conv_count``; the
    ``db`` handle is accepted for interface compatibility but is not
    consulted here.
    """
    try:
        if conv_count == 0:
            summary = "New session - no historical patterns yet"
            suggestions = [
                "Start building conversation history for better insights",
            ]
        elif conv_count < 5:
            summary = f"{conv_count} conversations stored - building pattern recognition"
            suggestions = [
                "Continue regular checkpoints to build session intelligence",
                "Use store_reflection for important insights",
            ]
        elif conv_count < 20:
            summary = f"{conv_count} conversations stored - developing patterns"
            suggestions = [
                "Use reflect_on_past to leverage growing knowledge base",
                "Search previous solutions before starting new implementations",
            ]
        else:
            summary = f"{conv_count} conversations - rich pattern recognition available"
            suggestions = [
                "Leverage extensive history with targeted searches",
                "Consider workflow optimization based on successful patterns",
                "Use conversation history to accelerate problem-solving",
            ]

        return {"summary": summary, "proactive_suggestions": suggestions}

    except Exception as e:
        return {
            "summary": "Memory analysis unavailable",
            "proactive_suggestions": [
                "Use basic memory tools for conversation tracking",
            ],
            "error": str(e),
        }
async def analyze_project_workflow_patterns(current_dir: Path) -> dict[str, Any]:
    """Phase 3A: project-specific workflow pattern analysis.

    Detects project characteristics under *current_dir* and converts
    them into workflow recommendations; falls back to generic advice
    with an ``error`` key if detection fails.
    """
    try:
        characteristics = _detect_project_characteristics(current_dir)
        return {
            "workflow_recommendations": _generate_workflow_recommendations(
                characteristics,
            ),
            "project_characteristics": characteristics,
        }
    except Exception as e:
        return {
            "workflow_recommendations": ["Use basic project workflow patterns"],
            "error": str(e),
        }
588def _generate_workflow_recommendations(characteristics: dict[str, bool]) -> list[str]:
589 """Generate workflow recommendations based on project characteristics."""
590 recommendations = []
592 if characteristics["has_tests"]:
593 recommendations.extend(
594 [
595 "Use targeted test commands for specific test scenarios",
596 "Consider test-driven development workflow with regular testing",
597 ],
598 )
600 if characteristics["has_git"]:
601 recommendations.extend(
602 [
603 "Leverage git context for branch-specific development",
604 "Use commit messages to track progress patterns",
605 ],
606 )
608 if characteristics["has_python"] and characteristics["has_tests"]:
609 recommendations.append(
610 "Python+Testing: Consider pytest workflows with coverage analysis",
611 )
613 if characteristics["has_node"]: 613 ↛ 614line 613 didn't jump to line 614 because the condition on line 613 was never true
614 recommendations.append(
615 "Node.js project: Leverage npm/yarn scripts in development workflow",
616 )
618 if characteristics["has_docker"]: 618 ↛ 619line 618 didn't jump to line 619 because the condition on line 618 was never true
619 recommendations.append(
620 "Containerized project: Consider container-based development workflows",
621 )
623 # Default recommendations if no specific patterns detected
624 if not recommendations:
625 recommendations.append(
626 "Establish project-specific workflow patterns through regular checkpoints",
627 )
629 return recommendations
632def _detect_project_characteristics(current_dir: Path) -> dict[str, bool]:
633 """Detect project characteristics from directory structure."""
634 return {
635 "has_tests": (current_dir / "tests").exists()
636 or (current_dir / "test").exists(),
637 "has_git": (current_dir / ".git").exists(),
638 "has_python": (current_dir / "pyproject.toml").exists()
639 or (current_dir / "requirements.txt").exists(),
640 "has_node": (current_dir / "package.json").exists(),
641 "has_docker": (current_dir / "Dockerfile").exists()
642 or (current_dir / "docker-compose.yml").exists(),
643 }
646def _check_workflow_drift(quality_scores: list[float]) -> tuple[list[str], bool]:
647 """Check for workflow drift indicators."""
648 quality_alerts = []
649 recommend_checkpoint = False
651 if len(quality_scores) >= 4:
652 variance = max(quality_scores) - min(quality_scores)
653 if variance > 30:
654 quality_alerts.append(
655 "High quality variance detected - workflow inconsistency",
656 )
657 recommend_checkpoint = True
659 return quality_alerts, recommend_checkpoint
662# ======================
663# Intelligence & Insights Functions (5)
664# ======================
async def _capture_intelligence_insights(
    db: ReflectionDatabase,
    tags: list[str],
    results: list[str],
) -> None:
    """Store proactive session-intelligence insights as a reflection.

    No-op when the intelligence report has no priority actions.
    """
    intelligence = await generate_session_intelligence()
    actions = intelligence["priority_actions"]
    if not actions:
        return

    insight_text = (
        f"Session intelligence: {intelligence['intelligence_level']}. "
        f"Priority: {actions[0]}"
    )
    stored_id = await db.store_reflection(
        insight_text,
        [*tags, "intelligence", "proactive"],
    )
    results.append(f"🧠 Intelligence insights stored: {stored_id[:12]}...")
685async def _analyze_reflection_based_intelligence() -> list[str]:
686 """Analyze recent reflections for intelligence recommendations."""
687 try:
688 from session_buddy.reflection_tools import get_reflection_database
690 db = await get_reflection_database()
691 recent_reflections = await db.search_reflections("checkpoint", limit=3)
693 if recent_reflections:
694 recent_scores = _extract_quality_scores(recent_reflections)
695 return _generate_quality_trend_recommendations(recent_scores)
697 except ImportError:
698 return []
699 except Exception:
700 return ["Enable reflection analysis for session trend intelligence"]
702 return []
async def generate_session_intelligence() -> dict[str, Any]:
    """Phase 3A: generate proactive session intelligence and priority actions."""
    try:
        now = datetime.now()

        # Merge recommendations from every available source, then make
        # sure callers always get at least one action.
        actions: list[str] = []
        actions += _get_time_based_recommendations(now.hour)
        actions += await _analyze_reflection_based_intelligence()

        return {
            "priority_actions": _ensure_default_recommendations(actions),
            "intelligence_level": "proactive",
            "timestamp": now.isoformat(),
        }
    except Exception as e:
        return _get_intelligence_error_result(e)
726def _ensure_default_recommendations(priority_actions: list[str]) -> list[str]:
727 """Ensure at least one recommendation is provided."""
728 if not priority_actions: 728 ↛ 729line 728 didn't jump to line 729 because the condition on line 728 was never true
729 return ["Continue regular checkpoint monitoring"]
730 return priority_actions
733# ======================
734# Quality Analysis & Recommendations Functions (6)
735# ======================
738async def _perform_quality_analysis() -> tuple[str, list[str], bool]:
739 """Perform quality analysis with reflection data."""
740 quality_alerts = []
741 quality_trend = "stable"
742 recommend_checkpoint = False
744 try:
745 from session_buddy.reflection_tools import get_reflection_database
747 db = await get_reflection_database()
748 recent_reflections = await db.search_reflections("quality score", limit=5)
749 quality_scores = _extract_quality_scores(recent_reflections)
751 if quality_scores:
752 trend, trend_alerts, trend_checkpoint = _analyze_quality_trend(
753 quality_scores,
754 )
755 quality_trend = trend
756 quality_alerts.extend(trend_alerts)
757 recommend_checkpoint = recommend_checkpoint or trend_checkpoint
759 drift_alerts, drift_checkpoint = _check_workflow_drift(quality_scores)
760 quality_alerts.extend(drift_alerts)
761 recommend_checkpoint = recommend_checkpoint or drift_checkpoint
763 except (ImportError, Exception):
764 quality_alerts.append("Quality monitoring analysis unavailable")
766 return quality_trend, quality_alerts, recommend_checkpoint
769def _get_quality_error_result(error: Exception) -> dict[str, Any]:
770 """Get error result for quality monitoring failure."""
771 return {
772 "quality_trend": "unknown",
773 "alerts": ["Quality monitoring failed"],
774 "recommend_checkpoint": False,
775 "monitoring_active": False,
776 "error": str(error),
777 }
async def _analyze_token_usage_recommendations(results: list[str]) -> None:
    """Append token-usage findings and compaction triggers to *results*."""
    analysis = await analyze_token_usage_patterns()

    if not analysis["needs_attention"]:
        results.append(f"✅ Context usage: {analysis['status']}")
        return

    results.append(f"⚠️ Context usage: {analysis['status']}")
    results.append(
        f" Estimated conversation length: {analysis['estimated_length']}",
    )

    # Smart compaction triggers take priority over other recommendations.
    if analysis["recommend_compact"]:
        results.append("🚨 CRITICAL AUTO-RECOMMENDATION: Context compaction required")
        results.append(
            "🔄 This checkpoint has prepared conversation summary for compaction",
        )
        results.append(
            "💡 Compaction should be applied automatically after this checkpoint",
        )

    if analysis["recommend_clear"]:
        results.append(
            "🆕 AUTO-RECOMMENDATION: Consider /clear for fresh context after compaction",
        )
async def _analyze_conversation_flow_recommendations(results: list[str]) -> None:
    """Append session-flow pattern info and recommendations to *results*."""
    flow = await analyze_conversation_flow()
    results.append(f"📊 Session flow: {flow['pattern_type']}")

    recommendations = flow["recommendations"]
    if recommendations:
        results.append("🎯 Flow-based recommendations:")
        results.extend(f" • {rec}" for rec in recommendations[:3])
820async def _analyze_memory_recommendations(results: list[str]) -> None:
821 """Analyze memory patterns and add recommendations."""
822 try:
823 from session_buddy.reflection_tools import get_reflection_database
825 db = await get_reflection_database()
826 stats = await db.get_stats()
827 conv_count = stats.get("conversations_count", 0)
829 # Advanced memory analysis
830 memory_insights = await analyze_memory_patterns(db, conv_count)
831 results.append(f"📚 Memory insights: {memory_insights['summary']}")
833 if memory_insights["proactive_suggestions"]: 833 ↛ exitline 833 didn't return from function '_analyze_memory_recommendations' because the condition on line 833 was always true
834 results.append("💡 Proactive suggestions:")
835 for suggestion in memory_insights["proactive_suggestions"][:2]:
836 results.append(f" • {suggestion}")
838 except ImportError:
839 pass
840 except Exception:
841 results.append("📚 Memory system available for conversation search")
844# ======================
845# Advanced Metrics & Context Analysis (2)
846# ======================
async def analyze_advanced_context_metrics() -> dict[str, Any]:
    """Phase 3A: advanced context metrics analysis (currently static).

    Real token counting is not implemented yet, so placeholder metrics
    are reported.
    """
    metrics: dict[str, Any] = {
        "estimated_tokens": 0,
        "context_density": "moderate",
        "conversation_depth": "active",
    }
    return metrics
async def analyze_context_usage() -> list[str]:
    """Phase 2 & 3A: advanced context analysis with intelligent recommendations."""
    report: list[str] = []

    try:
        report.append("🔍 Advanced context analysis and optimization...")

        # Phase 3A: advanced context intelligence (result not consumed here).
        await analyze_advanced_context_metrics()

        # Each analyzer appends its own section to the shared report,
        # in this fixed order.
        for analyzer in (
            _analyze_token_usage_recommendations,
            _analyze_conversation_flow_recommendations,
            _analyze_memory_recommendations,
            _analyze_project_workflow_recommendations,
            _analyze_session_intelligence_recommendations,
            _analyze_quality_monitoring_recommendations,
        ):
            await analyzer(report)

    except Exception as e:
        await _add_fallback_recommendations(report, e)

    return report
async def _analyze_project_workflow_recommendations(results: list[str]) -> None:
    """Append project-workflow optimization tips to *results*."""
    project_root = Path(os.environ.get("PWD", Path.cwd()))
    insights = await analyze_project_workflow_patterns(project_root)

    recommendations = insights["workflow_recommendations"]
    if recommendations:
        results.append("🚀 Workflow optimizations:")
        results.extend(f" • {tip}" for tip in recommendations[:2])
async def _analyze_session_intelligence_recommendations(results: list[str]) -> None:
    """Append proactive session-intelligence actions to *results*."""
    intelligence = await generate_session_intelligence()
    actions = intelligence["priority_actions"]
    if actions:
        results.append("\n🧠 Session Intelligence:")
        results.extend(f" • {action}" for action in actions[:3])
async def _analyze_quality_monitoring_recommendations(results: list[str]) -> None:
    """Append quality-trend status, alerts, and checkpoint advice to *results*."""
    monitoring = await monitor_proactive_quality()
    if not monitoring["monitoring_active"]:
        return

    results.append(f"\n📊 Quality Trend: {monitoring['quality_trend']}")

    alerts = monitoring["alerts"]
    if alerts:
        results.append("⚠️ Quality Alerts:")
        results.extend(f" • {alert}" for alert in alerts[:2])

    if monitoring["recommend_checkpoint"]:
        results.append("🔄 PROACTIVE RECOMMENDATION: Consider immediate checkpoint")
917async def _add_fallback_recommendations(results: list[str], error: Exception) -> None:
918 """Add fallback recommendations when analysis fails."""
919 results.append(f"❌ Advanced context analysis failed: {str(error)[:60]}...")
920 results.append("💡 Falling back to basic context management recommendations")
922 # Fallback to basic recommendations
923 results.append("🎯 Basic context actions:")
924 results.append(" • Use /compact for conversation summarization")
925 results.append(" • Use /clear for fresh context on new topics")
926 results.append(" • Use search tools to retrieve relevant discussions")
929# ======================
930# Quality Score Calculation (Fixed)
931# ======================
async def _perform_quality_assessment() -> tuple[int, dict[str, Any]]:
    """Run the quality calculation and return ``(score, full_data)``."""
    quality_data = await calculate_quality_score()
    return quality_data["total_score"], quality_data
async def calculate_quality_score(project_dir: Path | None = None) -> dict[str, Any]:
    """Calculate session quality score using the V2 algorithm.

    Wraps ``calculate_quality_score_v2`` so that callers written against
    the original ``calculate_quality_score`` API (which server.py calls)
    keep working.

    Args:
        project_dir: Project directory to score; defaults to the current
            working directory (``$PWD`` when set).

    Returns:
        Dict with ``total_score`` (int, 0-100), ``version``,
        ``project_health``, ``permissions_health``, ``session_health``,
        ``tool_health``, ``recommendations``, ``timestamp``,
        ``trust_score`` and a legacy ``breakdown`` mapping.
    """
    target_dir = (
        project_dir
        if project_dir is not None
        else Path(os.environ.get("PWD", Path.cwd()))
    )

    quality_result = await calculate_quality_score_v2(project_dir=target_dir)
    trust_details = quality_result.trust_score.details
    project_health = quality_result.project_health

    # Flatten the V2 dataclass into the legacy dict shape callers expect.
    payload: dict[str, Any] = {
        # int() for backward compatibility with callers expecting an int.
        "total_score": int(quality_result.total_score),
        "version": quality_result.version,
        "project_health": {
            "total": project_health.total,
            "tooling_score": project_health.tooling_score,
            "maturity_score": project_health.maturity_score,
            "details": project_health.details,
        },
        # Trust-score details stand in for the old permissions data.
        "permissions_health": trust_details,
        "session_health": {"status": "active"},  # Placeholder value.
        "tool_health": {"count": quality_result.trust_score.tool_ecosystem},
        "recommendations": quality_result.recommendations,
        "timestamp": quality_result.timestamp,
        # Exposed again under the old key for backward compatibility.
        "trust_score": trust_details,
    }

    # Legacy per-category breakdown kept for the existing test suite.
    payload["breakdown"] = {
        "project_health": project_health.total,
        "permissions": sum(trust_details.values()) if trust_details else 0,
        "session_management": 20,  # Fixed value as in the original tests.
        "tools": quality_result.trust_score.tool_ecosystem,
        "code_quality": project_health.tooling_score,
        "dev_velocity": quality_result.dev_velocity.total,
        "security": quality_result.security.total,
    }

    return payload