Coverage for src/tracekit/core/logging.py: 86%
263 statements
coverage.py v7.13.1, created at 2026-01-11 23:04 +0000
1"""Structured logging infrastructure for TraceKit.
3This module provides structured logging with JSON/logfmt support,
4hierarchical loggers, log rotation, and error context capture.
7Example:
8 >>> from tracekit.core.logging import configure_logging, get_logger
9 >>> configure_logging(format='json', level='INFO')
10 >>> logger = get_logger('tracekit.loaders')
11 >>> logger.info("Loading trace", file="data.bin", size_mb=1024)
13References:
14 Python logging module best practices
15 LOG-001 through LOG-008 requirements
16"""

from __future__ import annotations

import gzip
import json
import logging
import logging.handlers
import os
import shutil
import sys
import time
import traceback
from dataclasses import dataclass
from datetime import UTC, datetime
from pathlib import Path
from typing import Any, Literal

# Global logging configuration
_logging_configured = False
_root_logger_name = "tracekit"


@dataclass
class LogConfig:
    """Logging configuration.

    Attributes:
        level: Default log level (DEBUG, INFO, WARNING, ERROR, CRITICAL).
        format: Output format (json, logfmt, text).
        timestamp_format: Timestamp format (iso8601, iso8601_local, unix, custom).
        custom_timestamp_format: Custom timestamp format string.
        console_output: Enable console output to stderr.
        file_output: Enable file output.
        file_path: Path to log file.
        max_bytes: Maximum log file size before rotation.
        backup_count: Number of rotated log files to keep.
        compress: Compress rotated log files.
        when: Time-based rotation interval type ('midnight', 'H', 'D', 'W0'-'W6').
        interval: Interval for time-based rotation.
        max_age: Maximum age for log files (e.g., '30d').

    References:
        LOG-001: Structured Logging Framework
        LOG-003: Automatic Log Rotation and Retention Policies
    """

    level: str = "INFO"
    format: Literal["json", "logfmt", "text"] = "text"
    timestamp_format: Literal["iso8601", "iso8601_local", "unix", "custom"] = "iso8601"
    custom_timestamp_format: str | None = None
    console_output: bool = True
    file_output: bool = False
    file_path: str | None = None
    max_bytes: int = 10_000_000  # 10 MB
    backup_count: int = 5
    compress: bool = False
    when: str | None = None  # Time-based rotation: 'midnight', 'H', 'D', 'W0'-'W6'
    interval: int = 1  # Interval for time-based rotation
    max_age: str | None = None  # Maximum age for log files (e.g., '30d')


# Default configuration
_config = LogConfig()


class CompressingRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """RotatingFileHandler that compresses rotated files with gzip.

    Extends standard RotatingFileHandler to optionally compress log files
    when they are rotated, saving disk space for historical logs.

    Args:
        filename: Path to log file.
        mode: File open mode.
        maxBytes: Maximum file size before rotation.
        backupCount: Number of backup files to keep.
        encoding: File encoding.
        compress: Whether to gzip compress rotated files.

    References:
        LOG-003: Automatic Log Rotation and Retention Policies
    """

    def __init__(
        self,
        filename: str,
        mode: str = "a",
        maxBytes: int = 0,
        backupCount: int = 0,
        encoding: str | None = None,
        compress: bool = False,
    ):
        """Initialize compressing rotating file handler.

        Args:
            filename: Path to log file.
            mode: File open mode.
            maxBytes: Maximum file size before rotation.
            backupCount: Number of backup files to keep.
            encoding: File encoding.
            compress: Whether to gzip compress rotated files.
        """
        super().__init__(filename, mode, maxBytes, backupCount, encoding)
        self.compress = compress

    def doRollover(self) -> None:
        """Perform rollover and optionally compress the rotated file.

        References:
            LOG-003: Automatic Log Rotation and Retention Policies
        """
        # Standard rollover
        super().doRollover()

        # Compress the rolled file if compression is enabled
        if self.compress and self.backupCount > 0:
            # The most recently rotated file is .1
            rotated_file = f"{self.baseFilename}.1"
            compressed_file = f"{rotated_file}.gz"

            if Path(rotated_file).exists():  # coverage: partial branch; always true in tests
                # Compress the file
                with open(rotated_file, "rb") as f_in:
                    with gzip.open(compressed_file, "wb") as f_out:
                        shutil.copyfileobj(f_in, f_out)

                # Remove the uncompressed file
                Path(rotated_file).unlink()
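
# Example (sketch, not part of the module): size-based rotation with gzip
# compression. "app.log" and the byte counts below are illustrative values.
#
#     >>> handler = CompressingRotatingFileHandler(
#     ...     "app.log", maxBytes=1_000_000, backupCount=3, compress=True
#     ... )
#     >>> logging.getLogger("tracekit").addHandler(handler)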


class CompressingTimedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler):
    """TimedRotatingFileHandler that compresses rotated files with gzip.

    Extends standard TimedRotatingFileHandler to optionally compress log files
    when they are rotated, saving disk space for historical logs.

    Args:
        filename: Path to log file.
        when: Type of interval ('midnight', 'H', 'D', 'W0'-'W6').
        interval: Number of intervals between rotations.
        backupCount: Number of backup files to keep.
        encoding: File encoding.
        compress: Whether to gzip compress rotated files.
        max_age: Maximum age for log files (e.g., '30d').

    References:
        LOG-003: Automatic Log Rotation and Retention Policies
    """

    def __init__(
        self,
        filename: str,
        when: str = "midnight",
        interval: int = 1,
        backupCount: int = 0,
        encoding: str | None = None,
        compress: bool = False,
        max_age: str | None = None,
    ):
        """Initialize compressing timed rotating file handler.

        Args:
            filename: Path to log file.
            when: Type of interval ('midnight', 'H', 'D', 'W0'-'W6').
            interval: Number of intervals between rotations.
            backupCount: Number of backup files to keep.
            encoding: File encoding.
            compress: Whether to gzip compress rotated files.
            max_age: Maximum age for log files (e.g., '30d').
        """
        super().__init__(filename, when, interval, backupCount, encoding=encoding)
        self.compress = compress
        self.max_age = max_age
        self._max_age_seconds = self._parse_max_age(max_age) if max_age else None

    def _parse_max_age(self, max_age: str) -> int:
        """Parse max_age string to seconds.

        Args:
            max_age: Age string like '30d', '7d', '24h'.

        Returns:
            Number of seconds.

        Raises:
            ValueError: If max_age format is invalid.
        """
        if max_age.endswith("d"):
            return int(max_age[:-1]) * 86400  # days to seconds
        elif max_age.endswith("h"):
            return int(max_age[:-1]) * 3600  # hours to seconds
        elif max_age.endswith("m"):
            return int(max_age[:-1]) * 60  # minutes to seconds
        else:
            raise ValueError(f"Invalid max_age format: {max_age}. Use 'd', 'h', or 'm' suffix.")
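
    # Worked example (sketch): '30d' parses to 30 * 86400 = 2_592_000 seconds,
    # '24h' to 24 * 3600 = 86_400, and '15m' to 15 * 60 = 900; any other suffix
    # raises ValueError.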

    def doRollover(self) -> None:
        """Perform rollover and optionally compress the rotated file.

        Also cleans up files older than max_age if specified.

        References:
            LOG-003: Automatic Log Rotation and Retention Policies
        """
        # Close stream before rollover
        if self.stream is not None:
            self.stream.close()
            self.stream = None  # type: ignore[assignment]

        # Determine the file that just got rotated; the suffix is a strftime
        # pattern, so it must be rendered with time.strftime
        current_time = int(self.rolloverAt - self.interval)
        time_tuple = time.gmtime(current_time) if self.utc else time.localtime(current_time)
        dfn = self.rotation_filename(self.baseFilename + "." + time.strftime(self.suffix, time_tuple))

        # Handle the existing rotated file
        if Path(dfn).exists():
            Path(dfn).unlink()

        # Rotate the current file
        self.rotate(self.baseFilename, dfn)

        # Compress if enabled
        if self.compress and Path(dfn).exists():
            compressed_file = f"{dfn}.gz"
            with open(dfn, "rb") as f_in, gzip.open(compressed_file, "wb") as f_out:
                shutil.copyfileobj(f_in, f_out)
            Path(dfn).unlink()

        # Clean up old files based on max_age
        if self._max_age_seconds:
            self._cleanup_old_files()

        # Delete old files based on backupCount
        if self.backupCount > 0:
            self._delete_old_files()

        # Set next rollover time
        new_rollover_at = self.computeRollover(current_time)
        while new_rollover_at <= self.rolloverAt:
            new_rollover_at = new_rollover_at + self.interval
        self.rolloverAt = new_rollover_at

        # Open new log file
        if not self.delay:
            self.stream = self._open()

    def _cleanup_old_files(self) -> None:
        """Remove log files older than max_age.

        References:
            LOG-003: Automatic Log Rotation and Retention Policies
        """
        if not self._max_age_seconds:  # coverage: partial branch; never true in tests
            return

        now = datetime.now().timestamp()
        base_path = Path(self.baseFilename)
        log_dir = base_path.parent
        base_name = base_path.name

        for log_file in log_dir.glob(f"{base_name}.*"):
            try:
                file_age = now - log_file.stat().st_mtime
                if file_age > self._max_age_seconds:  # coverage: partial branch; always true in tests
                    log_file.unlink()
            except OSError:
                pass  # Ignore errors during cleanup

    def _delete_old_files(self) -> None:
        """Delete files exceeding backup count.

        References:
            LOG-003: Automatic Log Rotation and Retention Policies
        """
        base_path = Path(self.baseFilename)
        log_dir = base_path.parent
        base_name = base_path.name

        # Get all rotated files, newest first
        rotated_files = sorted(
            log_dir.glob(f"{base_name}.*"),
            key=lambda p: p.stat().st_mtime,
            reverse=True,
        )

        # Remove files beyond backup count
        for old_file in rotated_files[self.backupCount :]:
            try:
                old_file.unlink()
            except OSError:
                pass
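
# Example (sketch, not part of the module): daily rotation at midnight,
# gzip-compressed, keeping rotated files at most 30 days. "app.log" is an
# illustrative path.
#
#     >>> handler = CompressingTimedRotatingFileHandler(
#     ...     "app.log", when="midnight", backupCount=30,
#     ...     compress=True, max_age="30d",
#     ... )
#     >>> logging.getLogger("tracekit").addHandler(handler)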


class StructuredFormatter(logging.Formatter):
    """Formatter that produces structured log output (JSON or logfmt).

    Supports multiple output formats with ISO 8601 timestamps and
    automatic correlation ID injection.

    Args:
        fmt: Output format (json, logfmt, text).
        timestamp_format: Timestamp format (iso8601, iso8601_local, unix).

    References:
        LOG-001: Structured Logging Framework
        LOG-005: ISO 8601 Timestamps
    """

    def __init__(
        self,
        fmt: Literal["json", "logfmt", "text"] = "text",
        timestamp_format: str = "iso8601",
    ):
        """Initialize structured formatter.

        Args:
            fmt: Output format.
            timestamp_format: Timestamp format.
        """
        super().__init__()
        self.fmt = fmt
        self.timestamp_format = timestamp_format

    def format(self, record: logging.LogRecord) -> str:
        """Format log record as structured output.

        Args:
            record: Log record to format.

        Returns:
            Formatted log string.
        """
        # Get timestamp
        timestamp = self._format_timestamp(record.created)

        # Build structured data
        data = {
            "timestamp": timestamp,
            "level": record.levelname,
            "module": record.name,
            "message": record.getMessage(),
        }

        # Add correlation ID if present
        try:
            from tracekit.core.correlation import get_correlation_id

            corr_id = get_correlation_id()
            if corr_id:
                data["correlation_id"] = corr_id
        except ImportError:
            pass  # Correlation module not yet loaded

        # Add extra fields (anything that is not a standard LogRecord attribute)
        for key, value in record.__dict__.items():
            if key not in (
                "name",
                "msg",
                "args",
                "levelname",
                "levelno",
                "pathname",
                "filename",
                "module",
                "exc_info",
                "exc_text",
                "stack_info",
                "lineno",
                "funcName",
                "created",
                "msecs",
                "relativeCreated",
                "thread",
                "threadName",
                "processName",
                "process",
                "message",
                "asctime",
            ):
                data[key] = value

        # Add exception info if present
        if record.exc_info:
            data["exception"] = self.formatException(record.exc_info)

        if self.fmt == "json":
            return json.dumps(data, default=str)
        elif self.fmt == "logfmt":
            return self._format_logfmt(data)
        else:
            # Plain text format
            extra = " ".join(
                f"{k}={v}"
                for k, v in data.items()
                if k not in ("timestamp", "level", "module", "message")
            )
            base = f"{timestamp} [{record.levelname}] {record.name}: {record.getMessage()}"
            if extra:  # coverage: partial branch; always true in tests
                base += f" | {extra}"
            return base

    def _format_timestamp(self, created: float) -> str:
        """Format timestamp according to configuration.

        Args:
            created: Timestamp as float (seconds since epoch).

        Returns:
            Formatted timestamp string.

        References:
            LOG-005: ISO 8601 Timestamps
        """
        dt = datetime.fromtimestamp(created, tz=UTC)
        if self.timestamp_format == "iso8601":
            # ISO 8601 with microseconds: 2025-12-20T15:30:45.123456Z
            return dt.strftime("%Y-%m-%dT%H:%M:%S.%f") + "Z"
        elif self.timestamp_format == "iso8601_local":
            dt_local = datetime.fromtimestamp(created)
            return dt_local.strftime("%Y-%m-%dT%H:%M:%S.%f")
        elif self.timestamp_format == "unix":  # coverage: partial branch; custom fallthrough never exercised
            return str(created)
        else:
            return dt.strftime(self.timestamp_format)

    def _format_logfmt(self, data: dict) -> str:  # type: ignore[type-arg]
        """Format data as logfmt (key=value pairs).

        Args:
            data: Dictionary to format.

        Returns:
            Logfmt formatted string.
        """
        parts = []
        for key, value in data.items():
            if isinstance(value, str) and (" " in value or '"' in value):
                # Quote values with spaces or quotes, escaping embedded quotes
                escaped = value.replace('"', '\\"')
                value_str = f'"{escaped}"'
            else:
                value_str = str(value)
            parts.append(f"{key}={value_str}")
        return " ".join(parts)


def configure_logging(
    *,
    level: str = "INFO",
    format: Literal["json", "logfmt", "text"] = "text",
    timestamp_format: Literal["iso8601", "iso8601_local", "unix"] = "iso8601",
    handlers: dict[str, dict[str, Any]] | None = None,
) -> None:
    """Configure TraceKit logging.

    Sets up structured logging with the specified format and handlers.
    Supports both size-based and time-based log rotation with optional
    gzip compression.

    Args:
        level: Default log level (DEBUG, INFO, WARNING, ERROR, CRITICAL).
        format: Output format (json, logfmt, or text).
        timestamp_format: Timestamp format (iso8601, iso8601_local, unix).
        handlers: Dict of handler configurations. Supported handlers:
            - 'console': Console output to stderr.
                - level: Log level for this handler.
            - 'file': File output with rotation.
                - filename: Path to log file.
                - level: Log level for this handler.
                - max_bytes: Max file size before rotation (size-based).
                - backup_count: Number of rotated files to keep.
                - compress: Gzip compress rotated files.
                - when: Time-based rotation ('midnight', 'H', 'D', 'W0'-'W6').
                - interval: Interval for time-based rotation.
                - max_age: Max age for log files (e.g., '30d').

    Example:
        >>> # Size-based rotation with compression
        >>> configure_logging(handlers={
        ...     'file': {'filename': 'app.log', 'max_bytes': 10e6, 'compress': True}
        ... })
        >>> # Time-based daily rotation
        >>> configure_logging(handlers={
        ...     'file': {'filename': 'app.log', 'when': 'midnight', 'backup_count': 30}
        ... })
        >>> # Combined: time-based with max_age cleanup
        >>> configure_logging(handlers={
        ...     'file': {'filename': 'app.log', 'when': 'midnight',
        ...              'compress': True, 'max_age': '30d'}
        ... })

    References:
        LOG-001: Structured Logging Framework
        LOG-002: Hierarchical Log Levels
        LOG-003: Automatic Log Rotation and Retention Policies
    """
    global _logging_configured, _config  # noqa: PLW0602

    # Update config
    _config.level = level
    _config.format = format
    _config.timestamp_format = timestamp_format

    # Get or create root logger
    root_logger = logging.getLogger(_root_logger_name)
    root_logger.setLevel(getattr(logging, level.upper()))

    # Remove existing handlers and close them to prevent resource leaks
    for handler in root_logger.handlers[:]:
        handler.close()
        root_logger.removeHandler(handler)

    # Create formatter
    formatter = StructuredFormatter(format, timestamp_format)

    # Add handlers
    if handlers:
        for name, config in handlers.items():
            if name == "console":
                handler = logging.StreamHandler(sys.stderr)
                handler.setLevel(getattr(logging, config.get("level", level).upper()))
                handler.setFormatter(formatter)
                root_logger.addHandler(handler)
            elif name == "file":  # coverage: partial branch; always true in tests
                filename = config.get("filename", "tracekit.log")
                handler_level = config.get("level", "DEBUG")
                backup_count = int(config.get("backup_count", 5))
                compress = config.get("compress", False)

                # Check if time-based rotation is requested
                when = config.get("when")
                if when:
                    # Time-based rotation (LOG-003)
                    interval = int(config.get("interval", 1))
                    max_age = config.get("max_age")
                    handler = CompressingTimedRotatingFileHandler(
                        filename,
                        when=when,
                        interval=interval,
                        backupCount=backup_count,
                        compress=compress,
                        max_age=max_age,
                    )
                else:
                    # Size-based rotation
                    max_bytes = int(config.get("max_bytes", 10_000_000))
                    handler = CompressingRotatingFileHandler(
                        filename,
                        maxBytes=max_bytes,
                        backupCount=backup_count,
                        compress=compress,
                    )

                handler.setLevel(getattr(logging, handler_level.upper()))
                handler.setFormatter(formatter)
                root_logger.addHandler(handler)
    else:
        # Default: console only
        handler = logging.StreamHandler(sys.stderr)
        handler.setLevel(getattr(logging, level.upper()))
        handler.setFormatter(formatter)
        root_logger.addHandler(handler)

    _logging_configured = True
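
# Example (sketch): combining a console handler with a compressed rotating
# file handler, which the docstring examples above do not show. Paths and
# levels are illustrative.
#
#     >>> configure_logging(level='DEBUG', format='logfmt', handlers={
#     ...     'console': {'level': 'WARNING'},
#     ...     'file': {'filename': 'tracekit.log', 'max_bytes': 5_000_000,
#     ...              'backup_count': 3, 'compress': True},
#     ... })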


def get_logger(name: str) -> logging.Logger:
    """Get a logger with the specified name.

    Returns a logger under the tracekit namespace with proper
    configuration.

    Args:
        name: Logger name (e.g., 'tracekit.loaders.binary').

    Returns:
        Configured logging.Logger instance.

    Example:
        >>> logger = get_logger('tracekit.analyzers.spectral')
        >>> logger.info("Computing FFT", extra={"samples": 1_000_000})

    References:
        LOG-001: Structured Logging Framework
    """
    if not _logging_configured:
        # Auto-configure with defaults
        configure_logging()

    # Ensure name is under tracekit namespace
    if not name.startswith(_root_logger_name):
        name = f"{_root_logger_name}.{name}"

    return logging.getLogger(name)


def set_log_level(level: str, module: str | None = None) -> None:
    """Set log level globally or for a specific module.

    Args:
        level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL).
        module: Module name to set level for, or None for global.

    Example:
        >>> set_log_level('DEBUG')  # Global
        >>> set_log_level('DEBUG', 'tracekit.loaders')  # Module-specific

    References:
        LOG-002: Hierarchical Log Levels
    """
    logger = logging.getLogger(module) if module else logging.getLogger(_root_logger_name)
    logger.setLevel(getattr(logging, level.upper()))


class ErrorContextCapture:
    """Captures rich error context including stack traces and local variables.

    Provides detailed error information for debugging including:
    - Full stack trace
    - Local variables at each frame
    - Exception chain
    - System information

    Example:
        >>> try:
        ...     risky_operation()
        ... except Exception as exc:
        ...     context = ErrorContextCapture.from_exception(exc, include_locals=True)
        ...     logger.error("Operation failed", extra=context.to_dict())

    References:
        LOG-008: Rich Error Context with Stack Traces
        CORE-006: Helpful exception messages
    """

    def __init__(
        self,
        exc_type: type[BaseException],
        exc_value: BaseException,
        exc_traceback: Any,
        additional_context: dict[str, Any] | None = None,
    ):
        """Initialize error context capture.

        Args:
            exc_type: Exception type.
            exc_value: Exception instance.
            exc_traceback: Exception traceback.
            additional_context: Additional context to include.
        """
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.exc_traceback = exc_traceback
        self.additional_context = additional_context or {}

    @classmethod
    def from_exception(
        cls,
        exc: BaseException,
        include_locals: bool = True,  # noqa: ARG003
        additional_context: dict[str, Any] | None = None,
    ) -> ErrorContextCapture:
        """Create error context from an exception.

        Args:
            exc: Exception to capture.
            include_locals: Whether to include local variables.
            additional_context: Additional context to include.

        Returns:
            ErrorContextCapture instance.
        """
        exc_type = type(exc)
        exc_value = exc
        exc_traceback = exc.__traceback__
        return cls(exc_type, exc_value, exc_traceback, additional_context)

    def to_dict(self, include_locals: bool = True) -> dict[str, Any]:
        """Convert error context to dictionary.

        Args:
            include_locals: Whether to include local variables.

        Returns:
            Dictionary with error context.

        References:
            LOG-008: Rich Error Context
        """
        result: dict[str, Any] = {
            "exception_type": self.exc_type.__name__,
            "exception_module": self.exc_type.__module__,
            "exception_message": str(self.exc_value),
            "traceback": traceback.format_exception(
                self.exc_type, self.exc_value, self.exc_traceback
            ),
        }

        # Add exception chain (every exception has __cause__, so no hasattr check needed)
        if self.exc_value.__cause__:
            result["caused_by"] = {
                "type": type(self.exc_value.__cause__).__name__,
                "message": str(self.exc_value.__cause__),
            }

        # Add local variables if requested
        if include_locals and self.exc_traceback:
            frames = []
            tb = self.exc_traceback
            while tb is not None:
                frame = tb.tb_frame
                frames.append(
                    {
                        "filename": frame.f_code.co_filename,
                        "function": frame.f_code.co_name,
                        "lineno": tb.tb_lineno,
                        "locals": self._filter_sensitive_data(
                            {k: repr(v) for k, v in frame.f_locals.items()}
                        ),
                    }
                )
                tb = tb.tb_next
            result["frames"] = frames

        # Add additional context
        if self.additional_context:
            result["context"] = self.additional_context

        return result

    def _filter_sensitive_data(self, data: dict[str, str]) -> dict[str, str]:
        """Filter sensitive data from local variables.

        Args:
            data: Dictionary of local variables.

        Returns:
            Filtered dictionary with sensitive data redacted.

        References:
            LOG-008: Rich Error Context (sensitive data filtering)
        """
        sensitive_keys = {
            "password",
            "passwd",
            "pwd",
            "secret",
            "token",
            "api_key",
            "apikey",
            "auth",
            "authorization",
        }

        filtered = {}
        for key, value in data.items():
            key_lower = key.lower()
            if any(sensitive in key_lower for sensitive in sensitive_keys):
                filtered[key] = "***REDACTED***"
            else:
                filtered[key] = value
        return filtered
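
# Example (sketch): locals whose names look sensitive are redacted before they
# are logged. Values below are illustrative.
#
#     >>> ctx = ErrorContextCapture(ValueError, ValueError("boom"), None)
#     >>> ctx._filter_sensitive_data({"api_key": "'abc123'", "count": "7"})
#     {'api_key': '***REDACTED***', 'count': '7'}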


def log_exception(
    exc: BaseException,
    logger: logging.Logger | None = None,
    context: dict[str, Any] | None = None,
    include_locals: bool = False,
) -> None:
    """Log an exception with full context.

    Captures rich error context including stack traces, exception chain,
    and optionally local variables for debugging.

    Args:
        exc: The exception to log.
        logger: Logger to use (default: root tracekit logger).
        context: Additional context to include.
        include_locals: Whether to include local variables from stack frames.

    Example:
        >>> try:
        ...     result = complex_computation(data)
        ... except Exception as e:
        ...     log_exception(e, context={"data_size": len(data)})

    References:
        LOG-008: Rich Error Context with Stack Traces
        CORE-006: Helpful exception messages
    """
    if logger is None:  # coverage: partial branch; never true in tests
        logger = get_logger("tracekit")

    # Capture error context
    error_context = ErrorContextCapture.from_exception(
        exc, include_locals=include_locals, additional_context=context
    )

    # Convert to dict and log
    context_dict = error_context.to_dict(include_locals=include_locals)

    # Log with explicit exception info so this also works outside an except block
    logger.error("Exception occurred", exc_info=exc, extra=context_dict)


def format_timestamp(
    dt: datetime | None = None,
    format: Literal["iso8601", "iso8601_local", "unix"] = "iso8601",
) -> str:
    """Format a timestamp according to LOG-005 requirements.

    Args:
        dt: Datetime to format, or None for current time.
        format: Format to use (iso8601, iso8601_local, unix).

    Returns:
        Formatted timestamp string.

    Raises:
        ValueError: If format is unknown.

    Example:
        >>> ts = format_timestamp()
        >>> print(ts)  # 2025-12-20T15:30:45.123456Z

    References:
        LOG-005: ISO 8601 Timestamps
    """
    if dt is None:
        dt = datetime.now(UTC)

    if format == "iso8601":
        # Normalize aware datetimes to UTC before appending the Z designator
        if dt.tzinfo is not None:
            dt = dt.astimezone(UTC)
        return dt.strftime("%Y-%m-%dT%H:%M:%S.%f") + "Z"
    elif format == "iso8601_local":
        dt_local = dt.astimezone()
        return dt_local.strftime("%Y-%m-%dT%H:%M:%S.%f")
    elif format == "unix":
        return str(dt.timestamp())
    else:
        raise ValueError(f"Unknown timestamp format: {format}")
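
# Example (sketch): one instant rendered in each format; the datetime is an
# arbitrary illustrative value.
#
#     >>> moment = datetime(2025, 12, 20, 15, 30, 45, 123456, tzinfo=UTC)
#     >>> format_timestamp(moment, "iso8601")
#     '2025-12-20T15:30:45.123456Z'
#     >>> format_timestamp(moment, "unix")
#     '1766244645.123456'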


# Initialize logging on module import (with defaults)
def _init_logging() -> None:
    """Initialize logging with environment variable configuration.

    Reads:
        TRACEKIT_LOG_LEVEL: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        TRACEKIT_LOG_FORMAT: Log format (json, logfmt, text)

    References:
        LOG-001: Structured Logging Framework
        LOG-002: Hierarchical Log Levels
    """
    level = os.environ.get("TRACEKIT_LOG_LEVEL", "WARNING")
    log_format = os.environ.get("TRACEKIT_LOG_FORMAT", "text")

    if log_format in ("json", "logfmt", "text"):
        configure_logging(level=level, format=log_format)  # type: ignore[arg-type]
    else:
        configure_logging(level=level)


# Auto-initialize on import
_init_logging()
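
# Example (sketch): selecting level and format through environment variables
# before the application imports tracekit; the values shown are illustrative.
#
#     $ TRACEKIT_LOG_LEVEL=DEBUG TRACEKIT_LOG_FORMAT=json python app.py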

# Re-export correlation and performance functions for convenience
# These provide LOG-004 and LOG-006 functionality through this module
from tracekit.core.correlation import (  # noqa: E402
    CorrelationContext,
    generate_correlation_id,
    get_correlation_id,
    set_correlation_id,
    with_correlation_id,
)
from tracekit.core.performance import (  # noqa: E402
    PerformanceContext,
    PerformanceRecord,
    clear_performance_data,
    get_performance_records,
    get_performance_summary,
    timed,
)

__all__ = [
    "CompressingRotatingFileHandler",
    "CompressingTimedRotatingFileHandler",
    "CorrelationContext",
    # Error handling (LOG-008)
    "ErrorContextCapture",
    "LogConfig",
    "PerformanceContext",
    "PerformanceRecord",
    "StructuredFormatter",
    "clear_performance_data",
    # Logging configuration (LOG-001, LOG-002, LOG-003)
    "configure_logging",
    # Timestamps (LOG-005)
    "format_timestamp",
    "generate_correlation_id",
    # Correlation ID (LOG-004)
    "get_correlation_id",
    "get_logger",
    "get_performance_records",
    "get_performance_summary",
    "log_exception",
    "set_correlation_id",
    "set_log_level",
    # Performance timing (LOG-006)
    "timed",
    "with_correlation_id",
]