Coverage for src / tracekit / analyzers / jitter / decomposition.py: 92%
232 statements
« prev ^ index » next coverage.py v7.13.1, created at 2026-01-11 23:04 +0000
"""Jitter decomposition into random and deterministic components.

This module implements IEEE 2414-2020 compliant jitter decomposition
using the dual-Dirac model and spectral analysis techniques.

Example:
    >>> from tracekit.analyzers.jitter.decomposition import extract_rj, extract_dj
    >>> rj_result = extract_rj(tie_data)
    >>> print(f"RJ RMS: {rj_result.rj_rms * 1e12:.2f} ps")

References:
    IEEE 2414-2020: Standard for Jitter and Phase Noise
    Dual-Dirac Model: JEDEC JESD65C
"""
17from __future__ import annotations
19from dataclasses import dataclass
20from typing import TYPE_CHECKING, Literal
22import numpy as np
23from scipy import stats
25from tracekit.core.exceptions import InsufficientDataError
27if TYPE_CHECKING:
28 from numpy.typing import NDArray
@dataclass
class RandomJitterResult:
    """Random jitter (RJ) extraction result.

    Attributes:
        rj_rms: Random jitter RMS value in seconds.
        method: Extraction method used ("tail_fit" or "q_scale").
        confidence: Confidence score in [0.0, 1.0].
        sigma: Fitted Gaussian sigma parameter in seconds.
        mu: Fitted Gaussian mean offset in seconds.
        n_samples: Number of TIE samples used for the estimate.
    """

    rj_rms: float
    method: str
    confidence: float
    sigma: float
    mu: float
    n_samples: int
@dataclass
class DeterministicJitterResult:
    """Deterministic jitter (DJ) extraction result.

    Attributes:
        dj_pp: Peak-to-peak deterministic jitter in seconds.
        dj_delta: Dual-Dirac delta (half-width) in seconds.
        method: Extraction method used.
        confidence: Confidence score in [0.0, 1.0].
        histogram: Histogram counts used for the analysis, if available.
        bin_centers: Bin centers matching ``histogram``, if available.
    """

    dj_pp: float
    dj_delta: float
    method: str
    confidence: float
    histogram: NDArray[np.float64] | None = None
    bin_centers: NDArray[np.float64] | None = None
@dataclass
class PeriodicJitterResult:
    """Periodic jitter (PJ) extraction result.

    Attributes:
        components: List of (frequency_hz, amplitude_seconds) tuples.
        pj_pp: Total periodic jitter peak-to-peak in seconds.
        dominant_frequency: Most significant PJ frequency in Hz, if any.
        dominant_amplitude: Amplitude at the dominant frequency in seconds.
    """

    components: list[tuple[float, float]]
    pj_pp: float
    dominant_frequency: float | None
    dominant_amplitude: float | None
@dataclass
class DataDependentJitterResult:
    """Data-dependent jitter (DDJ) extraction result.

    Attributes:
        ddj_pp: Peak-to-peak DDJ in seconds.
        pattern_histogram: Mean jitter keyed by bit-pattern string.
        pattern_length: Number of bits in each analyzed pattern.
        isi_coefficient: ISI correlation coefficient.
    """

    ddj_pp: float
    pattern_histogram: dict[str, float]
    pattern_length: int
    isi_coefficient: float
@dataclass
class JitterDecomposition:
    """Complete jitter decomposition result.

    Attributes:
        rj: Random jitter component.
        dj: Deterministic jitter component.
        pj: Periodic jitter component, if analyzed.
        ddj: Data-dependent jitter component, if analyzed.
        tj_pp: Total jitter peak-to-peak at the measured BER, if computed.
        ber_measured: BER at which TJ was measured, if computed.
    """

    rj: RandomJitterResult
    dj: DeterministicJitterResult
    pj: PeriodicJitterResult | None = None
    ddj: DataDependentJitterResult | None = None
    tj_pp: float | None = None
    ber_measured: float | None = None
def extract_rj(
    tie_data: NDArray[np.float64],
    *,
    method: Literal["tail_fit", "q_scale", "auto"] = "auto",
    min_samples: int = 1000,
) -> RandomJitterResult:
    """Extract the random (Gaussian) jitter component from TIE data.

    Separates the unbounded random component -- typically caused by thermal
    noise -- from the total jitter distribution using the dual-Dirac model.

    Args:
        tie_data: Time Interval Error data array in seconds.
        method: Extraction method:
            - "tail_fit": Fit Gaussian to distribution tails
            - "q_scale": Q-scale (probabilistic) analysis
            - "auto": Automatically select best method
        min_samples: Minimum samples required for analysis.

    Returns:
        RandomJitterResult with RJ_rms and analysis details.

    Raises:
        InsufficientDataError: If tie_data has fewer than min_samples
            total (or NaN-free) samples.
        ValueError: If method is not recognized.

    Example:
        >>> rj = extract_rj(tie_data)
        >>> print(f"RJ: {rj.rj_rms * 1e12:.2f} ps RMS")

    References:
        IEEE 2414-2020 Section 6.2
    """
    if len(tie_data) < min_samples:
        raise InsufficientDataError(
            f"RJ extraction requires at least {min_samples} samples",
            required=min_samples,
            available=len(tie_data),
            analysis_type="random_jitter_extraction",
        )

    # Drop NaN entries, then re-check: the analysis needs NaN-free samples.
    valid_data = tie_data[~np.isnan(tie_data)]
    if len(valid_data) < min_samples:
        raise InsufficientDataError(
            f"RJ extraction requires at least {min_samples} valid samples",
            required=min_samples,
            available=len(valid_data),
            analysis_type="random_jitter_extraction",
        )

    # "auto" prefers Q-scale for large datasets, tail fitting for smaller.
    chosen = method
    if chosen == "auto":
        chosen = "q_scale" if len(valid_data) > 10000 else "tail_fit"

    if chosen == "tail_fit":
        return _extract_rj_tail_fit(valid_data)
    if chosen == "q_scale":
        return _extract_rj_q_scale(valid_data)
    raise ValueError(f"Unknown method: {chosen}")
def _extract_rj_tail_fit(tie_data: NDArray[np.float64]) -> RandomJitterResult:
    """Estimate RJ by fitting a Gaussian to the outer tails of the TIE CDF.

    The distribution tails are least contaminated by bounded (deterministic)
    jitter, so a Q-Q (normal probability) regression restricted to the tails
    gives a robust estimate of the underlying Gaussian parameters.

    Args:
        tie_data: Time Interval Error samples in seconds.

    Returns:
        RandomJitterResult describing the fitted Gaussian.
    """
    ordered = np.sort(tie_data)
    n = len(ordered)

    # Percentile-based seed: for a Gaussian, P84 - P16 spans 2 * sigma and
    # the median is mu.
    p16 = np.percentile(ordered, 16)
    p50 = np.percentile(ordered, 50)
    p84 = np.percentile(ordered, 84)
    sigma_estimate = (p84 - p16) / 2
    mu_estimate = p50

    # Outer 2.5% on each side (roughly beyond +/-2 sigma for a Gaussian).
    tail_fraction = 0.025
    lower_tail_idx = int(n * tail_fraction)
    upper_tail_idx = int(n * (1 - tail_fraction))
    tail_indices = np.concatenate(
        [np.arange(0, lower_tail_idx), np.arange(upper_tail_idx, n)]
    )

    # Default to the percentile estimate with moderate confidence; a tail
    # regression below replaces it when enough tail points are available.
    sigma, mu, confidence = sigma_estimate, mu_estimate, 0.6

    if len(tail_indices) >= 10:
        tail_data = ordered[tail_indices]
        # Empirical plotting positions for the tail points (upper tail is
        # shifted by one position so it stays strictly inside (0, 1)).
        tail_probabilities = np.array(
            [i / (n - 1) if i < lower_tail_idx else (i + 1) / (n - 1) for i in tail_indices]
        )

        theoretical_quantiles = stats.norm.ppf(tail_probabilities)
        finite = np.isfinite(theoretical_quantiles)

        if np.sum(finite) >= 10:
            # Q-Q regression: data = sigma * theoretical_quantile + mu.
            slope, intercept, r_value, _, _ = stats.linregress(
                theoretical_quantiles[finite], tail_data[finite]
            )
            sigma = abs(slope)
            mu = intercept
            confidence = max(0.0, min(1.0, r_value**2))

    return RandomJitterResult(
        rj_rms=sigma,
        method="tail_fit",
        confidence=confidence,
        sigma=sigma,
        mu=mu,
        n_samples=len(tie_data),
    )
def _extract_rj_q_scale(tie_data: NDArray[np.float64]) -> RandomJitterResult:
    """Estimate RJ via Q-scale (normal probability plot) analysis.

    On a Q-Q plot the Gaussian (random) component appears as a straight line
    whose slope is sigma; bounded deterministic jitter bends the center of
    the plot, so the regression is restricted to the tails.

    Args:
        tie_data: Time Interval Error samples in seconds.

    Returns:
        RandomJitterResult describing the fitted Gaussian.
    """
    n = len(tie_data)
    ordered = np.sort(tie_data)

    # Hazen plotting positions mapped onto standard-normal quantiles.
    probabilities = (np.arange(1, n + 1) - 0.5) / n
    theoretical_quantiles = stats.norm.ppf(probabilities)

    # Guard against non-finite quantiles at the extreme positions.
    finite = np.isfinite(theoretical_quantiles)
    theoretical_quantiles = theoretical_quantiles[finite]
    ordered = ordered[finite]

    # Regress over the outer 30% of the data (15% per side).
    n_valid = len(ordered)
    tail_frac = 0.15
    lower_idx = int(n_valid * tail_frac)
    upper_idx = int(n_valid * (1 - tail_frac))
    tail_indices = np.concatenate(
        [np.arange(0, lower_idx), np.arange(upper_idx, n_valid)]
    )

    if len(tail_indices) < 10:
        # Too few tail points for a fit: fall back to simple moments.
        sigma = np.std(ordered)
        mu = np.mean(ordered)
        confidence = 0.3
    else:
        slope, intercept, r_value, _p_value, _std_err = stats.linregress(
            theoretical_quantiles[tail_indices], ordered[tail_indices]
        )
        # Q-Q slope is sigma; intercept is mu.
        sigma = abs(slope)
        mu = intercept
        confidence = min(1.0, max(0.0, r_value**2))

    return RandomJitterResult(
        rj_rms=sigma,
        method="q_scale",
        confidence=confidence,
        sigma=sigma,
        mu=mu,
        n_samples=n,
    )
def extract_dj(
    tie_data: NDArray[np.float64],
    rj_result: RandomJitterResult | None = None,
    *,
    min_samples: int = 1000,
) -> DeterministicJitterResult:
    """Extract deterministic jitter component from TIE data.

    DJ is the bounded, repeatable component of jitter. It is calculated
    as TJ - RJ contribution using the dual-Dirac model.

    Args:
        tie_data: Time Interval Error data array in seconds.
        rj_result: Pre-computed RJ result (computed if None).
        min_samples: Minimum samples required.

    Returns:
        DeterministicJitterResult with DJ_pp value.

    Raises:
        InsufficientDataError: If insufficient total or NaN-free samples.

    Example:
        >>> dj = extract_dj(tie_data)
        >>> print(f"DJ: {dj.dj_pp * 1e12:.2f} ps peak-to-peak")

    References:
        IEEE 2414-2020 Section 6.3
    """
    if len(tie_data) < min_samples:
        raise InsufficientDataError(
            f"DJ extraction requires at least {min_samples} samples",
            required=min_samples,
            available=len(tie_data),
            analysis_type="deterministic_jitter_extraction",
        )

    valid_data = tie_data[~np.isnan(tie_data)]

    # Fix: re-validate after NaN removal (mirrors extract_rj). Previously a
    # mostly-NaN input with a caller-supplied rj_result could reach
    # np.histogram with zero bins and raise an unrelated ValueError.
    if len(valid_data) < min_samples:
        raise InsufficientDataError(
            f"DJ extraction requires at least {min_samples} valid samples",
            required=min_samples,
            available=len(valid_data),
            analysis_type="deterministic_jitter_extraction",
        )

    # Get RJ if not provided - use tail fitting for better RJ isolation
    if rj_result is None:
        rj_result = extract_rj(valid_data, method="tail_fit", min_samples=min_samples)
    rj_rms = rj_result.rj_rms

    # Histogram of the TIE distribution for bimodality detection.
    # With >= min_samples valid points, n_bins >= min_samples // 50 > 0.
    n_bins = min(100, len(valid_data) // 50)
    hist, bin_edges = np.histogram(valid_data, bins=n_bins, density=False)
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2

    # Dual-Dirac model: DJ places two impulses at +/-delta, each convolved
    # with Gaussian RJ:  0.5*N(mu-delta, sigma) + 0.5*N(mu+delta, sigma)
    sorted_data = np.sort(valid_data)
    n = len(sorted_data)

    from scipy.ndimage import gaussian_filter1d
    from scipy.signal import find_peaks

    dj_pp = 0.0
    peak_separation_dj = None
    # Initialize explicitly so the confidence step below never sees an
    # undefined name if the histogram branch is skipped.
    peaks = np.array([], dtype=int)

    # Smooth the histogram so bin noise does not create spurious maxima.
    if len(hist) >= 5:
        hist_smooth = gaussian_filter1d(hist.astype(float), sigma=2)
        peaks, properties = find_peaks(hist_smooth, prominence=np.max(hist_smooth) * 0.1)

        # If we find 2 clear peaks, DJ is their separation.
        if len(peaks) >= 2:
            # Sort peaks by prominence and take the top two.
            prominences = properties.get("prominences", np.ones(len(peaks)))
            sorted_peak_idx = np.argsort(prominences)[::-1][:2]
            top_peaks = np.sort(peaks[sorted_peak_idx])

            # Peak separation in the histogram.
            peak_positions = bin_centers[top_peaks]
            peak_separation_dj = abs(peak_positions[1] - peak_positions[0])

    # Use the peak separation only if it clearly exceeds the RJ width.
    if peak_separation_dj is not None and peak_separation_dj > 2 * rj_rms:
        dj_pp = peak_separation_dj
    else:
        # Fallback quantile method at BER = 1e-4 (Q ~ 3.72, well into tails).
        lower_idx = max(0, int(n * 0.0001))
        upper_idx = min(n - 1, int(n * 0.9999))
        tj_at_ber = sorted_data[upper_idx] - sorted_data[lower_idx]

        # Dual-Dirac + RJ:  TJ = 2*Q*RJ + DJ  =>  DJ = TJ - 2*Q*RJ.
        q_factor = 3.72
        rj_contribution = 2 * q_factor * rj_rms
        dj_pp = max(0.0, tj_at_ber - rj_contribution)

    # Dual-Dirac delta is half the peak-to-peak separation.
    dj_delta = dj_pp / 2

    # Confidence reflects how directly DJ was observed.
    if peak_separation_dj is not None:
        # Found bimodal peaks.
        confidence = 0.9 if len(peaks) == 2 else 0.7
    elif dj_pp > 2 * rj_rms:
        # Significant DJ from the quantile method.
        confidence = 0.5
    else:
        # Little or no DJ detected.
        confidence = 0.2

    return DeterministicJitterResult(
        dj_pp=dj_pp,
        dj_delta=dj_delta,
        method="dual_dirac",
        confidence=confidence,
        histogram=hist.astype(np.float64),
        bin_centers=bin_centers,
    )
def extract_pj(
    tie_data: NDArray[np.float64],
    sample_rate: float,
    *,
    min_frequency: float = 1.0,
    max_frequency: float | None = None,
    n_components: int = 5,
) -> PeriodicJitterResult:
    """Extract periodic jitter components via spectral analysis.

    Uses FFT of TIE data to identify sinusoidal jitter components,
    typically caused by power supply noise or EMI.

    Args:
        tie_data: Time Interval Error data array in seconds.
        sample_rate: Sample rate of edge events (edges per second).
        min_frequency: Minimum PJ frequency to detect (Hz).
        max_frequency: Maximum PJ frequency (default: Nyquist).
        n_components: Number of periodic components to extract.

    Returns:
        PeriodicJitterResult with frequency/amplitude pairs.

    Example:
        >>> pj = extract_pj(tie_data, sample_rate=1e6)
        >>> for freq, amp in pj.components:
        ...     print(f"{freq/1e3:.1f} kHz: {amp*1e12:.2f} ps")

    References:
        IEEE 2414-2020 Section 6.4
    """
    valid_data = tie_data[~np.isnan(tie_data)]
    n = len(valid_data)

    # Too little data for a meaningful spectrum.
    if n < 32:
        return PeriodicJitterResult(
            components=[],
            pj_pp=0.0,
            dominant_frequency=None,
            dominant_amplitude=None,
        )

    # Remove DC offset (mean) so it does not dominate the spectrum.
    data_centered = valid_data - np.mean(valid_data)

    # Hann window to reduce spectral leakage.
    window = np.hanning(n)
    data_windowed = data_centered * window

    # Zero-pad to the next power of two for the FFT.
    nfft = int(2 ** np.ceil(np.log2(n)))
    spectrum = np.fft.rfft(data_windowed, n=nfft)
    frequencies = np.fft.rfftfreq(nfft, d=1.0 / sample_rate)

    # Single-sided amplitude scaling: |X| * 2 / sum(window).
    # Fix: the previous 2/n scaling ignored the Hann window's coherent gain
    # (sum(window)/n ~= 0.5), underestimating every amplitude by ~2x.
    magnitudes = np.abs(spectrum) * 2 / np.sum(window)

    # Set frequency range (default upper bound: Nyquist).
    if max_frequency is None:
        max_frequency = sample_rate / 2

    # Restrict the search to the requested frequency band.
    freq_mask = (frequencies >= min_frequency) & (frequencies <= max_frequency)
    valid_freqs = frequencies[freq_mask]
    valid_mags = magnitudes[freq_mask]

    if len(valid_mags) < 3:
        return PeriodicJitterResult(
            components=[],
            pj_pp=0.0,
            dominant_frequency=None,
            dominant_amplitude=None,
        )

    # Find peaks (local maxima)
    from scipy.signal import find_peaks

    # Threshold: peaks must be 3x the median magnitude
    threshold = 3 * np.median(valid_mags)
    peak_indices, _properties = find_peaks(
        valid_mags,
        height=threshold,
        distance=3,  # Minimum separation between peaks
    )

    # Sort by amplitude and take top n_components
    if len(peak_indices) > 0:
        peak_heights = valid_mags[peak_indices]
        sorted_indices = np.argsort(peak_heights)[::-1][:n_components]
        top_peaks = peak_indices[sorted_indices]

        components = [(float(valid_freqs[idx]), float(valid_mags[idx])) for idx in top_peaks]

        # Total PJ as the sum of component amplitudes (peak-to-peak).
        pj_pp = 2 * sum(amp for _, amp in components)

        dominant_frequency = components[0][0] if components else None
        dominant_amplitude = components[0][1] if components else None
    else:
        components = []
        pj_pp = 0.0
        dominant_frequency = None
        dominant_amplitude = None

    return PeriodicJitterResult(
        components=components,
        pj_pp=pj_pp,
        dominant_frequency=dominant_frequency,
        dominant_amplitude=dominant_amplitude,
    )
def extract_ddj(
    tie_data: NDArray[np.float64],
    bit_pattern: NDArray[np.int_] | None = None,
    *,
    pattern_length: int = 3,
) -> DataDependentJitterResult:
    """Extract data-dependent jitter caused by ISI effects.

    Analyzes correlation between jitter and preceding bit patterns
    to identify inter-symbol interference (ISI) induced timing variations.

    Args:
        tie_data: Time Interval Error data array in seconds.
        bit_pattern: Associated bit pattern for each TIE sample.
        pattern_length: Number of preceding bits to correlate.

    Returns:
        DataDependentJitterResult with pattern-correlated jitter.

    Raises:
        ValueError: If bit_pattern length does not match tie_data length.

    Example:
        >>> ddj = extract_ddj(tie_data, bit_pattern=bits, pattern_length=3)
        >>> print(f"DDJ: {ddj.ddj_pp * 1e12:.2f} ps")

    References:
        IEEE 2414-2020 Section 6.5
    """
    valid_data = tie_data[~np.isnan(tie_data)]
    n = len(valid_data)

    if bit_pattern is None:
        # Without bit pattern data, estimate bimodality from the TIE
        # distribution: split at the median and compare the half-means.
        median = np.median(valid_data)
        above_median = valid_data > median

        pattern_histogram: dict[str, float] = {
            "above_median": float(np.mean(valid_data[above_median])),
            "below_median": float(np.mean(valid_data[~above_median])),
        }
        ddj_pp = abs(pattern_histogram["above_median"] - pattern_histogram["below_median"])

        return DataDependentJitterResult(
            ddj_pp=ddj_pp,
            pattern_histogram=pattern_histogram,
            pattern_length=pattern_length,
            isi_coefficient=0.0,  # Unknown without pattern
        )

    # With bit pattern available.
    # NOTE(review): length is checked against the NaN-filtered data, so if
    # tie_data contained NaNs the bit/TIE alignment assumes the caller already
    # dropped the corresponding bits -- TODO confirm against callers.
    if len(bit_pattern) != n:
        raise ValueError("bit_pattern length must match tie_data length")

    # Group TIE samples by the preceding bit pattern.  (Removes the previous
    # dead "2**pattern_length" expression and the lists-in-a-float-dict hack
    # that needed several type: ignore comments.)
    from collections import defaultdict

    pattern_samples: defaultdict[str, list[float]] = defaultdict(list)
    for i in range(pattern_length - 1, n):
        pattern_bits = bit_pattern[i - pattern_length + 1 : i + 1]
        pattern_str = "".join(str(int(b)) for b in pattern_bits)
        pattern_samples[pattern_str].append(valid_data[i])

    # Mean TIE for each observed pattern.
    pattern_means: dict[str, float] = {
        pattern: float(np.mean(values)) for pattern, values in pattern_samples.items()
    }

    # DDJ is the range of pattern-dependent means.
    if len(pattern_means) > 1:
        mean_values = list(pattern_means.values())
        ddj_pp = max(mean_values) - min(mean_values)
    else:
        ddj_pp = 0.0

    # ISI proxy: correlation between the previous bit and the current TIE.
    if n > 1:
        prev_bits = bit_pattern[:-1].astype(float)
        curr_tie = valid_data[1:]
        correlation = np.corrcoef(prev_bits, curr_tie)[0, 1]
        # corrcoef yields NaN for constant inputs; report 0.0 in that case.
        isi_coefficient = correlation if np.isfinite(correlation) else 0.0
    else:
        isi_coefficient = 0.0

    return DataDependentJitterResult(
        ddj_pp=ddj_pp,
        pattern_histogram=pattern_means,
        pattern_length=pattern_length,
        isi_coefficient=isi_coefficient,
    )
def decompose_jitter(
    tie_data: NDArray[np.float64],
    *,
    edge_rate: float | None = None,
    include_pj: bool = True,
    include_ddj: bool = False,
    bit_pattern: NDArray[np.int_] | None = None,
) -> JitterDecomposition:
    """Perform complete jitter decomposition.

    Decomposes total jitter into its constituent components:
    - Random Jitter (RJ): Unbounded Gaussian component
    - Deterministic Jitter (DJ): Bounded, repeatable component
    - Periodic Jitter (PJ): Sinusoidal components (optional)
    - Data-Dependent Jitter (DDJ): ISI-related component (optional)

    Args:
        tie_data: Time Interval Error data array in seconds.
        edge_rate: Rate of edges in Hz (required for PJ analysis).
        include_pj: Include periodic jitter analysis.
        include_ddj: Include data-dependent jitter analysis.
        bit_pattern: Bit pattern for DDJ analysis.

    Returns:
        JitterDecomposition with all component results.

    Example:
        >>> decomp = decompose_jitter(tie_data, edge_rate=1e9)
        >>> print(f"RJ: {decomp.rj.rj_rms * 1e12:.2f} ps")
        >>> print(f"DJ: {decomp.dj.dj_pp * 1e12:.2f} ps")

    References:
        IEEE 2414-2020 Section 6
    """
    # RJ first: the DJ step subtracts the RJ contribution from total jitter.
    rj_result = extract_rj(tie_data)
    dj_result = extract_dj(tie_data, rj_result)

    # PJ needs an edge rate to map FFT bins onto physical frequencies.
    pj_result = (
        extract_pj(tie_data, edge_rate)
        if include_pj and edge_rate is not None
        else None
    )

    # DDJ is opt-in; extract_ddj falls back to a pattern-free estimate
    # when bit_pattern is None.
    ddj_result = extract_ddj(tie_data, bit_pattern) if include_ddj else None

    return JitterDecomposition(
        rj=rj_result,
        dj=dj_result,
        pj=pj_result,
        ddj=ddj_result,
    )
# Public API of the jitter decomposition module (alphabetical: result
# dataclasses first, then the extraction entry points).
__all__ = [
    "DataDependentJitterResult",
    "DeterministicJitterResult",
    "JitterDecomposition",
    "PeriodicJitterResult",
    "RandomJitterResult",
    "decompose_jitter",
    "extract_ddj",
    "extract_dj",
    "extract_pj",
    "extract_rj",
]