
1""" 

2Histogram-related functions 

3""" 

4import contextlib 

5import functools 

6import operator 

7import warnings 

8 

9import numpy as np 

10from numpy.core import overrides 

11 

12__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] 

13 

14array_function_dispatch = functools.partial( 

15 overrides.array_function_dispatch, module='numpy') 

16 

17# range is a keyword argument to many functions, so save the builtin so they can 

18# use it. 

19_range = range 


def _ptp(x):
    """Peak-to-peak value of x.

    This implementation avoids the problem of signed integer arrays having a
    peak-to-peak value that cannot be represented with the array's data type.
    This function returns an unsigned value for signed integer arrays.
    """
    return _unsigned_subtract(x.max(), x.min())


def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the data size. Used by many
    programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return _ptp(x) / np.sqrt(x.size)
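

# Illustrative sketch (not part of the upstream module): a hypothetical helper
# showing the arithmetic behind the square-root rule above. For 400 samples
# spanning a peak-to-peak range of 20, the rule gives a width of
# 20 / sqrt(400) = 1.0, i.e. roughly 20 bins across the data range.
def _demo_sqrt_rule():
    x = np.linspace(0.0, 20.0, 400)                  # ptp == 20, size == 400
    width = _hist_bin_sqrt(x, None)                  # 20 / sqrt(400) == 1.0
    return width, int(np.ceil(np.ptp(x) / width))    # (1.0, 20)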


def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return _ptp(x) / (np.log2(x.size) + 1.0)


def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return _ptp(x) / (2.0 * x.size ** (1.0 / 3))


def _hist_bin_scott(x, range):
    """
    Scott histogram bin estimator.

    The binwidth is proportional to the standard deviation of the data
    and inversely proportional to the cube root of data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
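

# Illustrative sketch (not part of the upstream module): a hypothetical helper
# making Scott's constant concrete. For n = 1000 samples with unit standard
# deviation, the factor (24 * sqrt(pi) / 1000) ** (1/3) is roughly 0.35, so the
# suggested bin width is about 0.35 * sigma.
def _demo_scott_factor(n=1000):
    return (24.0 * np.pi**0.5 / n)**(1.0 / 3.0)    # ~0.349 for n == 1000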


def _hist_bin_stone(x, range):
    """
    Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).

    The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
    The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule

    This paper by Stone appears to be the origination of this rule.
    http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.
    range : (float, float)
        The lower and upper range of the bins.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """

    n = x.size
    ptp_x = _ptp(x)
    if n <= 1 or ptp_x == 0:
        return 0

    def jhat(nbins):
        hh = ptp_x / nbins
        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
        return (2 - (n + 1) * p_k.dot(p_k)) / hh

    nbins_upper_bound = max(100, int(np.sqrt(n)))
    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
    if nbins == nbins_upper_bound:
        warnings.warn("The number of bins estimated may be suboptimal.",
                      RuntimeWarning, stacklevel=3)
    return ptp_x / nbins
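

# Illustrative sketch (not part of the upstream module): a hypothetical helper
# that evaluates the same cross-validation objective as ``jhat`` above for a
# handful of candidate bin counts, so the minimisation step is easy to inspect.
def _demo_stone_objective(x, candidate_nbins=(5, 10, 20, 50)):
    n = x.size
    ptp_x = np.ptp(x)
    scores = {}
    for nbins in candidate_nbins:
        hh = ptp_x / nbins                                   # candidate bin width
        p_k = np.histogram(x, bins=nbins)[0] / n             # empirical bin probabilities
        scores[nbins] = (2 - (n + 1) * p_k.dot(p_k)) / hh    # estimated ISE (up to a constant)
    return scores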


def _hist_bin_doane(x, range):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return _ptp(x) / (1.0 + np.log2(x.size) +
                              np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0
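

# Illustrative sketch (not part of the upstream module): a hypothetical helper
# computing the skewness term that Doane's rule adds on top of Sturges,
# assuming `x` has more than two elements and is not constant. For a symmetric
# sample g1 is ~0 and the extra log2 term vanishes, so the width matches
# Sturges; skewed data gives |g1| > 0 and hence narrower bins.
def _demo_doane_skew_term(x):
    n = x.size
    sg1 = np.sqrt(6.0 * (n - 2) / ((n + 1.0) * (n + 3)))    # std. error of the skewness
    g1 = np.mean(((x - np.mean(x)) / np.std(x))**3)         # sample skewness
    return np.log2(1.0 + np.absolute(g1) / sg1)             # extra term in the denominator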


def _hist_bin_fd(x, range):
    """
    The Freedman-Diaconis histogram bin estimator.

    The Freedman-Diaconis rule uses interquartile range (IQR) to
    estimate binwidth. It is considered a variation of the Scott rule
    with more robustness as the IQR is less affected by outliers than
    the standard deviation. However, the IQR depends on fewer points
    than the standard deviation, so it is less accurate, especially for
    long tailed distributions.

    If the IQR is 0, this function returns 0 for the bin width.
    Binwidth is inversely proportional to the cube root of data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    iqr = np.subtract(*np.percentile(x, [75, 25]))
    return 2.0 * iqr * x.size ** (-1.0 / 3.0)
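

# Illustrative sketch (not part of the upstream module): a hypothetical helper
# showing the Freedman-Diaconis arithmetic. For 1000 uniform samples on [0, 1)
# the IQR is about 0.5, so the rule suggests a width near
# 2 * 0.5 / 1000**(1/3) ~= 0.1, i.e. roughly 10 bins.
def _demo_fd_width(x):
    iqr = np.subtract(*np.percentile(x, [75, 25]))    # robust spread of the data
    return 2.0 * iqr * x.size ** (-1.0 / 3.0)         # same formula as _hist_bin_fd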


def _hist_bin_auto(x, range):
    """
    Histogram bin estimator that uses the minimum width of the
    Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
    If the bin width from the FD estimator is 0, the Sturges estimator is used.

    The FD estimator is usually the most robust method, but its width
    estimate tends to be too large for small `x` and bad for data with limited
    variance. The Sturges estimator is quite good for small (<1000) datasets
    and is the default in the R language. This method gives good off-the-shelf
    behaviour.

    .. versionchanged:: 1.15.0
    If there is limited variance the IQR can be 0, which results in the
    FD bin width being 0 too. This is not a valid bin width, so
    ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
    If the IQR is 0, it's unlikely any variance-based estimators will be of
    use, so we revert to the Sturges estimator, which only uses the size of the
    dataset in its calculation.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    fd_bw = _hist_bin_fd(x, range)
    sturges_bw = _hist_bin_sturges(x, range)
    del range  # unused
    if fd_bw:
        return min(fd_bw, sturges_bw)
    else:
        # limited variance, so we return a len dependent bw estimator
        return sturges_bw
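

# Illustrative sketch (not part of the upstream module): a hypothetical helper
# making the 'auto' decision explicit: the smaller of the FD and Sturges widths
# (i.e. the larger bin count) wins, unless FD degenerates to zero width.
def _demo_auto_choice(x):
    fd_bw = _hist_bin_fd(x, None)
    sturges_bw = _hist_bin_sturges(x, None)
    chosen = min(fd_bw, sturges_bw) if fd_bw else sturges_bw
    return {'fd': fd_bw, 'sturges': sturges_bw, 'auto': chosen}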

# Private dict initialized at module load time
_hist_bin_selectors = {'stone': _hist_bin_stone,
                       'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}
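

# Illustrative sketch (not part of the upstream module): how the dict above is
# used internally, with the estimator looked up by the string passed as `bins`.
# The helper name and the toy data are hypothetical.
def _demo_selector_lookup(bin_name='sturges'):
    x = np.arange(128.0)                              # toy data, ptp == 127
    width = _hist_bin_selectors[bin_name](x, None)    # dispatch on the method name
    return width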


def _ravel_and_check_weights(a, weights):
    """ Check a and weights have matching shapes, and ravel both """
    a = np.asarray(a)

    # Ensure that the array is a "subtractable" dtype
    if a.dtype == np.bool_:
        warnings.warn("Converting input from {} to {} for compatibility."
                      .format(a.dtype, np.uint8),
                      RuntimeWarning, stacklevel=3)
        a = a.astype(np.uint8)

    if weights is not None:
        weights = np.asarray(weights)
        if weights.shape != a.shape:
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    return a, weights


def _get_outer_edges(a, range):
    """
    Determine the outer bin edges to use, from either the data or the range
    argument
    """
    if range is not None:
        first_edge, last_edge = range
        if first_edge > last_edge:
            raise ValueError(
                'max must be larger than min in range parameter.')
        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
            raise ValueError(
                "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
    elif a.size == 0:
        # handle empty arrays. Can't determine range, so use 0-1.
        first_edge, last_edge = 0, 1
    else:
        first_edge, last_edge = a.min(), a.max()
        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
            raise ValueError(
                "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))

    # expand empty range to avoid divide by zero
    if first_edge == last_edge:
        first_edge = first_edge - 0.5
        last_edge = last_edge + 0.5

    return first_edge, last_edge
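

# Illustrative sketch (not part of the upstream module): a hypothetical check
# showing the degenerate-range handling above; constant data gets its range
# padded by 0.5 on each side so the bin width is never zero.
def _demo_outer_edges_constant_data():
    edges_for_constant = _get_outer_edges(np.array([3.0, 3.0, 3.0]), None)    # (2.5, 3.5)
    edges_from_range = _get_outer_edges(np.array([]), (0, 10))                # (0, 10)
    return edges_for_constant, edges_from_range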


def _unsigned_subtract(a, b):
    """
    Subtract two values where a >= b, and produce an unsigned result

    This is needed when finding the difference between the upper and lower
    bound of an int16 histogram
    """
    # coerce to a single type
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    try:
        dt = signed_to_unsigned[dt.type]
    except KeyError:
        return np.subtract(a, b, dtype=dt)
    else:
        # we know the inputs are integers, and we are deliberately casting
        # signed to unsigned
        return np.subtract(a, b, casting='unsafe', dtype=dt)
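

# Illustrative sketch (not part of the upstream module): a hypothetical check
# showing why the unsigned cast matters. For int8 data spanning [-128, 127] the
# naive peak-to-peak wraps around to -1, while _unsigned_subtract (and hence
# _ptp) yields the representable uint8 value 255.
def _demo_unsigned_ptp():
    x = np.array([-128, 127], dtype=np.int8)
    # x.max() - x.min() would overflow to int8(-1); the helper avoids that:
    return _unsigned_subtract(x.max(), x.min())    # uint8(255)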


def _get_bin_edges(a, bins, range, weights):
    """
    Computes the bins used internally by `histogram`.

    Parameters
    ==========
    a : ndarray
        Ravelled data array
    bins, range
        Forwarded arguments from `histogram`.
    weights : ndarray, optional
        Ravelled weights array, or None

    Returns
    =======
    bin_edges : ndarray
        Array of bin edges
    uniform_bins : (Number, Number, int)
        The lower bound, upper bound, and number of bins, used in the optimized
        implementation of `histogram` that works on uniform bins.
    """

    # parse the overloaded bins argument
    n_equal_bins = None
    bin_edges = None

    if isinstance(bins, str):
        bin_name = bins
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
        if bin_name not in _hist_bin_selectors:
            raise ValueError(
                "{!r} is not a valid estimator for `bins`".format(bin_name))
        if weights is not None:
            raise TypeError("Automated estimation of the number of "
                            "bins is not supported for weighted data")

        first_edge, last_edge = _get_outer_edges(a, range)

        # truncate the range if needed
        if range is not None:
            keep = (a >= first_edge)
            keep &= (a <= last_edge)
            if not np.logical_and.reduce(keep):
                a = a[keep]

        if a.size == 0:
            n_equal_bins = 1
        else:
            # Do not call selectors on empty arrays
            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
            if width:
                n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
            else:
                # Width can be zero for some estimators, e.g. FD when
                # the IQR of the data is zero.
                n_equal_bins = 1

    elif np.ndim(bins) == 0:
        try:
            n_equal_bins = operator.index(bins)
        except TypeError as e:
            raise TypeError(
                '`bins` must be an integer, a string, or an array') from e
        if n_equal_bins < 1:
            raise ValueError('`bins` must be positive, when an integer')

        first_edge, last_edge = _get_outer_edges(a, range)

    elif np.ndim(bins) == 1:
        bin_edges = np.asarray(bins)
        if np.any(bin_edges[:-1] > bin_edges[1:]):
            raise ValueError(
                '`bins` must increase monotonically, when an array')

    else:
        raise ValueError('`bins` must be 1d, when an array')

    if n_equal_bins is not None:
        # gh-10322 means that type resolution rules are dependent on array
        # shapes. To avoid this causing problems, we pick a type now and stick
        # with it throughout.
        bin_type = np.result_type(first_edge, last_edge, a)
        if np.issubdtype(bin_type, np.integer):
            bin_type = np.result_type(bin_type, float)

        # bin edges must be computed
        bin_edges = np.linspace(
            first_edge, last_edge, n_equal_bins + 1,
            endpoint=True, dtype=bin_type)
        return bin_edges, (first_edge, last_edge, n_equal_bins)
    else:
        return bin_edges, None
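

# Illustrative sketch (not part of the upstream module): a hypothetical helper
# tracing the string-selector branch above: the estimated width is turned into
# a bin count with ceil(range / width), and the edges come from np.linspace.
def _demo_bin_edges_from_width(first_edge=0.0, last_edge=10.0, width=0.75):
    n_equal_bins = int(np.ceil((last_edge - first_edge) / width))    # 14 for these values
    return np.linspace(first_edge, last_edge, n_equal_bins + 1, endpoint=True)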


def _search_sorted_inclusive(a, v):
    """
    Like `searchsorted`, but where the last item in `v` is placed on the right.

    In the context of a histogram, this makes the last bin edge inclusive
    """
    return np.concatenate((
        a.searchsorted(v[:-1], 'left'),
        a.searchsorted(v[-1:], 'right')
    ))
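

# Illustrative sketch (not part of the upstream module): a hypothetical check
# of the inclusive behaviour documented above; a value equal to the last edge
# is counted inside the final bin rather than falling off the right end.
def _demo_inclusive_last_edge():
    sa = np.array([0.5, 1.0, 2.0, 3.0])          # sorted data, 3.0 sits on the last edge
    edges = np.array([0.0, 1.0, 2.0, 3.0])
    cum = _search_sorted_inclusive(sa, edges)    # array([0, 1, 2, 4])
    return np.diff(cum)                          # per-bin counts: array([1, 1, 2])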


def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
    return (a, bins, weights)


@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
    r"""
    Function to calculate only the edges of the bins used by the `histogram`
    function.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines the bin edges, including the rightmost
        edge, allowing for non-uniform bin widths.

        If `bins` is a string from the list below, `histogram_bin_edges` will use
        the method chosen to calculate the optimal bin width and
        consequently the number of bins (see `Notes` for more detail on
        the estimators) from the data that falls within the requested
        range. While the bin width will be optimal for the actual data
        in the range, the number of bins will be computed to fill the
        entire range, including the empty portions. For visualisation,
        using the 'auto' option is suggested. Weighted data is not
        supported for automated bin size selection.

        'auto'
            Maximum of the 'sturges' and 'fd' estimators. Provides good
            all around performance.

        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.

        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.

        'scott'
            Less robust estimator that takes into account data
            variability and data size.

        'stone'
            Estimator based on leave-one-out cross-validation estimate of
            the integrated squared error. Can be regarded as a generalization
            of Scott's rule.

        'rice'
            Estimator does not take variability into account, only data
            size. Commonly overestimates number of bins required.

        'sturges'
            R's default method, only accounts for data size. Only
            optimal for gaussian data and underestimates number of bins
            for large non-gaussian datasets.

        'sqrt'
            Square root (of data size) estimator, used by Excel and
            other programs for its speed and simplicity.

    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.

    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). This is currently not used by any of the bin estimators,
        but may be in the future.

    Returns
    -------
    bin_edges : array of dtype float
        The edges to pass into `histogram`

    See Also
    --------
    histogram

    Notes
    -----
    The methods to estimate the optimal number of bins are well founded
    in literature, and are inspired by the choices R provides for
    histogram visualisation. Note that having the number of bins
    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
    why it appears in most estimators. These are simply plug-in methods
    that give good starting points for number of bins. In the equations
    below, :math:`h` is the binwidth and :math:`n_h` is the number of
    bins. All estimators that compute bin counts are recast to bin width
    using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``.

    'auto' (maximum of the 'sturges' and 'fd' estimators)
        A compromise to get a good value. For small datasets the Sturges
        value will usually be chosen, while larger datasets will usually
        default to FD. Avoids the overly conservative behaviour of FD
        and Sturges for small and large datasets respectively.
        Switchover point is usually :math:`a.size \approx 1000`.

    'fd' (Freedman Diaconis Estimator)
        .. math:: h = 2 \frac{IQR}{n^{1/3}}

        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good for large
        datasets. The IQR is very robust to outliers.

    'scott'
        .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}

        The binwidth is proportional to the standard deviation of the
        data and inversely proportional to cube root of ``x.size``. Can
        be too conservative for small datasets, but is quite good for
        large datasets. The standard deviation is not very robust to
        outliers. Values are very similar to the Freedman-Diaconis
        estimator in the absence of outliers.

    'rice'
        .. math:: n_h = 2n^{1/3}

        The number of bins is only proportional to cube root of
        ``a.size``. It tends to overestimate the number of bins and it
        does not take into account data variability.

    'sturges'
        .. math:: n_h = \log _{2}n+1

        The number of bins is the base 2 log of ``a.size``. This
        estimator assumes normality of data and is too conservative for
        larger, non-normal datasets. This is the default method in R's
        ``hist`` method.

    'doane'
        .. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})

            g_1 = mean[(\frac{x - \mu}{\sigma})^3]

            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}

        An improved version of Sturges' formula that produces better
        estimates for non-normal datasets. This estimator attempts to
        account for the skew of the data.

    'sqrt'
        .. math:: n_h = \sqrt n

        The simplest and fastest estimator. Only takes into account the
        data size.

    Examples
    --------
    >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
    >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
    array([0. , 0.25, 0.5 , 0.75, 1. ])
    >>> np.histogram_bin_edges(arr, bins=2)
    array([0. , 2.5, 5. ])

    For consistency with histogram, an array of pre-computed bins is
    passed through unmodified:

    >>> np.histogram_bin_edges(arr, [1, 2])
    array([1, 2])

    This function allows one set of bins to be computed, and reused across
    multiple histograms:

    >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
    >>> shared_bins
    array([0., 1., 2., 3., 4., 5.])

    >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
    >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
    >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)

    >>> hist_0; hist_1
    array([1, 1, 0, 1, 0])
    array([2, 0, 1, 1, 2])

    Which gives more easily comparable results than using separate bins for
    each histogram:

    >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
    >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
    >>> hist_0; hist_1
    array([1, 1, 1])
    array([2, 1, 1, 2])
    >>> bins_0; bins_1
    array([0., 1., 2., 3.])
    array([0. , 1.25, 2.5 , 3.75, 5. ])

    """
    a, weights = _ravel_and_check_weights(a, weights)
    bin_edges, _ = _get_bin_edges(a, bins, range, weights)
    return bin_edges


def _histogram_dispatcher(
        a, bins=None, range=None, normed=None, weights=None, density=None):
    return (a, bins, weights)


@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, normed=None, weights=None,
              density=None):
    r"""
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines a monotonically increasing array of bin edges,
        including the rightmost edge, allowing for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string, it defines the method used to calculate the
        optimal bin width, as defined by `histogram_bin_edges`.

    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    normed : bool, optional

        .. deprecated:: 1.6.0

        This is equivalent to the `density` argument, but produces incorrect
        results for unequal bin widths. It should not be used.

        .. versionchanged:: 1.15.0
            DeprecationWarnings are actually emitted.

    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.

        Overrides the ``normed`` keyword if given.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.


    See Also
    --------
    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words,
    if `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
    *includes* 4.


    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist * np.diff(bin_edges))
    1.0

    .. versionadded:: 1.11.0

    Automated Bin Selection Methods example, using 2 peak random data
    with 2000 points:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.RandomState(10)  # deterministic random data
    >>> a = np.hstack((rng.normal(size=1000),
    ...                rng.normal(loc=5, scale=2, size=1000)))
    >>> _ = plt.hist(a, bins='auto')  # arguments are passed to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    Text(0.5, 1.0, "Histogram with 'auto' bins")
    >>> plt.show()

    """

    a, weights = _ravel_and_check_weights(a, weights)

    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)

    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype

    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536

    # The fast path uses bincount, but that only works for certain types
    # of weight
    simple_weights = (
        weights is None or
        np.can_cast(weights.dtype, np.double) or
        np.can_cast(weights.dtype, complex)
    )

    if uniform_bins is not None and simple_weights:
        # Fast algorithm for equal bins
        # We now convert values of a to bin indices, under the assumption of
        # equal bin widths (which is valid here).
        first_edge, last_edge, n_equal_bins = uniform_bins

        # Initialize empty histogram
        n = np.zeros(n_equal_bins, ntype)

        # Pre-compute histogram scaling factor
        norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)

        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in _range(0, len(a), BLOCK):
            tmp_a = a[i:i+BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]

            # Only include values in the right range
            keep = (tmp_a >= first_edge)
            keep &= (tmp_a <= last_edge)
            if not np.logical_and.reduce(keep):
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]

            # This cast ensures no type promotions occur below, which gh-10322
            # make unpredictable. Getting it wrong leads to precision errors
            # like gh-8123.
            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)

            # Compute the bin indices, and for values that lie exactly on
            # last_edge we need to subtract one
            f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
            indices = f_indices.astype(np.intp)
            indices[indices == n_equal_bins] -= 1

            # The index computation is not guaranteed to give exactly
            # consistent results within ~1 ULP of the bin edges.
            decrement = tmp_a < bin_edges[indices]
            indices[decrement] -= 1
            # The last bin includes the right edge. The other bins do not.
            increment = ((tmp_a >= bin_edges[indices + 1])
                         & (indices != n_equal_bins - 1))
            indices[increment] += 1

            # We now compute the histogram using bincount
            if ntype.kind == 'c':
                n.real += np.bincount(indices, weights=tmp_w.real,
                                      minlength=n_equal_bins)
                n.imag += np.bincount(indices, weights=tmp_w.imag,
                                      minlength=n_equal_bins)
            else:
                n += np.bincount(indices, weights=tmp_w,
                                 minlength=n_equal_bins).astype(ntype)
    else:
        # Compute via cumulative histogram
        cum_n = np.zeros(bin_edges.shape, ntype)
        if weights is None:
            for i in _range(0, len(a), BLOCK):
                sa = np.sort(a[i:i+BLOCK])
                cum_n += _search_sorted_inclusive(sa, bin_edges)
        else:
            zero = np.zeros(1, dtype=ntype)
            for i in _range(0, len(a), BLOCK):
                tmp_a = a[i:i+BLOCK]
                tmp_w = weights[i:i+BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                cw = np.concatenate((zero, sw.cumsum()))
                bin_index = _search_sorted_inclusive(sa, bin_edges)
                cum_n += cw[bin_index]

        n = np.diff(cum_n)

    # density overrides the normed keyword
    if density is not None:
        if normed is not None:
            # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
            warnings.warn(
                "The normed argument is ignored when density is provided. "
                "In future passing both will result in an error.",
                DeprecationWarning, stacklevel=3)
        normed = None

    if density:
        db = np.array(np.diff(bin_edges), float)
        return n/db/n.sum(), bin_edges
    elif normed:
        # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
        warnings.warn(
            "Passing `normed=True` on non-uniform bins has always been "
            "broken, and computes neither the probability density "
            "function nor the probability mass function. "
            "The result is only correct if the bins are uniform, when "
            "density=True will produce the same result anyway. "
            "The argument will be removed in a future version of "
            "numpy.",
            np.VisibleDeprecationWarning, stacklevel=3)

        # this normalization is incorrect, but
        db = np.array(np.diff(bin_edges), float)
        return n/(n*db).sum(), bin_edges
    else:
        if normed is not None:
            # 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
            warnings.warn(
                "Passing normed=False is deprecated, and has no effect. "
                "Consider passing the density argument instead.",
                DeprecationWarning, stacklevel=3)
        return n, bin_edges
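

# Illustrative sketch (not part of the upstream module): a hypothetical helper
# replaying the equal-width fast path above on a tiny input: values are mapped
# to bin indices with a single multiply and then tallied with np.bincount (the
# ULP correction steps are omitted for brevity).
def _demo_bincount_fast_path():
    a = np.array([0.1, 0.4, 0.5, 2.0])
    first_edge, last_edge, n_equal_bins = 0.0, 2.0, 4
    norm = n_equal_bins / (last_edge - first_edge)           # bins per unit of data
    indices = ((a - first_edge) * norm).astype(np.intp)      # [0, 0, 1, 4]
    indices[indices == n_equal_bins] -= 1                    # last edge is inclusive -> [0, 0, 1, 3]
    return np.bincount(indices, minlength=n_equal_bins)      # array([2, 1, 0, 1])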


def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
                            weights=None, density=None):
    if hasattr(sample, 'shape'):  # same condition as used in histogramdd
        yield sample
    else:
        yield from sample
    with contextlib.suppress(TypeError):
        yield from bins
    yield weights


@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
                density=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : (N, D) array, or (D, N) array_like
        The data to be histogrammed.

        Note the unusual interpretation of sample when an array_like:

        * When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
        * When an array_like, each element is the list of values for a single
          coordinate - such as ``histogramdd((X, Y, Z))``.

        The first form should be preferred.

    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the monotonically increasing bin
          edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of length D, each an optional (lower, upper) tuple giving
        the outer bin edges to be used if the edges are not given explicitly in
        `bins`.
        An entry of None in the sequence results in the minimum and maximum
        values being used for the corresponding dimension.
        The default, None, is equivalent to passing a tuple of D None values.
    density : bool, optional
        If False, the default, returns the number of samples in each bin.
        If True, returns the probability *density* function at the bin,
        ``bin_count / sample_count / bin_volume``.
    normed : bool, optional
        An alias for the density argument that behaves identically. To avoid
        confusion with the broken normed argument to `histogram`, `density`
        should be preferred.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """

    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape

    nbin = np.empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = np.asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                'sample x.')

    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # normalize the range argument
    if range is None:
        range = (None,) * D
    elif len(range) != D:
        raise ValueError('range argument must have one entry per dimension')

    # Create edge arrays
    for i in _range(D):
        if np.ndim(bins[i]) == 0:
            if bins[i] < 1:
                raise ValueError(
                    '`bins[{}]` must be positive, when an integer'.format(i))
            smin, smax = _get_outer_edges(sample[:, i], range[i])
            try:
                n = operator.index(bins[i])

            except TypeError as e:
                raise TypeError(
                    "`bins[{}]` must be an integer, when a scalar".format(i)
                ) from e

            edges[i] = np.linspace(smin, smax, n + 1)
        elif np.ndim(bins[i]) == 1:
            edges[i] = np.asarray(bins[i])
            if np.any(edges[i][:-1] > edges[i][1:]):
                raise ValueError(
                    '`bins[{}]` must be monotonically increasing, when an array'
                    .format(i))
        else:
            raise ValueError(
                '`bins[{}]` must be a scalar or 1d array'.format(i))

        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
        dedges[i] = np.diff(edges[i])

    # Compute the bin number each sample falls into.
    Ncount = tuple(
        # avoid np.digitize to work around gh-11022
        np.searchsorted(edges[i], sample[:, i], side='right')
        for i in _range(D)
    )

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in _range(D):
        # Find which points are on the rightmost edge.
        on_edge = (sample[:, i] == edges[i][-1])
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1

    # Compute the sample indices in the flattened histogram matrix.
    # This raises an error if the array is too large.
    xy = np.ravel_multi_index(Ncount, nbin)

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    hist = np.bincount(xy, weights, minlength=nbin.prod())

    # Shape into a proper matrix
    hist = hist.reshape(nbin)

    # This preserves the (bad) behavior observed in gh-7845, for now.
    hist = hist.astype(float, casting='safe')

    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*(slice(1, -1),)
    hist = hist[core]

    # handle the aliasing normed argument
    if normed is None:
        if density is None:
            density = False
    elif density is None:
        # an explicit normed argument was passed, alias it to the new name
        density = normed
    else:
        raise TypeError("Cannot specify both 'normed' and 'density'")

    if density:
        # calculate the probability density function
        s = hist.sum()
        for i in _range(D):
            shape = np.ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
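

# Illustrative sketch (not part of the upstream module): a hypothetical check
# of the density semantics documented above: with density=True each cell holds
# bin_count / sample_count / bin_volume, so integrating over all cells gives 1.
def _demo_histogramdd_density():
    rng = np.random.RandomState(0)
    sample = rng.uniform(size=(100, 2))
    hist, edges = np.histogramdd(sample, bins=(4, 5), density=True)
    volumes = np.outer(np.diff(edges[0]), np.diff(edges[1]))    # per-cell areas
    return np.sum(hist * volumes)                               # ~1.0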