Generated by Cython 0.29.23
Yellow lines hint at Python interaction.
Click on a line that starts with a "+
" to see the C code that Cython generated for it.
Raw output: simulation.c
+001: cimport numpy as np
__pyx_t_5 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_5) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+002: import numpy as np
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
003: import cython
004: from libc.string cimport memcmp
005: from libc.math cimport log
006: from libc.stdlib cimport abort, malloc, free
007:
008: # Numpy must be initialized. When using numpy from C or Cython you must
009: # *ALWAYS* do that, or you will have segfaults
+010: np.import_array()
__pyx_t_2 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 10, __pyx_L1_error)
011:
012: # Helper functions
013:
+014: cdef int extend_mode_to_code(str mode) except -1:
static int __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(PyObject *__pyx_v_mode) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("extend_mode_to_code", 0); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("pyrost.bin.simulation.extend_mode_to_code", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
+015: if mode == 'constant':
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_constant, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 15, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* … */ }
+016: return EXTEND_CONSTANT
__pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_CONSTANT; goto __pyx_L0;
+017: elif mode == 'nearest':
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_nearest, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 17, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* … */ }
+018: return EXTEND_NEAREST
__pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_NEAREST; goto __pyx_L0;
+019: elif mode == 'mirror':
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_mirror, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 19, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* … */ }
+020: return EXTEND_MIRROR
__pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_MIRROR; goto __pyx_L0;
+021: elif mode == 'reflect':
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_reflect, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 21, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* … */ }
+022: return EXTEND_REFLECT
__pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_REFLECT; goto __pyx_L0;
+023: elif mode == 'wrap':
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_mode, __pyx_n_u_wrap, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 23, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (likely(__pyx_t_2)) { /* … */ }
+024: return EXTEND_WRAP
__pyx_r = __pyx_e_6pyrost_3bin_10simulation_EXTEND_WRAP; goto __pyx_L0;
025: else:
+026: raise RuntimeError('boundary mode not supported')
/*else*/ { __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 26, __pyx_L1_error) } /* … */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_boundary_mode_not_supported); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 26, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_);
027:
+028: cdef np.ndarray check_array(np.ndarray array, int type_num):
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_check_array(PyArrayObject *__pyx_v_array, int __pyx_v_type_num) { int __pyx_v_tn; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("check_array", 0); __Pyx_INCREF((PyObject *)__pyx_v_array); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("pyrost.bin.simulation.check_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_array); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
+029: if not np.PyArray_IS_C_CONTIGUOUS(array):
__pyx_t_1 = ((!(PyArray_IS_C_CONTIGUOUS(__pyx_v_array) != 0)) != 0);
if (__pyx_t_1) {
/* … */
}
+030: array = np.PyArray_GETCONTIGUOUS(array)
__pyx_t_2 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_array)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_array, ((PyArrayObject *)__pyx_t_2)); __pyx_t_2 = 0;
+031: cdef int tn = np.PyArray_TYPE(array)
__pyx_v_tn = PyArray_TYPE(__pyx_v_array);
+032: if tn != type_num:
__pyx_t_1 = ((__pyx_v_tn != __pyx_v_type_num) != 0); if (__pyx_t_1) { /* … */ }
+033: array = np.PyArray_Cast(array, type_num)
__pyx_t_2 = PyArray_Cast(__pyx_v_array, __pyx_v_type_num); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_DECREF_SET(__pyx_v_array, ((PyArrayObject *)__pyx_t_2)); __pyx_t_2 = 0;
+034: return array
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_array)); __pyx_r = __pyx_v_array; goto __pyx_L0;
035:
+036: cdef np.ndarray number_to_array(object num, np.npy_intp rank, int type_num):
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_number_to_array(PyObject *__pyx_v_num, npy_intp __pyx_v_rank, int __pyx_v_type_num) { npy_intp *__pyx_v_dims; PyArrayObject *__pyx_v_arr = 0; int __pyx_v_i; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("number_to_array", 0); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("pyrost.bin.simulation.number_to_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_arr); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
+037: cdef np.npy_intp *dims = [rank,]
__pyx_t_1[0] = __pyx_v_rank; __pyx_v_dims = __pyx_t_1;
+038: cdef np.ndarray arr = <np.ndarray>np.PyArray_SimpleNew(1, dims, type_num)
__pyx_t_2 = PyArray_SimpleNew(1, __pyx_v_dims, __pyx_v_type_num); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 38, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_arr = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0;
039: cdef int i
+040: for i in range(rank):
__pyx_t_4 = __pyx_v_rank; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6;
+041: arr[i] = num
if (unlikely(__Pyx_SetItemInt(((PyObject *)__pyx_v_arr), __pyx_v_i, __pyx_v_num, int, 1, __Pyx_PyInt_From_int, 0, 0, 0) < 0)) __PYX_ERR(0, 41, __pyx_L1_error) }
+042: return arr
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_arr)); __pyx_r = __pyx_v_arr; goto __pyx_L0;
043:
+044: cdef np.ndarray normalize_sequence(object inp, np.npy_intp rank, int type_num):
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(PyObject *__pyx_v_inp, npy_intp __pyx_v_rank, int __pyx_v_type_num) { PyArrayObject *__pyx_v_arr = 0; int __pyx_v_tn; npy_intp __pyx_v_size; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("normalize_sequence", 0); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("pyrost.bin.simulation.normalize_sequence", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_arr); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
045: # If input is a scalar, create a sequence of length equal to the
046: # rank by duplicating the input. If input is a sequence,
047: # check if its length is equal to the length of array.
048: cdef np.ndarray arr
049: cdef int tn
+050: if np.PyArray_IsAnyScalar(inp):
__pyx_t_1 = (PyArray_IsAnyScalar(__pyx_v_inp) != 0);
if (__pyx_t_1) {
/* … */
goto __pyx_L3;
}
+051: arr = number_to_array(inp, rank, type_num)
__pyx_t_2 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_number_to_array(__pyx_v_inp, __pyx_v_rank, __pyx_v_type_num)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_arr = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0;
+052: elif np.PyArray_Check(inp):
__pyx_t_1 = (PyArray_Check(__pyx_v_inp) != 0);
if (__pyx_t_1) {
/* … */
goto __pyx_L3;
}
+053: arr = <np.ndarray>inp
__pyx_t_2 = __pyx_v_inp;
__Pyx_INCREF(__pyx_t_2);
__pyx_v_arr = ((PyArrayObject *)__pyx_t_2);
__pyx_t_2 = 0;
+054: tn = np.PyArray_TYPE(arr)
__pyx_v_tn = PyArray_TYPE(__pyx_v_arr);
+055: if tn != type_num:
__pyx_t_1 = ((__pyx_v_tn != __pyx_v_type_num) != 0); if (__pyx_t_1) { /* … */ }
+056: arr = <np.ndarray>np.PyArray_Cast(arr, type_num)
__pyx_t_2 = PyArray_Cast(__pyx_v_arr, __pyx_v_type_num); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 56, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_arr, ((PyArrayObject *)__pyx_t_3)); __pyx_t_3 = 0;
+057: elif isinstance(inp, (list, tuple)):
__pyx_t_4 = PyList_Check(__pyx_v_inp); __pyx_t_5 = (__pyx_t_4 != 0); if (!__pyx_t_5) { } else { __pyx_t_1 = __pyx_t_5; goto __pyx_L5_bool_binop_done; } __pyx_t_5 = PyTuple_Check(__pyx_v_inp); __pyx_t_4 = (__pyx_t_5 != 0); __pyx_t_1 = __pyx_t_4; __pyx_L5_bool_binop_done:; __pyx_t_4 = (__pyx_t_1 != 0); if (likely(__pyx_t_4)) { /* … */ goto __pyx_L3; }
+058: arr = <np.ndarray>np.PyArray_FROM_OTF(inp, type_num, np.NPY_ARRAY_C_CONTIGUOUS)
__pyx_t_3 = PyArray_FROM_OTF(__pyx_v_inp, __pyx_v_type_num, NPY_ARRAY_C_CONTIGUOUS); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 58, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_arr = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0;
059: else:
+060: raise ValueError("Wrong sequence argument type")
/*else*/ { __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 60, __pyx_L1_error) } __pyx_L3:; /* … */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_Wrong_sequence_argument_type); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 60, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2);
+061: cdef np.npy_intp size = np.PyArray_SIZE(arr)
__pyx_v_size = PyArray_SIZE(__pyx_v_arr);
+062: if size != rank:
__pyx_t_4 = ((__pyx_v_size != __pyx_v_rank) != 0); if (unlikely(__pyx_t_4)) { /* … */ }
+063: raise ValueError("Sequence argument must have length equal to input rank")
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 63, __pyx_L1_error) /* … */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Sequence_argument_must_have_leng); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3);
+064: return arr
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_arr)); __pyx_r = __pyx_v_arr; goto __pyx_L0;
065:
+066: def next_fast_len(target: cython.uint, backend: str='numpy') -> cython.uint:
/* Python wrapper */ static PyObject *__pyx_pw_6pyrost_3bin_10simulation_1next_fast_len(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_next_fast_len[] = "next_fast_len(unsigned int target: cython.uint, unicode backend: str = u'numpy') -> cython.uint\nFind the next fast size of input data to fft, for zero-padding, etc.\n FFT algorithms gain their speed by a recursive divide and conquer strategy.\n This relies on efficient functions for small prime factors of the input length.\n Thus, the transforms are fastest when using composites of the prime factors handled\n by the fft implementation. If there are efficient functions for all radices <= n,\n then the result will be a number x >= target with only prime factors < n. (Also\n known as n-smooth numbers)\n\n Parameters\n ----------\n target : int\n Length to start searching from. Must be a positive integer.\n backend : {'fftw', 'numpy'}, optional\n Find n-smooth number for the FFT implementation from the specified\n library.\n\n Returns\n -------\n n : int\n The smallest fast length greater than or equal to `target`.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_1next_fast_len = {"next_fast_len", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_1next_fast_len, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_next_fast_len}; static PyObject *__pyx_pw_6pyrost_3bin_10simulation_1next_fast_len(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned int __pyx_v_target; PyObject *__pyx_v_backend = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("next_fast_len (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_target,&__pyx_n_s_backend,0}; PyObject* values[2] = {0,0}; values[1] = ((PyObject*)((PyObject*)__pyx_n_u_numpy)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = 
PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_target)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend); if (value) { values[1] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "next_fast_len") < 0)) __PYX_ERR(0, 66, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_target = __Pyx_PyInt_As_unsigned_int(values[0]); if (unlikely((__pyx_v_target == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 66, __pyx_L3_error) __pyx_v_backend = ((PyObject*)values[1]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("next_fast_len", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 66, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.next_fast_len", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 66, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_next_fast_len(__pyx_self, __pyx_v_target, __pyx_v_backend); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = 
NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_6pyrost_3bin_10simulation_next_fast_len(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_target, PyObject *__pyx_v_backend) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("next_fast_len", 0); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("pyrost.bin.simulation.next_fast_len", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__10 = PyTuple_Pack(2, __pyx_n_s_target, __pyx_n_s_backend); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_next_fast_len, 66, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 66, __pyx_L1_error) /* … */ __pyx_t_1 = __Pyx_PyDict_NewPresized(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_target, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 66, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 66, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_return, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 66, __pyx_L1_error) __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_1next_fast_len, 0, __pyx_n_s_next_fast_len, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__11)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__12); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_3, __pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_next_fast_len, __pyx_t_3) < 0) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_tuple__12 = PyTuple_Pack(1, ((PyObject*)__pyx_n_u_numpy)); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12);
067: r"""Find the next fast size of input data to fft, for zero-padding, etc.
068: FFT algorithms gain their speed by a recursive divide and conquer strategy.
069: This relies on efficient functions for small prime factors of the input length.
070: Thus, the transforms are fastest when using composites of the prime factors handled
071: by the fft implementation. If there are efficient functions for all radices <= n,
072: then the result will be a number x >= target with only prime factors < n. (Also
073: known as n-smooth numbers)
074:
075: Parameters
076: ----------
077: target : int
078: Length to start searching from. Must be a positive integer.
079: backend : {'fftw', 'numpy'}, optional
080: Find n-smooth number for the FFT implementation from the specified
081: library.
082:
083: Returns
084: -------
085: n : int
086: The smallest fast length greater than or equal to `target`.
087: """
+088: if target < 0:
__pyx_t_1 = ((__pyx_v_target < 0) != 0); if (unlikely(__pyx_t_1)) { /* … */ }
+089: raise ValueError('Target length must be positive')
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(0, 89, __pyx_L1_error) /* … */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Target_length_must_be_positive); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 89, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4);
+090: if backend == 'fftw':
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 90, __pyx_L1_error) __pyx_t_3 = (__pyx_t_1 != 0); if (__pyx_t_3) { /* … */ }
+091: return next_fast_len_fftw(target)
__Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_unsigned_long(next_fast_len_fftw(__pyx_v_target)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 91, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0;
+092: elif backend == 'numpy':
__pyx_t_3 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 92, __pyx_L1_error) __pyx_t_1 = (__pyx_t_3 != 0); if (likely(__pyx_t_1)) { /* … */ }
+093: return good_size(target)
__Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_unsigned_long(good_size(__pyx_v_target)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0;
094: else:
+095: raise ValueError('{:s} is invalid backend'.format(backend))
/*else*/ { __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 95, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_backend); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 95, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 95, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 95, __pyx_L1_error) }
096:
+097: def fft_convolve(array: np.ndarray, kernel: np.ndarray, axis: cython.int=-1,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_3fft_convolve(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_2fft_convolve[] = "fft_convolve(ndarray array: np.ndarray, ndarray kernel: np.ndarray, int axis: cython.int = -1, unicode mode: str = u'constant', double cval: cython.double = 0.0, unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nConvolve a multi-dimensional `array` with one-dimensional `kernel` along the\n `axis` by means of FFT. Output has the same size as `array`.\n\n Parameters\n ----------\n array : numpy.ndarray\n Input array.\n kernel : numpy.ndarray\n Kernel array.\n axis : int, optional\n Array axis along which convolution is performed.\n mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional\n The mode parameter determines how the input array is extended when the filter\n overlaps a border. Default value is 'constant'. The valid values and their behavior\n is as follows:\n\n * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all\n values beyond the edge with the same constant value, defined by the `cval`\n parameter.\n * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating\n the last pixel.\n * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting\n about the center of the last pixel. This mode is also sometimes referred to as\n whole-sample symmetric.\n * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting\n about the edge of the last pixel. This mode is also sometimes referred to as\n half-sample symmetric.\n * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around\n to the opposite edge.\n cval : float, optional\n Value to fill past edges of input if mode is \342\200\230constant\342\200\231. 
Default is 0.0.\n backend : {'fftw', 'numpy'}, optional\n Choose backend library for the FFT implementation.""\n num_threads : int, optional\n Number of threads.\n\n Returns\n -------\n out : numpy.ndarray\n A multi-dimensional array containing the discrete linear\n convolution of `array` with `kernel`.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_3fft_convolve = {"fft_convolve", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_3fft_convolve, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_2fft_convolve}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_3fft_convolve(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_array = 0; PyArrayObject *__pyx_v_kernel = 0; int __pyx_v_axis; PyObject *__pyx_v_mode = 0; double __pyx_v_cval; PyObject *__pyx_v_backend = 0; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("fft_convolve (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_array,&__pyx_n_s_kernel,&__pyx_n_s_axis,&__pyx_n_s_mode,&__pyx_n_s_cval,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; values[3] = ((PyObject*)((PyObject*)__pyx_n_u_constant)); values[5] = ((PyObject*)((PyObject*)__pyx_n_u_numpy)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: 
break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_array)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kernel)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fft_convolve", 0, 2, 7, 1); __PYX_ERR(0, 97, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cval); if (value) { values[4] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend); if (value) { values[5] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 6: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[6] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fft_convolve") < 0)) __PYX_ERR(0, 97, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; 
default: goto __pyx_L5_argtuple_error; } } __pyx_v_array = ((PyArrayObject *)values[0]); __pyx_v_kernel = ((PyArrayObject *)values[1]); if (values[2]) { __pyx_v_axis = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 97, __pyx_L3_error) } else { __pyx_v_axis = ((int)((int)-1)); } __pyx_v_mode = ((PyObject*)values[3]); if (values[4]) { __pyx_v_cval = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_cval == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 98, __pyx_L3_error) } else { __pyx_v_cval = ((double)((double)0.0)); } __pyx_v_backend = ((PyObject*)values[5]); if (values[6]) { __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[6]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 99, __pyx_L3_error) } else { __pyx_v_num_threads = ((unsigned int)((unsigned int)1)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("fft_convolve", 0, 2, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 97, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.fft_convolve", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_array), __pyx_ptype_5numpy_ndarray, 1, "array", 0))) __PYX_ERR(0, 97, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_kernel), __pyx_ptype_5numpy_ndarray, 1, "kernel", 0))) __PYX_ERR(0, 97, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mode), (&PyUnicode_Type), 1, "mode", 1))) __PYX_ERR(0, 98, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 98, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_2fft_convolve(__pyx_self, __pyx_v_array, __pyx_v_kernel, __pyx_v_axis, __pyx_v_mode, __pyx_v_cval, __pyx_v_backend, __pyx_v_num_threads); 
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_2fft_convolve(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_array, PyArrayObject *__pyx_v_kernel, int __pyx_v_axis, PyObject *__pyx_v_mode, double __pyx_v_cval, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) { int __pyx_v_fail; int __pyx_v_ndim; npy_intp __pyx_v_ksize; int __pyx_v__mode; npy_intp *__pyx_v_dims; unsigned long *__pyx_v__dims; PyArrayObject *__pyx_v_out = 0; double *__pyx_v__out; double *__pyx_v__inp; double *__pyx_v__krn; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("fft_convolve", 0); __Pyx_INCREF((PyObject *)__pyx_v_array); __Pyx_INCREF((PyObject *)__pyx_v_kernel); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("pyrost.bin.simulation.fft_convolve", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_array); __Pyx_XDECREF((PyObject *)__pyx_v_kernel); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__13 = PyTuple_Pack(17, __pyx_n_s_array, __pyx_n_s_kernel, __pyx_n_s_axis, __pyx_n_s_mode, __pyx_n_s_cval, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_fail, __pyx_n_s_ndim, __pyx_n_s_ksize, __pyx_n_s_mode_2, __pyx_n_s_dims, __pyx_n_s_dims_2, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_inp_2, __pyx_n_s_krn); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* … */ __pyx_t_3 = __Pyx_PyInt_From_int(((int)-1)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* … 
*/ __pyx_t_5 = PyTuple_New(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __Pyx_INCREF(((PyObject*)__pyx_n_u_constant)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_constant)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject*)__pyx_n_u_constant)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_1); __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy)); PyTuple_SET_ITEM(__pyx_t_5, 3, ((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 4, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_1 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_array, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 97, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_kernel, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 97, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_axis, __pyx_n_u_int) < 0) __PYX_ERR(0, 97, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_mode, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 97, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_cval, __pyx_n_u_double) < 0) __PYX_ERR(0, 97, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 97, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 97, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 97, __pyx_L1_error) __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_3fft_convolve, 0, __pyx_n_s_fft_convolve, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__14)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_t_5); 
__Pyx_CyFunction_SetAnnotationsDict(__pyx_t_1, __pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_fft_convolve, __pyx_t_1) < 0) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(7, 0, 17, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_fft_convolve, 97, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 97, __pyx_L1_error)
+098: mode: str='constant', cval: cython.double=0.0, backend: str='numpy',
__pyx_t_1 = PyFloat_FromDouble(((double)0.0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 98, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1);
+099: num_threads: cython.uint=1) -> np.ndarray:
__pyx_t_4 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 99, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4);
100: """Convolve a multi-dimensional `array` with one-dimensional `kernel` along the
101: `axis` by means of FFT. Output has the same size as `array`.
102:
103: Parameters
104: ----------
105: array : numpy.ndarray
106: Input array.
107: kernel : numpy.ndarray
108: Kernel array.
109: axis : int, optional
110: Array axis along which convolution is performed.
111: mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional
112: The mode parameter determines how the input array is extended when the filter
113: overlaps a border. Default value is 'constant'. The valid values and their behavior
114: is as follows:
115:
116: * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all
117: values beyond the edge with the same constant value, defined by the `cval`
118: parameter.
119: * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating
120: the last pixel.
121: * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting
122: about the center of the last pixel. This mode is also sometimes referred to as
123: whole-sample symmetric.
124: * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting
125: about the edge of the last pixel. This mode is also sometimes referred to as
126: half-sample symmetric.
127: * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around
128: to the opposite edge.
129: cval : float, optional
130: Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
131: backend : {'fftw', 'numpy'}, optional
132: Choose backend library for the FFT implementation.
133: num_threads : int, optional
134: Number of threads.
135:
136: Returns
137: -------
138: out : numpy.ndarray
139: A multi-dimensional array containing the discrete linear
140: convolution of `array` with `kernel`.
141: """
+142: array = check_array(array, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_array, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_array, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
+143: kernel = check_array(kernel, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_kernel, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_kernel, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
144:
+145: cdef int fail = 0
__pyx_v_fail = 0;
+146: cdef int ndim = array.ndim
__pyx_t_2 = __pyx_v_array->nd; __pyx_v_ndim = __pyx_t_2;
+147: axis = axis if axis >= 0 else ndim + axis
if (((__pyx_v_axis >= 0) != 0)) { __pyx_t_2 = __pyx_v_axis; } else { __pyx_t_2 = (__pyx_v_ndim + __pyx_v_axis); } __pyx_v_axis = __pyx_t_2;
+148: axis = axis if axis <= ndim - 1 else ndim - 1
if (((__pyx_v_axis <= (__pyx_v_ndim - 1)) != 0)) { __pyx_t_3 = __pyx_v_axis; } else { __pyx_t_3 = (__pyx_v_ndim - 1); } __pyx_v_axis = __pyx_t_3;
+149: cdef np.npy_intp ksize = np.PyArray_DIM(kernel, 0)
__pyx_v_ksize = PyArray_DIM(__pyx_v_kernel, 0);
+150: cdef int _mode = extend_mode_to_code(mode)
__pyx_t_2 = __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(__pyx_v_mode); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 150, __pyx_L1_error)
__pyx_v__mode = __pyx_t_2;
+151: cdef np.npy_intp *dims = array.shape
__pyx_t_4 = __pyx_v_array->dimensions; __pyx_v_dims = __pyx_t_4;
+152: cdef unsigned long *_dims = <unsigned long *>dims
__pyx_v__dims = ((unsigned long *)__pyx_v_dims);
153:
+154: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_FLOAT64)
__pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 154, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __pyx_t_1; __Pyx_INCREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0;
+155: cdef double *_out = <double *>np.PyArray_DATA(out)
__pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+156: cdef double *_inp = <double *>np.PyArray_DATA(array)
__pyx_v__inp = ((double *)PyArray_DATA(__pyx_v_array));
+157: cdef double *_krn = <double *>np.PyArray_DATA(kernel)
__pyx_v__krn = ((double *)PyArray_DATA(__pyx_v_kernel));
+158: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } }
+159: if backend == 'fftw':
__pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 159, __pyx_L4_error) __pyx_t_7 = (__pyx_t_6 != 0); if (__pyx_t_7) { /* … */ goto __pyx_L6; }
+160: fail = fft_convolve_fftw(_out, _inp, ndim, _dims, _krn, ksize, axis, _mode, cval, num_threads)
__pyx_v_fail = fft_convolve_fftw(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__krn, __pyx_v_ksize, __pyx_v_axis, __pyx_v__mode, __pyx_v_cval, __pyx_v_num_threads);
+161: elif backend == 'numpy':
__pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 161, __pyx_L4_error) __pyx_t_6 = (__pyx_t_7 != 0); if (__pyx_t_6) { /* … */ goto __pyx_L6; }
+162: fail = fft_convolve_np(_out, _inp, ndim, _dims, _krn, ksize, axis, _mode, cval, num_threads)
__pyx_v_fail = fft_convolve_np(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__krn, __pyx_v_ksize, __pyx_v_axis, __pyx_v__mode, __pyx_v_cval, __pyx_v_num_threads);
163: else:
+164: raise ValueError('{:s} is invalid backend'.format(backend))
/*else*/ { { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif /*try:*/ { __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 164, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_8, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 164, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 164, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 164, __pyx_L8_error) } /*finally:*/ { __pyx_L8_error: { #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L4_error; } } } } __pyx_L6:; }
+165: if fail:
__pyx_t_6 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_6)) { /* … */ }
+166: raise RuntimeError('C backend exited with error.')
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 166, __pyx_L1_error) /* … */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_C_backend_exited_with_error); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 166, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5);
+167: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
168:
+169: def rsc_wp(wft: np.ndarray, dx0: cython.double, dx: cython.double, z: cython.double,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_5rsc_wp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_4rsc_wp[] = "rsc_wp(ndarray wft: np.ndarray, double dx0: cython.double, double dx: cython.double, double z: cython.double, double wl: cython.double, int axis: cython.int = -1, unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nWavefront propagator based on Rayleigh-Sommerfeld convolution\n method [RSC]_. Propagates a wavefront `wft` by `z` distance\n downstream. You can choose between 'fftw' and 'numpy' backends for FFT\n calculations. 'fftw' backend supports multiprocessing.\n\n Parameters\n ----------\n wft : numpy.ndarray\n Initial wavefront.\n dx0 : float\n Sampling interval at the plane upstream [um].\n dx : float\n Sampling interval at the plane downstream [um].\n z : float\n Propagation distance [um].\n wl : float\n Incoming beam's wavelength [um].\n axis : int, optional\n Axis of `wft` array along which the calculation is\n performed.\n backend : {'fftw', 'numpy'}, optional\n Choose backend library for the FFT implementation.\n num_threads: int, optional\n Number of threads used in calculation. Only 'fftw' backend\n supports it.\n\n Returns\n -------\n out : numpy.ndarray\n Propagated wavefront.\n\n Raises\n ------\n RuntimeError\n If 'numpy' backend exits with eror during the calculation.\n ValueError\n If `backend` option is invalid.\n\n Notes\n -----\n The Rayleigh\342\200\223Sommerfeld diffraction integral transform is defined as:\n\n .. math::\n u^{\\prime}(x^{\\prime}) = \\frac{z}{j \\sqrt{\\lambda}} \\int_{-\\infty}^{+\\infty}\n u(x) \\mathrm{exp} \\left[-j k r(x, x^{\\prime}) \\right] dx\n \n with\n\n .. math::\n r(x, x^{\\prime}) = \\left[ (x - x^{\\prime})^2 + z^2 \\right]^{1 / 2}\n\n References\n ----------\n .. [RSC] V. Nascov and P. C. 
Logof\304\203tu, \"Fast computation algorithm\n for the Rayleigh-Sommerfel""d diffraction formula using\n a type of scaled convolution,\" Appl. Opt. 48, 4310-4319\n (2009).\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_5rsc_wp = {"rsc_wp", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_5rsc_wp, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_4rsc_wp}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_5rsc_wp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_wft = 0; double __pyx_v_dx0; double __pyx_v_dx; double __pyx_v_z; double __pyx_v_wl; int __pyx_v_axis; PyObject *__pyx_v_backend = 0; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("rsc_wp (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wft,&__pyx_n_s_dx0,&__pyx_n_s_dx,&__pyx_n_s_z,&__pyx_n_s_wl,&__pyx_n_s_axis,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0}; PyObject* values[8] = {0,0,0,0,0,0,0,0}; values[6] = ((PyObject*)((PyObject*)__pyx_n_u_numpy)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wft)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx0)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, 1); __PYX_ERR(0, 169, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, 2); __PYX_ERR(0, 169, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, 3); __PYX_ERR(0, 169, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wl)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, 4); __PYX_ERR(0, 169, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[5] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 6: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend); if (value) { values[6] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 7: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[7] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rsc_wp") < 0)) __PYX_ERR(0, 169, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = 
PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_wft = ((PyArrayObject *)values[0]); __pyx_v_dx0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_dx0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 169, __pyx_L3_error) __pyx_v_dx = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_dx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 169, __pyx_L3_error) __pyx_v_z = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 169, __pyx_L3_error) __pyx_v_wl = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_wl == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 170, __pyx_L3_error) if (values[5]) { __pyx_v_axis = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 170, __pyx_L3_error) } else { __pyx_v_axis = ((int)((int)-1)); } __pyx_v_backend = ((PyObject*)values[6]); if (values[7]) { __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[7]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 171, __pyx_L3_error) } else { __pyx_v_num_threads = ((unsigned int)((unsigned int)1)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("rsc_wp", 0, 5, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 169, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.rsc_wp", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_wft), __pyx_ptype_5numpy_ndarray, 1, "wft", 0))) __PYX_ERR(0, 169, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 170, __pyx_L1_error) __pyx_r = 
__pyx_pf_6pyrost_3bin_10simulation_4rsc_wp(__pyx_self, __pyx_v_wft, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_axis, __pyx_v_backend, __pyx_v_num_threads); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_4rsc_wp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_wft, double __pyx_v_dx0, double __pyx_v_dx, double __pyx_v_z, double __pyx_v_wl, int __pyx_v_axis, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) { int __pyx_v_fail; CYTHON_UNUSED npy_intp __pyx_v_isize; int __pyx_v_ndim; npy_intp *__pyx_v_dims; unsigned long *__pyx_v__dims; PyArrayObject *__pyx_v_out = 0; __pyx_t_double_complex *__pyx_v__out; __pyx_t_double_complex *__pyx_v__inp; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("rsc_wp", 0); __Pyx_INCREF((PyObject *)__pyx_v_wft); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("pyrost.bin.simulation.rsc_wp", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_wft); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__15 = PyTuple_Pack(16, __pyx_n_s_wft, __pyx_n_s_dx0, __pyx_n_s_dx, __pyx_n_s_z, __pyx_n_s_wl, __pyx_n_s_axis, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_fail, __pyx_n_s_isize, __pyx_n_s_ndim, __pyx_n_s_dims, __pyx_n_s_dims_2, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_inp_2); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 169, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* … */ __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 169, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_4); __pyx_t_1 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 169, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_wft, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dx0, __pyx_n_u_double) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dx, __pyx_n_u_double) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_z, __pyx_n_u_double) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_wl, __pyx_n_u_double) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_axis, __pyx_n_u_int) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 169, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 169, __pyx_L1_error) __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_5rsc_wp, 0, __pyx_n_s_rsc_wp, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__16)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 169, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_t_5); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_1, __pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_rsc_wp, __pyx_t_1) < 0) __PYX_ERR(0, 169, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(8, 0, 16, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_rsc_wp, 169, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(0, 169, __pyx_L1_error)
+170: wl: cython.double, axis: cython.int=-1, backend: str='numpy',
__pyx_t_1 = __Pyx_PyInt_From_int(((int)-1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1);
+171: num_threads: cython.uint=1) -> np.ndarray:
__pyx_t_4 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 171, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4);
172: r"""Wavefront propagator based on Rayleigh-Sommerfeld convolution
173: method [RSC]_. Propagates a wavefront `wft` by `z` distance
174: downstream. You can choose between 'fftw' and 'numpy' backends for FFT
175: calculations. 'fftw' backend supports multiprocessing.
176:
177: Parameters
178: ----------
179: wft : numpy.ndarray
180: Initial wavefront.
181: dx0 : float
182: Sampling interval at the plane upstream [um].
183: dx : float
184: Sampling interval at the plane downstream [um].
185: z : float
186: Propagation distance [um].
187: wl : float
188: Incoming beam's wavelength [um].
189: axis : int, optional
190: Axis of `wft` array along which the calculation is
191: performed.
192: backend : {'fftw', 'numpy'}, optional
193: Choose backend library for the FFT implementation.
194: num_threads: int, optional
195: Number of threads used in calculation. Only 'fftw' backend
196: supports it.
197:
198: Returns
199: -------
200: out : numpy.ndarray
201: Propagated wavefront.
202:
203: Raises
204: ------
205: RuntimeError
206: If 'numpy' backend exits with eror during the calculation.
207: ValueError
208: If `backend` option is invalid.
209:
210: Notes
211: -----
212: The Rayleigh–Sommerfeld diffraction integral transform is defined as:
213:
214: .. math::
215: u^{\prime}(x^{\prime}) = \frac{z}{j \sqrt{\lambda}} \int_{-\infty}^{+\infty}
216: u(x) \mathrm{exp} \left[-j k r(x, x^{\prime}) \right] dx
217:
218: with
219:
220: .. math::
221: r(x, x^{\prime}) = \left[ (x - x^{\prime})^2 + z^2 \right]^{1 / 2}
222:
223: References
224: ----------
225: .. [RSC] V. Nascov and P. C. Logofătu, "Fast computation algorithm
226: for the Rayleigh-Sommerfeld diffraction formula using
227: a type of scaled convolution," Appl. Opt. 48, 4310-4319
228: (2009).
229: """
+230: wft = check_array(wft, np.NPY_COMPLEX128)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_wft, NPY_COMPLEX128)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_wft, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
231:
+232: cdef int fail = 0
__pyx_v_fail = 0;
+233: cdef np.npy_intp isize = np.PyArray_SIZE(wft)
__pyx_v_isize = PyArray_SIZE(__pyx_v_wft);
+234: cdef int ndim = wft.ndim
__pyx_t_2 = __pyx_v_wft->nd; __pyx_v_ndim = __pyx_t_2;
+235: axis = axis if axis >= 0 else ndim + axis
if (((__pyx_v_axis >= 0) != 0)) { __pyx_t_2 = __pyx_v_axis; } else { __pyx_t_2 = (__pyx_v_ndim + __pyx_v_axis); } __pyx_v_axis = __pyx_t_2;
+236: axis = axis if axis <= ndim - 1 else ndim - 1
if (((__pyx_v_axis <= (__pyx_v_ndim - 1)) != 0)) { __pyx_t_3 = __pyx_v_axis; } else { __pyx_t_3 = (__pyx_v_ndim - 1); } __pyx_v_axis = __pyx_t_3;
+237: cdef np.npy_intp *dims = wft.shape
__pyx_t_4 = __pyx_v_wft->dimensions; __pyx_v_dims = __pyx_t_4;
+238: cdef unsigned long *_dims = <unsigned long *>dims
__pyx_v__dims = ((unsigned long *)__pyx_v_dims);
+239: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_COMPLEX128)
__pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_COMPLEX128); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __pyx_t_1; __Pyx_INCREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0;
+240: cdef complex *_out = <complex *>np.PyArray_DATA(out)
__pyx_v__out = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_out));
+241: cdef complex *_inp = <complex *>np.PyArray_DATA(wft)
__pyx_v__inp = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_wft));
+242: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } }
+243: if backend == 'fftw':
__pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 243, __pyx_L4_error) __pyx_t_7 = (__pyx_t_6 != 0); if (__pyx_t_7) { /* … */ goto __pyx_L6; }
+244: fail = rsc_fftw(_out, _inp, ndim, _dims, axis, dx0, dx, z, wl, num_threads)
__pyx_v_fail = rsc_fftw(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v_axis, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_num_threads);
+245: elif backend == 'numpy':
__pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 245, __pyx_L4_error) __pyx_t_6 = (__pyx_t_7 != 0); if (__pyx_t_6) { /* … */ goto __pyx_L6; }
+246: fail = rsc_np(_out, _inp, ndim, _dims, axis, dx0, dx, z, wl, num_threads)
__pyx_v_fail = rsc_np(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v_axis, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_num_threads);
247: else:
+248: raise ValueError('{:s} is invalid backend'.format(backend))
/*else*/ { { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif /*try:*/ { __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 248, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_8, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 248, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 248, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 248, __pyx_L8_error) } /*finally:*/ { __pyx_L8_error: { #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L4_error; } } } } __pyx_L6:; }
+249: if fail:
__pyx_t_6 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_6)) { /* … */ }
+250: raise RuntimeError('C backend exited with error.')
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 250, __pyx_L1_error)
+251: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
252:
+253: def fraunhofer_wp(wft: np.ndarray, dx0: cython.double, dx: cython.double,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_7fraunhofer_wp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_6fraunhofer_wp[] = "fraunhofer_wp(ndarray wft: np.ndarray, double dx0: cython.double, double dx: cython.double, double z: cython.double, double wl: cython.double, int axis: cython.int = -1, unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nFraunhofer diffraction propagator. Propagates a wavefront `wft` by\n `z` distance downstream. You can choose between 'fftw' and 'numpy'\n backends for FFT calculations. 'fftw' backend supports multiprocessing.\n\n Parameters\n ----------\n wft : numpy.ndarray\n Initial wavefront.\n dx0 : float\n Sampling interval at the plane upstream [um].\n dx : float\n Sampling interval at the plane downstream [um].\n z : float\n Propagation distance [um].\n wl : float\n Incoming beam's wavelength [um].\n axis : int, optional\n Axis of `wft` array along which the calculation is\n performed.\n backend : {'fftw', 'numpy'}, optional\n Choose backend library for the FFT implementation.\n num_threads: int, optional\n Number of threads used in calculation. Only 'fftw' backend\n supports it.\n\n Returns\n -------\n out : numpy.ndarray\n Propagated wavefront.\n\n Raises\n ------\n RuntimeError\n If 'numpy' backend exits with eror during the calculation.\n ValueError\n If `backend` option is invalid.\n\n Notes\n -----\n The Fraunhofer integral transform is defined as:\n\n .. 
math::\n u^{\\prime}(x^{\\prime}) = \\frac{e^{-j k z}}{j \\sqrt{\\lambda z}}\n e^{-\\frac{j k}{2 z} x^{\\prime 2}} \\int_{-\\infty}^{+\\infty} u(x)\n e^{j\\frac{2 \\pi}{\\lambda z} x x^{\\prime}} dx\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_7fraunhofer_wp = {"fraunhofer_wp", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_7fraunhofer_wp, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_6fraunhofer_wp}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_7fraunhofer_wp(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_wft = 0; double __pyx_v_dx0; double __pyx_v_dx; double __pyx_v_z; double __pyx_v_wl; int __pyx_v_axis; PyObject *__pyx_v_backend = 0; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("fraunhofer_wp (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wft,&__pyx_n_s_dx0,&__pyx_n_s_dx,&__pyx_n_s_z,&__pyx_n_s_wl,&__pyx_n_s_axis,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0}; PyObject* values[8] = {0,0,0,0,0,0,0,0}; values[6] = ((PyObject*)((PyObject*)__pyx_n_u_numpy)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch 
(pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wft)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx0)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, 1); __PYX_ERR(0, 253, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, 2); __PYX_ERR(0, 253, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_z)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, 3); __PYX_ERR(0, 253, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wl)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, 4); __PYX_ERR(0, 253, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[5] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 6: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend); if (value) { values[6] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 7: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[7] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fraunhofer_wp") < 0)) __PYX_ERR(0, 253, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: 
values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_wft = ((PyArrayObject *)values[0]); __pyx_v_dx0 = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_dx0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 253, __pyx_L3_error) __pyx_v_dx = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_dx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 253, __pyx_L3_error) __pyx_v_z = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_z == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 254, __pyx_L3_error) __pyx_v_wl = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_wl == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 254, __pyx_L3_error) if (values[5]) { __pyx_v_axis = __Pyx_PyInt_As_int(values[5]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 254, __pyx_L3_error) } else { __pyx_v_axis = ((int)((int)-1)); } __pyx_v_backend = ((PyObject*)values[6]); if (values[7]) { __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[7]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 255, __pyx_L3_error) } else { __pyx_v_num_threads = ((unsigned int)((unsigned int)1)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("fraunhofer_wp", 0, 5, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 253, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.fraunhofer_wp", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_wft), __pyx_ptype_5numpy_ndarray, 1, "wft", 0))) __PYX_ERR(0, 253, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 
1, "backend", 1))) __PYX_ERR(0, 255, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_6fraunhofer_wp(__pyx_self, __pyx_v_wft, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_axis, __pyx_v_backend, __pyx_v_num_threads); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_6fraunhofer_wp(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_wft, double __pyx_v_dx0, double __pyx_v_dx, double __pyx_v_z, double __pyx_v_wl, int __pyx_v_axis, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) { int __pyx_v_fail; CYTHON_UNUSED npy_intp __pyx_v_isize; int __pyx_v_ndim; npy_intp *__pyx_v_dims; unsigned long *__pyx_v__dims; PyArrayObject *__pyx_v_out = 0; __pyx_t_double_complex *__pyx_v__out; __pyx_t_double_complex *__pyx_v__inp; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("fraunhofer_wp", 0); __Pyx_INCREF((PyObject *)__pyx_v_wft); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("pyrost.bin.simulation.fraunhofer_wp", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_wft); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__17 = PyTuple_Pack(16, __pyx_n_s_wft, __pyx_n_s_dx0, __pyx_n_s_dx, __pyx_n_s_z, __pyx_n_s_wl, __pyx_n_s_axis, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_fail, __pyx_n_s_isize, __pyx_n_s_ndim, __pyx_n_s_dims, __pyx_n_s_dims_2, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_inp_2); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* … */ 
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy)); PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_4); __pyx_t_1 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_wft, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 253, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dx0, __pyx_n_u_double) < 0) __PYX_ERR(0, 253, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dx, __pyx_n_u_double) < 0) __PYX_ERR(0, 253, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_z, __pyx_n_u_double) < 0) __PYX_ERR(0, 253, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_wl, __pyx_n_u_double) < 0) __PYX_ERR(0, 253, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_axis, __pyx_n_u_int) < 0) __PYX_ERR(0, 253, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 253, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 253, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 253, __pyx_L1_error) __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_7fraunhofer_wp, 0, __pyx_n_s_fraunhofer_wp, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__18)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_t_5); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_1, __pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if 
(PyDict_SetItem(__pyx_d, __pyx_n_s_fraunhofer_wp, __pyx_t_1) < 0) __PYX_ERR(0, 253, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(8, 0, 16, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_fraunhofer_wp, 253, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 253, __pyx_L1_error)
+254: z: cython.double, wl: cython.double, axis: cython.int=-1,
__pyx_t_1 = __Pyx_PyInt_From_int(((int)-1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1);
+255: backend: str='numpy', num_threads: cython.uint=1) -> np.ndarray:
__pyx_t_4 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4);
256: r"""Fraunhofer diffraction propagator. Propagates a wavefront `wft` by
257: `z` distance downstream. You can choose between 'fftw' and 'numpy'
258: backends for FFT calculations. 'fftw' backend supports multiprocessing.
259:
260: Parameters
261: ----------
262: wft : numpy.ndarray
263: Initial wavefront.
264: dx0 : float
265: Sampling interval at the plane upstream [um].
266: dx : float
267: Sampling interval at the plane downstream [um].
268: z : float
269: Propagation distance [um].
270: wl : float
271: Incoming beam's wavelength [um].
272: axis : int, optional
273: Axis of `wft` array along which the calculation is
274: performed.
275: backend : {'fftw', 'numpy'}, optional
276: Choose backend library for the FFT implementation.
277: num_threads: int, optional
278: Number of threads used in calculation. Only 'fftw' backend
279: supports it.
280:
281: Returns
282: -------
283: out : numpy.ndarray
284: Propagated wavefront.
285:
286: Raises
287: ------
288: RuntimeError
289: If 'numpy' backend exits with eror during the calculation.
290: ValueError
291: If `backend` option is invalid.
292:
293: Notes
294: -----
295: The Fraunhofer integral transform is defined as:
296:
297: .. math::
298: u^{\prime}(x^{\prime}) = \frac{e^{-j k z}}{j \sqrt{\lambda z}}
299: e^{-\frac{j k}{2 z} x^{\prime 2}} \int_{-\infty}^{+\infty} u(x)
300: e^{j\frac{2 \pi}{\lambda z} x x^{\prime}} dx
301: """
+302: wft = check_array(wft, np.NPY_COMPLEX128)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_wft, NPY_COMPLEX128)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 302, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_wft, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
303:
+304: cdef int fail = 0
__pyx_v_fail = 0;
+305: cdef np.npy_intp isize = np.PyArray_SIZE(wft)
__pyx_v_isize = PyArray_SIZE(__pyx_v_wft);
+306: cdef int ndim = wft.ndim
__pyx_t_2 = __pyx_v_wft->nd; __pyx_v_ndim = __pyx_t_2;
+307: axis = axis if axis >= 0 else ndim + axis
if (((__pyx_v_axis >= 0) != 0)) { __pyx_t_2 = __pyx_v_axis; } else { __pyx_t_2 = (__pyx_v_ndim + __pyx_v_axis); } __pyx_v_axis = __pyx_t_2;
+308: axis = axis if axis <= ndim - 1 else ndim - 1
if (((__pyx_v_axis <= (__pyx_v_ndim - 1)) != 0)) { __pyx_t_3 = __pyx_v_axis; } else { __pyx_t_3 = (__pyx_v_ndim - 1); } __pyx_v_axis = __pyx_t_3;
+309: cdef np.npy_intp *dims = wft.shape
__pyx_t_4 = __pyx_v_wft->dimensions; __pyx_v_dims = __pyx_t_4;
+310: cdef unsigned long *_dims = <unsigned long *>dims
__pyx_v__dims = ((unsigned long *)__pyx_v_dims);
+311: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_COMPLEX128)
__pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_COMPLEX128); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __pyx_t_1; __Pyx_INCREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0;
+312: cdef complex *_out = <complex *>np.PyArray_DATA(out)
__pyx_v__out = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_out));
+313: cdef complex *_inp = <complex *>np.PyArray_DATA(wft)
__pyx_v__inp = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_wft));
+314: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } }
+315: if backend == 'fftw':
__pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 315, __pyx_L4_error) __pyx_t_7 = (__pyx_t_6 != 0); if (__pyx_t_7) { /* … */ goto __pyx_L6; }
+316: fail = fraunhofer_fftw(_out, _inp, ndim, _dims, axis, dx0, dx, z, wl, num_threads)
__pyx_v_fail = fraunhofer_fftw(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v_axis, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_num_threads);
+317: elif backend == 'numpy':
__pyx_t_7 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_7 < 0)) __PYX_ERR(0, 317, __pyx_L4_error) __pyx_t_6 = (__pyx_t_7 != 0); if (__pyx_t_6) { /* … */ goto __pyx_L6; }
+318: fail = fraunhofer_np(_out, _inp, ndim, _dims, axis, dx0, dx, z, wl, num_threads)
__pyx_v_fail = fraunhofer_np(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v_axis, __pyx_v_dx0, __pyx_v_dx, __pyx_v_z, __pyx_v_wl, __pyx_v_num_threads);
319: else:
+320: raise ValueError('{:s} is invalid backend'.format(backend))
/*else*/ { { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif /*try:*/ { __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 320, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_5 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_8, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 320, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 320, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 320, __pyx_L8_error) } /*finally:*/ { __pyx_L8_error: { #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L4_error; } } } } __pyx_L6:; }
+321: if fail:
__pyx_t_6 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_6)) { /* … */ }
+322: raise RuntimeError('C backend exited with error.')
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 322, __pyx_L1_error)
+323: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
324:
+325: def gaussian_kernel(sigma: double, order: cython.uint=0, truncate: cython.double=4.) -> np.ndarray:
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_9gaussian_kernel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_8gaussian_kernel[] = "gaussian_kernel(double sigma: double, unsigned int order: cython.uint = 0, double truncate: cython.double = 4.) -> np.ndarray\nDiscrete Gaussian kernel.\n \n Parameters\n ----------\n sigma : float\n Standard deviation for Gaussian kernel.\n order : int, optional\n The order of the filter. An order of 0 corresponds to convolution with a\n Gaussian kernel. A positive order corresponds to convolution with that\n derivative of a Gaussian. Default is 0.\n truncate : float, optional\n Truncate the filter at this many standard deviations. Default is 4.0.\n \n Returns\n -------\n krn : np.ndarray\n Gaussian kernel.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_9gaussian_kernel = {"gaussian_kernel", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_9gaussian_kernel, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_8gaussian_kernel}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_9gaussian_kernel(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_sigma; unsigned int __pyx_v_order; double __pyx_v_truncate; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gaussian_kernel (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_sigma,&__pyx_n_s_order,&__pyx_n_s_truncate,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto 
__pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_order); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_truncate); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gaussian_kernel") < 0)) __PYX_ERR(0, 325, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_sigma = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_sigma == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 325, __pyx_L3_error) if (values[1]) { __pyx_v_order = __Pyx_PyInt_As_unsigned_int(values[1]); if (unlikely((__pyx_v_order == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 325, __pyx_L3_error) } else { __pyx_v_order = ((unsigned int)((unsigned int)0)); } if (values[2]) { __pyx_v_truncate = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_truncate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 325, __pyx_L3_error) } else { __pyx_v_truncate = ((double)((double)4.)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gaussian_kernel", 0, 1, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 325, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_kernel", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); 
return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_8gaussian_kernel(__pyx_self, __pyx_v_sigma, __pyx_v_order, __pyx_v_truncate); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_8gaussian_kernel(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_sigma, unsigned int __pyx_v_order, double __pyx_v_truncate) { npy_intp __pyx_v_radius; npy_intp *__pyx_v_dims; PyArrayObject *__pyx_v_out = 0; double *__pyx_v__out; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gaussian_kernel", 0); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_kernel", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__19 = PyTuple_Pack(7, __pyx_n_s_sigma, __pyx_n_s_order, __pyx_n_s_truncate, __pyx_n_s_radius, __pyx_n_s_dims, __pyx_n_s_out, __pyx_n_s_out_2); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* … */ __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(((unsigned int)0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = PyFloat_FromDouble(((double)4.)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); __pyx_t_1 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyDict_NewPresized(4); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_sigma, __pyx_n_u_double) < 0) __PYX_ERR(0, 325, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_order, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 325, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_truncate, __pyx_n_u_double) < 0) __PYX_ERR(0, 325, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 325, __pyx_L1_error) __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_9gaussian_kernel, 0, __pyx_n_s_gaussian_kernel, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__20)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_1, __pyx_t_5); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_1, __pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_gaussian_kernel, __pyx_t_1) < 0) __PYX_ERR(0, 325, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(3, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_gaussian_kernel, 325, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 325, __pyx_L1_error)
326: """Discrete Gaussian kernel.
327:
328: Parameters
329: ----------
330: sigma : float
331: Standard deviation for Gaussian kernel.
332: order : int, optional
333: The order of the filter. An order of 0 corresponds to convolution with a
334: Gaussian kernel. A positive order corresponds to convolution with that
335: derivative of a Gaussian. Default is 0.
336: truncate : float, optional
337: Truncate the filter at this many standard deviations. Default is 4.0.
338:
339: Returns
340: -------
341: krn : np.ndarray
342: Gaussian kernel.
343: """
+344: cdef np.npy_intp radius = <np.npy_intp>(sigma * truncate)
__pyx_v_radius = ((npy_intp)(__pyx_v_sigma * __pyx_v_truncate));
+345: cdef np.npy_intp *dims = [2 * radius + 1,]
__pyx_t_1[0] = ((2 * __pyx_v_radius) + 1); __pyx_v_dims = __pyx_t_1;
+346: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(1, dims, np.NPY_FLOAT64)
__pyx_t_2 = PyArray_SimpleNew(1, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 346, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_3); __pyx_t_3 = 0;
+347: cdef double *_out = <double *>np.PyArray_DATA(out)
__pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+348: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } }
+349: gauss_kernel1d(_out, sigma, order, dims[0])
(void)(gauss_kernel1d(__pyx_v__out, __pyx_v_sigma, __pyx_v_order, (__pyx_v_dims[0]))); }
+350: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
351:
+352: def gaussian_filter(inp: np.ndarray, sigma: object, order: object=0, mode: str='reflect',
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_11gaussian_filter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_10gaussian_filter[] = "gaussian_filter(ndarray inp: np.ndarray, sigma: object, order: object = 0, unicode mode: str = u'reflect', double cval: cython.double = 0., double truncate: cython.double = 4., unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nMultidimensional Gaussian filter. The multidimensional filter is implemented as\n a sequence of 1-D FFT convolutions.\n\n Parameters\n ----------\n inp : np.ndarray\n The input array.\n sigma : float or list of floats\n Standard deviation for Gaussian kernel. The standard deviations of the Gaussian\n filter are given for each axis as a sequence, or as a single number, in which case\n it is equal for all axes.\n order : int or list of ints, optional\n The order of the filter along each axis is given as a sequence of integers, or as\n a single number. An order of 0 corresponds to convolution with a Gaussian kernel.\n A positive order corresponds to convolution with that derivative of a Gaussian.\n mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional\n The mode parameter determines how the input array is extended when the filter\n overlaps a border. Default value is 'reflect'. The valid values and their behavior\n is as follows:\n\n * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all\n values beyond the edge with the same constant value, defined by the `cval`\n parameter.\n * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating\n the last pixel.\n * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting\n about the center of the last pixel. 
This mode is also sometimes referred to as\n whole-sample symmetric.\n * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting\n about the edge of the last pixel. This mode is al""so sometimes referred to as\n half-sample symmetric.\n * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around\n to the opposite edge.\n cval : float, optional\n Value to fill past edges of input if mode is \342\200\230constant\342\200\231. Default is 0.0.\n truncate : float, optional\n Truncate the filter at this many standard deviations. Default is 4.0.\n backend : {'fftw', 'numpy'}, optional\n Choose backend library for the FFT implementation.\n num_threads : int, optional\n Number of threads.\n \n Returns\n -------\n out : np.ndarray\n Returned array of same shape as `input`.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_11gaussian_filter = {"gaussian_filter", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_11gaussian_filter, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_10gaussian_filter}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_11gaussian_filter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_inp = 0; PyObject *__pyx_v_sigma = 0; PyObject *__pyx_v_order = 0; PyObject *__pyx_v_mode = 0; double __pyx_v_cval; double __pyx_v_truncate; PyObject *__pyx_v_backend = 0; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gaussian_filter (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_inp,&__pyx_n_s_sigma,&__pyx_n_s_order,&__pyx_n_s_mode,&__pyx_n_s_cval,&__pyx_n_s_truncate,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0}; PyObject* values[8] = {0,0,0,0,0,0,0,0}; values[2] = ((PyObject *)((PyObject *)__pyx_int_0)); values[3] = ((PyObject*)((PyObject*)__pyx_n_u_reflect)); values[6] = ((PyObject*)((PyObject*)__pyx_n_u_numpy)); if 
(unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gaussian_filter", 0, 2, 8, 1); __PYX_ERR(0, 352, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_order); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cval); if (value) { values[4] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_truncate); if (value) { values[5] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 6: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend); if (value) { values[6] = value; 
kw_args--; } } CYTHON_FALLTHROUGH; case 7: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[7] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gaussian_filter") < 0)) __PYX_ERR(0, 352, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); CYTHON_FALLTHROUGH; case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_inp = ((PyArrayObject *)values[0]); __pyx_v_sigma = values[1]; __pyx_v_order = values[2]; __pyx_v_mode = ((PyObject*)values[3]); if (values[4]) { __pyx_v_cval = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_cval == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 353, __pyx_L3_error) } else { __pyx_v_cval = ((double)((double)0.)); } if (values[5]) { __pyx_v_truncate = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_truncate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 353, __pyx_L3_error) } else { __pyx_v_truncate = ((double)((double)4.)); } __pyx_v_backend = ((PyObject*)values[6]); if (values[7]) { __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[7]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 354, __pyx_L3_error) } else { __pyx_v_num_threads = ((unsigned int)((unsigned int)1)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gaussian_filter", 
0, 2, 8, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 352, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_filter", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_inp), __pyx_ptype_5numpy_ndarray, 1, "inp", 0))) __PYX_ERR(0, 352, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mode), (&PyUnicode_Type), 1, "mode", 1))) __PYX_ERR(0, 352, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 353, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_10gaussian_filter(__pyx_self, __pyx_v_inp, __pyx_v_sigma, __pyx_v_order, __pyx_v_mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_backend, __pyx_v_num_threads); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_10gaussian_filter(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_inp, PyObject *__pyx_v_sigma, PyObject *__pyx_v_order, PyObject *__pyx_v_mode, double __pyx_v_cval, double __pyx_v_truncate, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) { int __pyx_v_ndim; PyArrayObject *__pyx_v_sigmas = 0; PyArrayObject *__pyx_v_orders = 0; int __pyx_v_fail; npy_intp *__pyx_v_dims; PyArrayObject *__pyx_v_out = 0; double *__pyx_v__out; double *__pyx_v__inp; unsigned long *__pyx_v__dims; double *__pyx_v__sig; unsigned int *__pyx_v__ord; int __pyx_v__mode; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gaussian_filter", 0); __Pyx_INCREF((PyObject *)__pyx_v_inp); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); 
__Pyx_AddTraceback("pyrost.bin.simulation.gaussian_filter", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_sigmas); __Pyx_XDECREF((PyObject *)__pyx_v_orders); __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_inp); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__21 = PyTuple_Pack(20, __pyx_n_s_inp, __pyx_n_s_sigma, __pyx_n_s_order, __pyx_n_s_mode, __pyx_n_s_cval, __pyx_n_s_truncate, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_ndim, __pyx_n_s_sigmas, __pyx_n_s_orders, __pyx_n_s_fail, __pyx_n_s_dims, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_inp_2, __pyx_n_s_dims_2, __pyx_n_s_sig, __pyx_n_s_ord, __pyx_n_s_mode_2); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 352, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* … */ __pyx_t_3 = PyTuple_New(6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 352, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_int_0)); __Pyx_GIVEREF(((PyObject *)__pyx_int_0)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_int_0)); __Pyx_INCREF(((PyObject*)__pyx_n_u_reflect)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_reflect)); PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject*)__pyx_n_u_reflect)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_t_4); __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy)); PyTuple_SET_ITEM(__pyx_t_3, 4, ((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 5, __pyx_t_5); __pyx_t_1 = 0; __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyDict_NewPresized(9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 352, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_inp, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 352, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_sigma, 
__pyx_n_u_object) < 0) __PYX_ERR(0, 352, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_order, __pyx_n_u_object) < 0) __PYX_ERR(0, 352, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_mode, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 352, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_cval, __pyx_n_u_double) < 0) __PYX_ERR(0, 352, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_truncate, __pyx_n_u_double) < 0) __PYX_ERR(0, 352, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 352, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 352, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 352, __pyx_L1_error) __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_11gaussian_filter, 0, __pyx_n_s_gaussian_filter, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__22)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 352, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_4, __pyx_t_3); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_4, __pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_gaussian_filter, __pyx_t_4) < 0) __PYX_ERR(0, 352, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(8, 0, 20, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_gaussian_filter, 352, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 352, __pyx_L1_error)
+353: cval: cython.double=0., truncate: cython.double=4., backend: str='numpy',
__pyx_t_1 = PyFloat_FromDouble(((double)0.)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = PyFloat_FromDouble(((double)4.)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4);
+354: num_threads: cython.uint=1) -> np.ndarray:
__pyx_t_5 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 354, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5);
355: r"""Multidimensional Gaussian filter. The multidimensional filter is implemented as
356: a sequence of 1-D FFT convolutions.
357:
358: Parameters
359: ----------
360: inp : np.ndarray
361: The input array.
362: sigma : float or list of floats
363: Standard deviation for Gaussian kernel. The standard deviations of the Gaussian
364: filter are given for each axis as a sequence, or as a single number, in which case
365: it is equal for all axes.
366: order : int or list of ints, optional
367: The order of the filter along each axis is given as a sequence of integers, or as
368: a single number. An order of 0 corresponds to convolution with a Gaussian kernel.
369: A positive order corresponds to convolution with that derivative of a Gaussian.
370: mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional
371: The mode parameter determines how the input array is extended when the filter
372: overlaps a border. Default value is 'reflect'. The valid values and their behavior
373: is as follows:
374:
375: * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all
376: values beyond the edge with the same constant value, defined by the `cval`
377: parameter.
378: * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating
379: the last pixel.
380: * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting
381: about the center of the last pixel. This mode is also sometimes referred to as
382: whole-sample symmetric.
383: * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting
384: about the edge of the last pixel. This mode is also sometimes referred to as
385: half-sample symmetric.
386: * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around
387: to the opposite edge.
388: cval : float, optional
389: Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
390: truncate : float, optional
391: Truncate the filter at this many standard deviations. Default is 4.0.
392: backend : {'fftw', 'numpy'}, optional
393: Choose backend library for the FFT implementation.
394: num_threads : int, optional
395: Number of threads.
396:
397: Returns
398: -------
399: out : np.ndarray
400: Returned array of same shape as `input`.
401: """
+402: inp = check_array(inp, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_inp, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 402, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_inp, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
403:
+404: cdef int ndim = inp.ndim
__pyx_t_2 = __pyx_v_inp->nd; __pyx_v_ndim = __pyx_t_2;
+405: cdef np.ndarray sigmas = normalize_sequence(sigma, ndim, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(__pyx_v_sigma, __pyx_v_ndim, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_sigmas = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0;
+406: cdef np.ndarray orders = normalize_sequence(order, ndim, np.NPY_UINT32)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(__pyx_v_order, __pyx_v_ndim, NPY_UINT32)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_orders = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0;
407:
+408: cdef int fail = 0
__pyx_v_fail = 0;
+409: cdef np.npy_intp *dims = inp.shape
__pyx_t_3 = __pyx_v_inp->dimensions; __pyx_v_dims = __pyx_t_3;
+410: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_FLOAT64)
__pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 410, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __pyx_t_1; __Pyx_INCREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0;
+411: cdef double *_out = <double *>np.PyArray_DATA(out)
__pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+412: cdef double *_inp = <double *>np.PyArray_DATA(inp)
__pyx_v__inp = ((double *)PyArray_DATA(__pyx_v_inp));
+413: cdef unsigned long *_dims = <unsigned long *>dims
__pyx_v__dims = ((unsigned long *)__pyx_v_dims);
+414: cdef double *_sig = <double *>np.PyArray_DATA(sigmas)
__pyx_v__sig = ((double *)PyArray_DATA(__pyx_v_sigmas));
+415: cdef unsigned *_ord = <unsigned *>np.PyArray_DATA(orders)
__pyx_v__ord = ((unsigned int *)PyArray_DATA(__pyx_v_orders));
+416: cdef int _mode = extend_mode_to_code(mode)
__pyx_t_2 = __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(__pyx_v_mode); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 416, __pyx_L1_error)
__pyx_v__mode = __pyx_t_2;
+417: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } }
+418: if backend == 'fftw':
__pyx_t_5 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 418, __pyx_L4_error) __pyx_t_6 = (__pyx_t_5 != 0); if (__pyx_t_6) { /* … */ goto __pyx_L6; }
+419: fail = gauss_filter(_out, _inp, ndim, _dims, _sig, _ord, _mode, cval, truncate, num_threads, fft_convolve_fftw)
__pyx_v_fail = gauss_filter(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__sig, __pyx_v__ord, __pyx_v__mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_num_threads, fft_convolve_fftw);
+420: elif backend == 'numpy':
__pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 420, __pyx_L4_error) __pyx_t_5 = (__pyx_t_6 != 0); if (__pyx_t_5) { /* … */ goto __pyx_L6; }
+421: fail = gauss_filter(_out, _inp, ndim, _dims, _sig, _ord, _mode, cval, truncate, num_threads, fft_convolve_np)
__pyx_v_fail = gauss_filter(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__sig, __pyx_v__ord, __pyx_v__mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_num_threads, fft_convolve_np);
422: else:
+423: raise ValueError('{:s} is invalid backend'.format(backend))
/*else*/ { { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif /*try:*/ { __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 423, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_4 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_7, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 423, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 423, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 423, __pyx_L8_error) } /*finally:*/ { __pyx_L8_error: { #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L4_error; } } } } __pyx_L6:; }
+424: if fail:
__pyx_t_5 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_5)) { /* … */ }
+425: raise RuntimeError('C backend exited with error.')
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 425, __pyx_L1_error)
+426: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
427:
+428: def gaussian_gradient_magnitude(inp: np.ndarray, sigma: object, mode: str='reflect',
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_13gaussian_gradient_magnitude(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_12gaussian_gradient_magnitude[] = "gaussian_gradient_magnitude(ndarray inp: np.ndarray, sigma: object, unicode mode: str = u'reflect', double cval: cython.double = 0., double truncate: cython.double = 4., unicode backend: str = u'numpy', unsigned int num_threads: cython.uint = 1) -> np.ndarray\nMultidimensional gradient magnitude using Gaussian derivatives. The multidimensional\n filter is implemented as a sequence of 1-D FFT convolutions.\n\n Parameters\n ----------\n inp : np.ndarray\n The input array.\n sigma : float or list of floats\n The standard deviations of the Gaussian filter are given for each axis as a sequence,\n or as a single number, in which case it is equal for all axes.\n mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional\n The mode parameter determines how the input array is extended when the filter\n overlaps a border. Default value is 'reflect'. The valid values and their behavior\n is as follows:\n\n * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all\n values beyond the edge with the same constant value, defined by the `cval`\n parameter.\n * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating\n the last pixel.\n * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting\n about the center of the last pixel. This mode is also sometimes referred to as\n whole-sample symmetric.\n * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting\n about the edge of the last pixel. 
This mode is also sometimes referred to as\n half-sample symmetric.\n * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around\n to the opposite edge.\n cval : float, optional\n Value to fill past edges of input if mode is \342\200\230constant\342\200\231. Default is 0.0.\n truncate : float"", optional\n Truncate the filter at this many standard deviations. Default is 4.0.\n backend : {'fftw', 'numpy'}, optional\n Choose backend library for the FFT implementation.\n num_threads : int, optional\n Number of threads.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_13gaussian_gradient_magnitude = {"gaussian_gradient_magnitude", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_13gaussian_gradient_magnitude, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_12gaussian_gradient_magnitude}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_13gaussian_gradient_magnitude(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_inp = 0; PyObject *__pyx_v_sigma = 0; PyObject *__pyx_v_mode = 0; double __pyx_v_cval; double __pyx_v_truncate; PyObject *__pyx_v_backend = 0; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gaussian_gradient_magnitude (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_inp,&__pyx_n_s_sigma,&__pyx_n_s_mode,&__pyx_n_s_cval,&__pyx_n_s_truncate,&__pyx_n_s_backend,&__pyx_n_s_num_threads,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; values[2] = ((PyObject*)((PyObject*)__pyx_n_u_reflect)); values[5] = ((PyObject*)((PyObject*)__pyx_n_u_numpy)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = 
PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gaussian_gradient_magnitude", 0, 2, 7, 1); __PYX_ERR(0, 428, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cval); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_truncate); if (value) { values[4] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_backend); if (value) { values[5] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 6: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[6] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gaussian_gradient_magnitude") < 0)) __PYX_ERR(0, 428, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: 
values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_inp = ((PyArrayObject *)values[0]); __pyx_v_sigma = values[1]; __pyx_v_mode = ((PyObject*)values[2]); if (values[3]) { __pyx_v_cval = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_cval == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 429, __pyx_L3_error) } else { __pyx_v_cval = ((double)((double)0.)); } if (values[4]) { __pyx_v_truncate = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_truncate == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 429, __pyx_L3_error) } else { __pyx_v_truncate = ((double)((double)4.)); } __pyx_v_backend = ((PyObject*)values[5]); if (values[6]) { __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[6]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 430, __pyx_L3_error) } else { __pyx_v_num_threads = ((unsigned int)((unsigned int)1)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gaussian_gradient_magnitude", 0, 2, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 428, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_gradient_magnitude", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_inp), __pyx_ptype_5numpy_ndarray, 1, "inp", 0))) __PYX_ERR(0, 428, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mode), (&PyUnicode_Type), 1, "mode", 1))) __PYX_ERR(0, 428, __pyx_L1_error) if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_backend), (&PyUnicode_Type), 1, "backend", 1))) __PYX_ERR(0, 430, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_12gaussian_gradient_magnitude(__pyx_self, __pyx_v_inp, __pyx_v_sigma, __pyx_v_mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_backend, __pyx_v_num_threads); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_12gaussian_gradient_magnitude(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_inp, PyObject *__pyx_v_sigma, PyObject *__pyx_v_mode, double __pyx_v_cval, double __pyx_v_truncate, PyObject *__pyx_v_backend, unsigned int __pyx_v_num_threads) { int __pyx_v_ndim; PyArrayObject *__pyx_v_sigmas = 0; int __pyx_v_fail; npy_intp *__pyx_v_dims; PyArrayObject *__pyx_v_out = 0; double *__pyx_v__out; double *__pyx_v__inp; unsigned long *__pyx_v__dims; double *__pyx_v__sig; int __pyx_v__mode; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gaussian_gradient_magnitude", 0); __Pyx_INCREF((PyObject *)__pyx_v_inp); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("pyrost.bin.simulation.gaussian_gradient_magnitude", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_sigmas); __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_inp); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__23 = PyTuple_Pack(17, __pyx_n_s_inp, __pyx_n_s_sigma, __pyx_n_s_mode, __pyx_n_s_cval, __pyx_n_s_truncate, __pyx_n_s_backend, __pyx_n_s_num_threads, __pyx_n_s_ndim, __pyx_n_s_sigmas, __pyx_n_s_fail, __pyx_n_s_dims, __pyx_n_s_out, 
__pyx_n_s_out_2, __pyx_n_s_inp_2, __pyx_n_s_dims_2, __pyx_n_s_sig, __pyx_n_s_mode_2); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 428, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* … */ __pyx_t_1 = PyTuple_New(5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 428, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject*)__pyx_n_u_reflect)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_reflect)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject*)__pyx_n_u_reflect)); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_5); __Pyx_INCREF(((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_numpy)); PyTuple_SET_ITEM(__pyx_t_1, 3, ((PyObject*)__pyx_n_u_numpy)); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 4, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 428, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_inp, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 428, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_sigma, __pyx_n_u_object) < 0) __PYX_ERR(0, 428, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_mode, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 428, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_cval, __pyx_n_u_double) < 0) __PYX_ERR(0, 428, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_truncate, __pyx_n_u_double) < 0) __PYX_ERR(0, 428, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_backend, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 428, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 428, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 428, __pyx_L1_error) __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_13gaussian_gradient_magnitude, 0, 
__pyx_n_s_gaussian_gradient_magnitude, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 428, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_t_1); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_5, __pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_gaussian_gradient_magnitude, __pyx_t_5) < 0) __PYX_ERR(0, 428, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(7, 0, 17, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_gaussian_gradient_magnitude, 428, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 428, __pyx_L1_error)
+429: cval: cython.double=0., truncate: cython.double=4.,
__pyx_t_4 = PyFloat_FromDouble(((double)0.)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyFloat_FromDouble(((double)4.)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5);
+430: backend: str='numpy', num_threads: cython.uint=1) -> np.ndarray:
__pyx_t_3 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 430, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3);
431: r"""Multidimensional gradient magnitude using Gaussian derivatives. The multidimensional
432: filter is implemented as a sequence of 1-D FFT convolutions.
433:
434: Parameters
435: ----------
436: inp : np.ndarray
437: The input array.
438: sigma : float or list of floats
439: The standard deviations of the Gaussian filter are given for each axis as a sequence,
440: or as a single number, in which case it is equal for all axes.
441: mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional
442: The mode parameter determines how the input array is extended when the filter
443: overlaps a border. Default value is 'reflect'. The valid values and their behavior
444: is as follows:
445:
446: * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all
447: values beyond the edge with the same constant value, defined by the `cval`
448: parameter.
449: * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating
450: the last pixel.
451: * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting
452: about the center of the last pixel. This mode is also sometimes referred to as
453: whole-sample symmetric.
454: * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting
455: about the edge of the last pixel. This mode is also sometimes referred to as
456: half-sample symmetric.
457: * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around
458: to the opposite edge.
459: cval : float, optional
460: Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
461: truncate : float, optional
462: Truncate the filter at this many standard deviations. Default is 4.0.
463: backend : {'fftw', 'numpy'}, optional
464: Choose backend library for the FFT implementation.
465: num_threads : int, optional
466: Number of threads.
467: """
+468: inp = check_array(inp, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_inp, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 468, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_inp, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
469:
+470: cdef int ndim = inp.ndim
__pyx_t_2 = __pyx_v_inp->nd; __pyx_v_ndim = __pyx_t_2;
+471: cdef np.ndarray sigmas = normalize_sequence(sigma, ndim, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(__pyx_v_sigma, __pyx_v_ndim, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 471, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_sigmas = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0;
472:
+473: cdef int fail = 0
__pyx_v_fail = 0;
+474: cdef np.npy_intp *dims = inp.shape
__pyx_t_3 = __pyx_v_inp->dimensions; __pyx_v_dims = __pyx_t_3;
+475: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_FLOAT64)
__pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 475, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __pyx_t_1; __Pyx_INCREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0;
+476: cdef double *_out = <double *>np.PyArray_DATA(out)
__pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+477: cdef double *_inp = <double *>np.PyArray_DATA(inp)
__pyx_v__inp = ((double *)PyArray_DATA(__pyx_v_inp));
+478: cdef unsigned long *_dims = <unsigned long *>dims
__pyx_v__dims = ((unsigned long *)__pyx_v_dims);
+479: cdef double *_sig = <double *>np.PyArray_DATA(sigmas)
__pyx_v__sig = ((double *)PyArray_DATA(__pyx_v_sigmas));
+480: cdef int _mode = extend_mode_to_code(mode)
__pyx_t_2 = __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(__pyx_v_mode); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 480, __pyx_L1_error)
__pyx_v__mode = __pyx_t_2;
+481: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } }
+482: if backend == 'fftw':
__pyx_t_5 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_fftw, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 482, __pyx_L4_error) __pyx_t_6 = (__pyx_t_5 != 0); if (__pyx_t_6) { /* … */ goto __pyx_L6; }
+483: fail = gauss_grad_mag(_out, _inp, ndim, _dims, _sig, _mode, cval, truncate, num_threads, fft_convolve_fftw)
__pyx_v_fail = gauss_grad_mag(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__sig, __pyx_v__mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_num_threads, fft_convolve_fftw);
+484: elif backend == 'numpy':
__pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_v_backend, __pyx_n_u_numpy, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 484, __pyx_L4_error) __pyx_t_5 = (__pyx_t_6 != 0); if (__pyx_t_5) { /* … */ goto __pyx_L6; }
+485: fail = gauss_grad_mag(_out, _inp, ndim, _dims, _sig, _mode, cval, truncate, num_threads, fft_convolve_np)
__pyx_v_fail = gauss_grad_mag(__pyx_v__out, __pyx_v__inp, __pyx_v_ndim, __pyx_v__dims, __pyx_v__sig, __pyx_v__mode, __pyx_v_cval, __pyx_v_truncate, __pyx_v_num_threads, fft_convolve_np);
486: else:
+487: raise ValueError('{:s} is invalid backend'.format(backend))
/*else*/ { { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif /*try:*/ { __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_s_is_invalid_backend, __pyx_n_s_format); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } __pyx_t_4 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_7, __pyx_v_backend) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_v_backend); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 487, __pyx_L8_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 487, __pyx_L8_error) } /*finally:*/ { __pyx_L8_error: { #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L4_error; } } } } __pyx_L6:; }
+488: if fail:
__pyx_t_5 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_5)) { /* … */ }
+489: raise RuntimeError('C backend exited with error.')
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 489, __pyx_L1_error)
+490: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
491:
+492: def bar_positions(x0: cython.double, x1: cython.double, b_dx: cython.double,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_15bar_positions(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_14bar_positions[] = "bar_positions(double x0: cython.double, double x1: cython.double, double b_dx: cython.double, double rd: cython.double, unsigned long seed: cython.ulong) -> np.ndarray\nGenerate a coordinate array of randomized barcode's bar positions.\n\n Parameters\n ----------\n x0 : float\n Barcode's lower bound along the x axis [um].\n x1 : float\n Barcode's upper bound along the x axis [um].\n b_dx : float\n Average bar's size [um].\n rd : float\n Random deviation of barcode's bar positions (0.0 - 1.0).\n seed : int\n Seed used for pseudo random number generation.\n\n Returns\n -------\n bx_arr : numpy.ndarray\n Array of barcode's bar coordinates.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_15bar_positions = {"bar_positions", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_15bar_positions, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_14bar_positions}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_15bar_positions(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { double __pyx_v_x0; double __pyx_v_x1; double __pyx_v_b_dx; double __pyx_v_rd; unsigned long __pyx_v_seed; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bar_positions (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x0,&__pyx_n_s_x1,&__pyx_n_s_b_dx,&__pyx_n_s_rd,&__pyx_n_s_seed,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 
2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x0)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, 1); __PYX_ERR(0, 492, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_b_dx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, 2); __PYX_ERR(0, 492, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_rd)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, 3); __PYX_ERR(0, 492, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, 4); __PYX_ERR(0, 492, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bar_positions") < 0)) __PYX_ERR(0, 492, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_x0 = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_x0 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 492, __pyx_L3_error) __pyx_v_x1 = __pyx_PyFloat_AsDouble(values[1]); if 
(unlikely((__pyx_v_x1 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 492, __pyx_L3_error) __pyx_v_b_dx = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_b_dx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 492, __pyx_L3_error) __pyx_v_rd = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_rd == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 493, __pyx_L3_error) __pyx_v_seed = __Pyx_PyInt_As_unsigned_long(values[4]); if (unlikely((__pyx_v_seed == (unsigned long)-1) && PyErr_Occurred())) __PYX_ERR(0, 493, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bar_positions", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 492, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.bar_positions", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_14bar_positions(__pyx_self, __pyx_v_x0, __pyx_v_x1, __pyx_v_b_dx, __pyx_v_rd, __pyx_v_seed); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_14bar_positions(CYTHON_UNUSED PyObject *__pyx_self, double __pyx_v_x0, double __pyx_v_x1, double __pyx_v_b_dx, double __pyx_v_rd, unsigned long __pyx_v_seed) { npy_intp __pyx_v_size; npy_intp *__pyx_v_dims; PyArrayObject *__pyx_v_bars = 0; double *__pyx_v__bars; __Pyx_LocalBuf_ND __pyx_pybuffernd_bars; __Pyx_Buffer __pyx_pybuffer_bars; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bar_positions", 0); __pyx_pybuffer_bars.pybuffer.buf = NULL; __pyx_pybuffer_bars.refcount = 0; __pyx_pybuffernd_bars.data = NULL; __pyx_pybuffernd_bars.rcbuffer = &__pyx_pybuffer_bars; /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); { 
PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_bars.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("pyrost.bin.simulation.bar_positions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_bars.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_bars); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__25 = PyTuple_Pack(9, __pyx_n_s_x0, __pyx_n_s_x1, __pyx_n_s_b_dx, __pyx_n_s_rd, __pyx_n_s_seed, __pyx_n_s_size, __pyx_n_s_dims, __pyx_n_s_bars, __pyx_n_s_bars_2); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 492, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* … */ __pyx_t_5 = __Pyx_PyDict_NewPresized(6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 492, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_x0, __pyx_n_u_double) < 0) __PYX_ERR(0, 492, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_x1, __pyx_n_u_double) < 0) __PYX_ERR(0, 492, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_b_dx, __pyx_n_u_double) < 0) __PYX_ERR(0, 492, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_rd, __pyx_n_u_double) < 0) __PYX_ERR(0, 492, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_seed, __pyx_kp_u_unsigned_long) < 0) __PYX_ERR(0, 492, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 492, __pyx_L1_error) __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_15bar_positions, 0, __pyx_n_s_bar_positions, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__26)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 492, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_CyFunction_SetAnnotationsDict(__pyx_t_3, __pyx_t_5); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_bar_positions, __pyx_t_3) < 0) __PYX_ERR(0, 492, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(5, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_bar_positions, 492, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 492, __pyx_L1_error)
493: rd: cython.double, seed: cython.ulong) -> np.ndarray:
494: """Generate a coordinate array of randomized barcode's bar positions.
495:
496: Parameters
497: ----------
498: x0 : float
499: Barcode's lower bound along the x axis [um].
500: x1 : float
501: Barcode's upper bound along the x axis [um].
502: b_dx : float
503: Average bar's size [um].
504: rd : float
505: Random deviation of barcode's bar positions (0.0 - 1.0).
506: seed : int
507: Seed used for pseudo random number generation.
508:
509: Returns
510: -------
511: bx_arr : numpy.ndarray
512: Array of barcode's bar coordinates.
513: """
+514: cdef np.npy_intp size = 2 * (<np.npy_intp>((x1 - x0) / 2 / b_dx) + 1) if x1 > x0 else 0
if (((__pyx_v_x1 > __pyx_v_x0) != 0)) { __pyx_t_1 = (2 * (((npy_intp)(((__pyx_v_x1 - __pyx_v_x0) / 2.0) / __pyx_v_b_dx)) + 1)); } else { __pyx_t_1 = 0; } __pyx_v_size = __pyx_t_1;
+515: cdef np.npy_intp *dims = [size,]
__pyx_t_2[0] = __pyx_v_size; __pyx_v_dims = __pyx_t_2;
+516: cdef np.ndarray[double] bars = <np.ndarray>np.PyArray_SimpleNew(1, dims, np.NPY_FLOAT64)
__pyx_t_3 = PyArray_SimpleNew(1, __pyx_v_dims, NPY_FLOAT64); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_bars.rcbuffer->pybuffer, (PyObject*)((PyArrayObject *)__pyx_t_4), &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_bars = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_bars.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 516, __pyx_L1_error) } else {__pyx_pybuffernd_bars.diminfo[0].strides = __pyx_pybuffernd_bars.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_bars.diminfo[0].shape = __pyx_pybuffernd_bars.rcbuffer->pybuffer.shape[0]; } } __pyx_v_bars = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0;
+517: cdef double *_bars = <double *>np.PyArray_DATA(bars)
__pyx_v__bars = ((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_bars)));
+518: if size:
__pyx_t_5 = (__pyx_v_size != 0); if (__pyx_t_5) { /* … */ }
+519: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L6; } __pyx_L6:; } }
+520: barcode_bars(_bars, size, x0, b_dx, rd, seed)
barcode_bars(__pyx_v__bars, __pyx_v_size, __pyx_v_x0, __pyx_v_b_dx, __pyx_v_rd, __pyx_v_seed); }
+521: return bars
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_bars)); __pyx_r = ((PyArrayObject *)__pyx_v_bars); goto __pyx_L0;
522:
+523: cdef np.ndarray ml_profile_wrapper(np.ndarray x_arr, np.ndarray layers, complex mt0,
static PyArrayObject *__pyx_f_6pyrost_3bin_10simulation_ml_profile_wrapper(PyArrayObject *__pyx_v_x_arr, PyArrayObject *__pyx_v_layers, __pyx_t_double_complex __pyx_v_mt0, __pyx_t_double_complex __pyx_v_mt1, __pyx_t_double_complex __pyx_v_mt2, double __pyx_v_sigma, unsigned int __pyx_v_num_threads) { int __pyx_v_fail; int __pyx_v_ndim; npy_intp *__pyx_v_dims; PyArrayObject *__pyx_v_out = 0; npy_intp __pyx_v_isize; npy_intp __pyx_v_lsize; __pyx_t_double_complex *__pyx_v__out; double *__pyx_v__x; double *__pyx_v__lyrs; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("ml_profile_wrapper", 0); __Pyx_INCREF((PyObject *)__pyx_v_x_arr); __Pyx_INCREF((PyObject *)__pyx_v_layers); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("pyrost.bin.simulation.ml_profile_wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_x_arr); __Pyx_XDECREF((PyObject *)__pyx_v_layers); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
524: complex mt1, complex mt2, double sigma, unsigned num_threads):
+525: x_arr = check_array(x_arr, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_x_arr, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 525, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_x_arr, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
+526: layers = check_array(layers, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_layers, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_layers, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
527:
+528: cdef int fail = 0
__pyx_v_fail = 0;
+529: cdef int ndim = x_arr.ndim
__pyx_t_2 = __pyx_v_x_arr->nd; __pyx_v_ndim = __pyx_t_2;
+530: cdef np.npy_intp *dims = x_arr.shape
__pyx_t_3 = __pyx_v_x_arr->dimensions; __pyx_v_dims = __pyx_t_3;
+531: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_COMPLEX128)
__pyx_t_1 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_COMPLEX128); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __pyx_t_1; __Pyx_INCREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_4); __pyx_t_4 = 0;
532:
+533: cdef np.npy_intp isize = np.PyArray_SIZE(x_arr)
__pyx_v_isize = PyArray_SIZE(__pyx_v_x_arr);
+534: cdef np.npy_intp lsize = np.PyArray_SIZE(layers)
__pyx_v_lsize = PyArray_SIZE(__pyx_v_layers);
+535: cdef complex *_out = <complex *>np.PyArray_DATA(out)
__pyx_v__out = ((__pyx_t_double_complex *)PyArray_DATA(__pyx_v_out));
+536: cdef double *_x = <double *>np.PyArray_DATA(x_arr)
__pyx_v__x = ((double *)PyArray_DATA(__pyx_v_x_arr));
+537: cdef double *_lyrs = <double *>np.PyArray_DATA(layers)
__pyx_v__lyrs = ((double *)PyArray_DATA(__pyx_v_layers));
+538: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } }
+539: fail = ml_profile(_out, _x, isize, _lyrs, lsize, mt0, mt1, mt2, sigma, num_threads)
__pyx_v_fail = ml_profile(__pyx_v__out, __pyx_v__x, __pyx_v_isize, __pyx_v__lyrs, __pyx_v_lsize, __pyx_v_mt0, __pyx_v_mt1, __pyx_v_mt2, __pyx_v_sigma, __pyx_v_num_threads); }
+540: if fail:
__pyx_t_5 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_5)) { /* … */ }
+541: raise RuntimeError('C backend exited with error.')
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 541, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 541, __pyx_L1_error)
+542: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
543:
+544: def barcode_profile(x_arr: np.ndarray, bars: np.ndarray, bulk_atn: cython.double,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_17barcode_profile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_16barcode_profile[] = "barcode_profile(ndarray x_arr: np.ndarray, ndarray bars: np.ndarray, double bulk_atn: cython.double, double bar_atn: cython.double, double bar_sigma: cython.double, unsigned int num_threads: cython.uint) -> np.ndarray\nReturn an array of barcode's transmission profile calculated\n at `x_arr` coordinates.\n\n Parameters\n ----------\n x_arr : numpy.ndarray\n Array of the coordinates, where the transmission coefficients\n are calculated [um]. \n bars : numpy.ndarray\n Coordinates of barcode's bar positions [um].\n bulk_atn : float\n Barcode's bulk attenuation coefficient (0.0 - 1.0).\n bar_atn : float\n Barcode's bar attenuation coefficient (0.0 - 1.0).\n bar_sigma : float\n Bar's blurriness width [um].\n num_threads : int, optional\n Number of threads.\n \n Returns\n -------\n bar_profile : numpy.ndarray\n Array of barcode's transmission profiles.\n\n Notes\n -----\n Barcode's transmission profile is simulated with a set\n of error functions:\n \n .. 
math::\n \\begin{multline}\n T_{b}(x) = 1 - \\frac{T_{bulk}}{2} \\left\\{\n \\mathrm{erf}\\left[ \\frac{x - x_{bar}[0]}{\\sqrt{2} \\sigma} \\right] +\n \\mathrm{erf}\\left[ \\frac{x_{bar}[n - 1] - x}{\\sqrt{2} \\sigma} \\right]\n \\right\\} -\\\\\n \\frac{T_{bar}}{4} \\sum_{i = 1}^{n - 2} \\left\\{\n 2 \\mathrm{erf}\\left[ \\frac{x - x_{bar}[i]}{\\sqrt{2} \\sigma} \\right] -\n \\mathrm{erf}\\left[ \\frac{x - x_{bar}[i - 1]}{\\sqrt{2} \\sigma} \\right] -\n \\mathrm{erf}\\left[ \\frac{x - x_{bar}[i + 1]}{\\sqrt{2} \\sigma} \\right]\n \\right\\}\n \\end{multline}\n \n where :math:`x_{bar}` is an array of bar coordinates.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_17barcode_profile = {"barcode_profile", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_17barcode_profile, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_16barcode_profile}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_17barcode_profile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_x_arr = 0; PyArrayObject *__pyx_v_bars = 0; double __pyx_v_bulk_atn; double __pyx_v_bar_atn; double __pyx_v_bar_sigma; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("barcode_profile (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x_arr,&__pyx_n_s_bars,&__pyx_n_s_bulk_atn,&__pyx_n_s_bar_atn,&__pyx_n_s_bar_sigma,&__pyx_n_s_num_threads,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x_arr)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bars)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 1); __PYX_ERR(0, 544, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bulk_atn)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 2); __PYX_ERR(0, 544, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bar_atn)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 3); __PYX_ERR(0, 544, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bar_sigma)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 4); __PYX_ERR(0, 544, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, 5); __PYX_ERR(0, 544, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "barcode_profile") < 0)) __PYX_ERR(0, 544, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = 
PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); } __pyx_v_x_arr = ((PyArrayObject *)values[0]); __pyx_v_bars = ((PyArrayObject *)values[1]); __pyx_v_bulk_atn = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_bulk_atn == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 544, __pyx_L3_error) __pyx_v_bar_atn = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_bar_atn == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 545, __pyx_L3_error) __pyx_v_bar_sigma = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_bar_sigma == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 545, __pyx_L3_error) __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[5]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 546, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("barcode_profile", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 544, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.barcode_profile", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x_arr), __pyx_ptype_5numpy_ndarray, 1, "x_arr", 0))) __PYX_ERR(0, 544, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_bars), __pyx_ptype_5numpy_ndarray, 1, "bars", 0))) __PYX_ERR(0, 544, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_16barcode_profile(__pyx_self, __pyx_v_x_arr, __pyx_v_bars, __pyx_v_bulk_atn, __pyx_v_bar_atn, __pyx_v_bar_sigma, __pyx_v_num_threads); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_16barcode_profile(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject 
*__pyx_v_x_arr, PyArrayObject *__pyx_v_bars, double __pyx_v_bulk_atn, double __pyx_v_bar_atn, double __pyx_v_bar_sigma, unsigned int __pyx_v_num_threads) { __pyx_t_double_complex __pyx_v_mt0; __pyx_t_double_complex __pyx_v_mt1; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("barcode_profile", 0); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("pyrost.bin.simulation.barcode_profile", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__27 = PyTuple_Pack(8, __pyx_n_s_x_arr, __pyx_n_s_bars, __pyx_n_s_bulk_atn, __pyx_n_s_bar_atn, __pyx_n_s_bar_sigma, __pyx_n_s_num_threads, __pyx_n_s_mt0, __pyx_n_s_mt1); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(0, 544, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); /* … */ __pyx_t_3 = __Pyx_PyDict_NewPresized(7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 544, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_x_arr, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 544, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_bars, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 544, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_bulk_atn, __pyx_n_u_double) < 0) __PYX_ERR(0, 544, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_bar_atn, __pyx_n_u_double) < 0) __PYX_ERR(0, 544, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_bar_sigma, __pyx_n_u_double) < 0) __PYX_ERR(0, 544, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 544, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 544, __pyx_L1_error) __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_17barcode_profile, 0, __pyx_n_s_barcode_profile, NULL, __pyx_n_s_pyrost_bin_simulation, 
__pyx_d, ((PyObject *)__pyx_codeobj__28)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 544, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_5, __pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_barcode_profile, __pyx_t_5) < 0) __PYX_ERR(0, 544, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(6, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_barcode_profile, 544, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(0, 544, __pyx_L1_error)
545: bar_atn: cython.double, bar_sigma: cython.double,
546: num_threads: cython.uint) -> np.ndarray:
547: r"""Return an array of barcode's transmission profile calculated
548: at `x_arr` coordinates.
549:
550: Parameters
551: ----------
552: x_arr : numpy.ndarray
553: Array of the coordinates, where the transmission coefficients
554: are calculated [um].
555: bars : numpy.ndarray
556: Coordinates of barcode's bar positions [um].
557: bulk_atn : float
558: Barcode's bulk attenuation coefficient (0.0 - 1.0).
559: bar_atn : float
560: Barcode's bar attenuation coefficient (0.0 - 1.0).
561: bar_sigma : float
562: Bar's blurriness width [um].
563: num_threads : int, optional
564: Number of threads.
565:
566: Returns
567: -------
568: bar_profile : numpy.ndarray
569: Array of barcode's transmission profiles.
570:
571: Notes
572: -----
573: Barcode's transmission profile is simulated with a set
574: of error functions:
575:
576: .. math::
577: \begin{multline}
578: T_{b}(x) = 1 - \frac{T_{bulk}}{2} \left\{
579: \mathrm{erf}\left[ \frac{x - x_{bar}[0]}{\sqrt{2} \sigma} \right] +
580: \mathrm{erf}\left[ \frac{x_{bar}[n - 1] - x}{\sqrt{2} \sigma} \right]
581: \right\} -\\
582: \frac{T_{bar}}{4} \sum_{i = 1}^{n - 2} \left\{
583: 2 \mathrm{erf}\left[ \frac{x - x_{bar}[i]}{\sqrt{2} \sigma} \right] -
584: \mathrm{erf}\left[ \frac{x - x_{bar}[i - 1]}{\sqrt{2} \sigma} \right] -
585: \mathrm{erf}\left[ \frac{x - x_{bar}[i + 1]}{\sqrt{2} \sigma} \right]
586: \right\}
587: \end{multline}
588:
589: where :math:`x_{bar}` is an array of bar coordinates.
590: """
+591: cdef complex mt0 = -1j * log(1 - bulk_atn)
__pyx_v_mt0 = __Pyx_c_prod_double(__Pyx_c_neg_double(__pyx_t_double_complex_from_parts(0, 1.0)), __pyx_t_double_complex_from_parts(log((1.0 - __pyx_v_bulk_atn)), 0));
+592: cdef complex mt1 = -1j * log(1 - bar_atn)
__pyx_v_mt1 = __Pyx_c_prod_double(__Pyx_c_neg_double(__pyx_t_double_complex_from_parts(0, 1.0)), __pyx_t_double_complex_from_parts(log((1.0 - __pyx_v_bar_atn)), 0));
+593: return ml_profile_wrapper(x_arr, bars, mt0, mt1, 0., bar_sigma, num_threads)
__Pyx_XDECREF(((PyObject *)__pyx_r)); __pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_ml_profile_wrapper(__pyx_v_x_arr, __pyx_v_bars, __pyx_v_mt0, __pyx_v_mt1, __pyx_t_double_complex_from_parts(0., 0), __pyx_v_bar_sigma, __pyx_v_num_threads)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0;
594:
+595: def mll_profile(x_arr: np.ndarray, layers: np.ndarray, complex mt0,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_19mll_profile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_18mll_profile[] = "mll_profile(ndarray x_arr: np.ndarray, ndarray layers: np.ndarray, double complex mt0, double complex mt1, double sigma: cython.double, unsigned int num_threads: cython.uint) -> np.ndarray\nReturn an array of MLL's transmission profile calculated\n at `x_arr` coordinates.\n\n Parameters\n ----------\n x_arr : numpy.ndarray\n Array of the coordinates, where the transmission coefficients\n are calculated [um]. \n layers : numpy.ndarray\n Coordinates of MLL's layers positions [um].\n mt0 : complex\n Fresnel transmission coefficient for the first material of MLL's\n bilayer.\n mt1 : complex\n Fresnel transmission coefficient for the first material of MLL's\n bilayer.\n sigma : float\n Interdiffusion length [um].\n num_threads : int, optional\n Number of threads.\n \n Returns\n -------\n bar_profile : numpy.ndarray\n Array of barcode's transmission profiles.\n\n Notes\n -----\n MLL's transmission profile is simulated with a set\n of error functions:\n \n .. 
math::\n \\begin{multline}\n T_{b}(x) = 1 - \\frac{T_{bulk}}{2} \\left\\{\n \\mathrm{erf}\\left[ \\frac{x - x_{lyr}[0]}{\\sqrt{2} \\sigma} \\right] +\n \\mathrm{erf}\\left[ \\frac{x_{lyr}[n - 1] - x}{\\sqrt{2} \\sigma} \\right]\n \\right\\} -\\\\\n \\frac{T_{bar}}{4} \\sum_{i = 1}^{n - 2} \\left\\{\n 2 \\mathrm{erf}\\left[ \\frac{x - x_{lyr}[i]}{\\sqrt{2} \\sigma} \\right] -\n \\mathrm{erf}\\left[ \\frac{x - x_{lyr}[i - 1]}{\\sqrt{2} \\sigma} \\right] -\n \\mathrm{erf}\\left[ \\frac{x - x_{lyr}[i + 1]}{\\sqrt{2} \\sigma} \\right]\n \\right\\}\n \\end{multline}\n \n where :math:`x_{lyr}` is an array of MLL's layer coordinates.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_19mll_profile = {"mll_profile", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_19mll_profile, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_18mll_profile}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_19mll_profile(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_x_arr = 0; PyArrayObject *__pyx_v_layers = 0; __pyx_t_double_complex __pyx_v_mt0; __pyx_t_double_complex __pyx_v_mt1; double __pyx_v_sigma; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mll_profile (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x_arr,&__pyx_n_s_layers,&__pyx_n_s_mt0,&__pyx_n_s_mt1,&__pyx_n_s_sigma,&__pyx_n_s_num_threads,0}; PyObject* values[6] = {0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x_arr)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_layers)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 1); __PYX_ERR(0, 595, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mt0)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 2); __PYX_ERR(0, 595, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mt1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 3); __PYX_ERR(0, 595, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 4); __PYX_ERR(0, 595, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, 5); __PYX_ERR(0, 595, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "mll_profile") < 0)) __PYX_ERR(0, 595, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = 
PyTuple_GET_ITEM(__pyx_args, 5); } __pyx_v_x_arr = ((PyArrayObject *)values[0]); __pyx_v_layers = ((PyArrayObject *)values[1]); __pyx_v_mt0 = __Pyx_PyComplex_As___pyx_t_double_complex(values[2]); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 595, __pyx_L3_error) __pyx_v_mt1 = __Pyx_PyComplex_As___pyx_t_double_complex(values[3]); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 596, __pyx_L3_error) __pyx_v_sigma = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_sigma == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 596, __pyx_L3_error) __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[5]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 596, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("mll_profile", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 595, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.mll_profile", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x_arr), __pyx_ptype_5numpy_ndarray, 1, "x_arr", 0))) __PYX_ERR(0, 595, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_layers), __pyx_ptype_5numpy_ndarray, 1, "layers", 0))) __PYX_ERR(0, 595, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_18mll_profile(__pyx_self, __pyx_v_x_arr, __pyx_v_layers, __pyx_v_mt0, __pyx_v_mt1, __pyx_v_sigma, __pyx_v_num_threads); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_18mll_profile(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_x_arr, PyArrayObject *__pyx_v_layers, __pyx_t_double_complex __pyx_v_mt0, __pyx_t_double_complex __pyx_v_mt1, 
double __pyx_v_sigma, unsigned int __pyx_v_num_threads) { PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mll_profile", 0); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("pyrost.bin.simulation.mll_profile", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__29 = PyTuple_Pack(6, __pyx_n_s_x_arr, __pyx_n_s_layers, __pyx_n_s_mt0, __pyx_n_s_mt1, __pyx_n_s_sigma, __pyx_n_s_num_threads); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 595, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); /* … */ __pyx_t_5 = __Pyx_PyDict_NewPresized(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_x_arr, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 595, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_layers, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 595, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_sigma, __pyx_n_u_double) < 0) __PYX_ERR(0, 595, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 595, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 595, __pyx_L1_error) __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_19mll_profile, 0, __pyx_n_s_mll_profile, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__30)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_3, __pyx_t_5); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_mll_profile, __pyx_t_3) < 0) __PYX_ERR(0, 595, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(6, 0, 6, 0, 
CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_mll_profile, 595, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 595, __pyx_L1_error)
596: complex mt1, sigma: cython.double, num_threads: cython.uint) -> np.ndarray:
597: r"""Return an array of MLL's transmission profile calculated
598: at `x_arr` coordinates.
599:
600: Parameters
601: ----------
602: x_arr : numpy.ndarray
603: Array of the coordinates, where the transmission coefficients
604: are calculated [um].
605: layers : numpy.ndarray
606: Coordinates of MLL's layers positions [um].
607: mt0 : complex
608: Fresnel transmission coefficient for the first material of MLL's
609: bilayer.
610: mt1 : complex
611: Fresnel transmission coefficient for the second material of MLL's
612: bilayer.
613: sigma : float
614: Interdiffusion length [um].
615: num_threads : int
616: Number of threads.
617:
618: Returns
619: -------
620: profile : numpy.ndarray
621: Array of MLL's transmission profiles.
622:
623: Notes
624: -----
625: MLL's transmission profile is simulated with a set
626: of error functions:
627:
628: .. math::
629: \begin{multline}
630: T_{b}(x) = 1 - \frac{T_{bulk}}{2} \left\{
631: \mathrm{erf}\left[ \frac{x - x_{lyr}[0]}{\sqrt{2} \sigma} \right] +
632: \mathrm{erf}\left[ \frac{x_{lyr}[n - 1] - x}{\sqrt{2} \sigma} \right]
633: \right\} -\\
634: \frac{T_{bar}}{4} \sum_{i = 1}^{n - 2} \left\{
635: 2 \mathrm{erf}\left[ \frac{x - x_{lyr}[i]}{\sqrt{2} \sigma} \right] -
636: \mathrm{erf}\left[ \frac{x - x_{lyr}[i - 1]}{\sqrt{2} \sigma} \right] -
637: \mathrm{erf}\left[ \frac{x - x_{lyr}[i + 1]}{\sqrt{2} \sigma} \right]
638: \right\}
639: \end{multline}
640:
641: where :math:`x_{lyr}` is an array of MLL's layer coordinates.
642: """
+643: return ml_profile_wrapper(x_arr, layers, 0., mt0, mt1, sigma, num_threads)
__Pyx_XDECREF(((PyObject *)__pyx_r)); __pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_ml_profile_wrapper(__pyx_v_x_arr, __pyx_v_layers, __pyx_t_double_complex_from_parts(0., 0), __pyx_v_mt0, __pyx_v_mt1, __pyx_v_sigma, __pyx_v_num_threads)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 643, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0;
644:
+645: def make_frames(pfx: np.ndarray, pfy: np.ndarray, dx: cython.double, dy: cython.double,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_21make_frames(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_20make_frames[] = "make_frames(ndarray pfx: np.ndarray, ndarray pfy: np.ndarray, double dx: cython.double, double dy: cython.double, tuple shape: tuple, long seed: cython.long, unsigned int num_threads: cython.uint) -> np.ndarray\nGenerate intensity frames from one-dimensional intensity profiles (`pfx`,\n `pfy`) and whitefield profiles (`wfx`, `wfy`). Intensity profiles resized into\n the shape of a frame. Poisson noise is applied if `seed` is non-negative.\n\n Parameters\n ----------\n pfx : numpy.ndarray\n Intensity profile along the x axis.\n pfy : numpy.ndarray\n Intensity profile along the y axis.\n dx : float\n Sampling interval along the x axis [um].\n dy : float\n Sampling interval along the y axis [um].\n shape : tuple\n Shape of the detector array.\n seed : int, optional\n Seed for pseudo-random number generation.\n num_threads : int, optional\n Number of threads.\n\n Returns\n -------\n frames : numpy.ndarray\n Intensity frames.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_21make_frames = {"make_frames", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_21make_frames, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_20make_frames}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_21make_frames(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_pfx = 0; PyArrayObject *__pyx_v_pfy = 0; double __pyx_v_dx; double __pyx_v_dy; PyObject *__pyx_v_shape = 0; long __pyx_v_seed; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("make_frames (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s_pfx,&__pyx_n_s_pfy,&__pyx_n_s_dx,&__pyx_n_s_dy,&__pyx_n_s_shape,&__pyx_n_s_seed,&__pyx_n_s_num_threads,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); CYTHON_FALLTHROUGH; case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pfx)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pfy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 1); __PYX_ERR(0, 645, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 2); __PYX_ERR(0, 645, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 3); __PYX_ERR(0, 645, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 4); __PYX_ERR(0, 645, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 5: if (likely((values[5] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_seed)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 5); __PYX_ERR(0, 645, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 6: if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, 6); __PYX_ERR(0, 645, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "make_frames") < 0)) __PYX_ERR(0, 645, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); } __pyx_v_pfx = ((PyArrayObject *)values[0]); __pyx_v_pfy = ((PyArrayObject *)values[1]); __pyx_v_dx = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_dx == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 645, __pyx_L3_error) __pyx_v_dy = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_dy == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 645, __pyx_L3_error) __pyx_v_shape = ((PyObject*)values[4]); __pyx_v_seed = __Pyx_PyInt_As_long(values[5]); if (unlikely((__pyx_v_seed == (long)-1) && PyErr_Occurred())) __PYX_ERR(0, 646, __pyx_L3_error) __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[6]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 646, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("make_frames", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 645, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.make_frames", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pfx), __pyx_ptype_5numpy_ndarray, 1, "pfx", 0))) __PYX_ERR(0, 645, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pfy), __pyx_ptype_5numpy_ndarray, 1, "pfy", 0))) __PYX_ERR(0, 645, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(0, 646, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_20make_frames(__pyx_self, __pyx_v_pfx, __pyx_v_pfy, __pyx_v_dx, __pyx_v_dy, __pyx_v_shape, __pyx_v_seed, __pyx_v_num_threads); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_20make_frames(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_pfx, PyArrayObject *__pyx_v_pfy, double __pyx_v_dx, double __pyx_v_dy, PyObject *__pyx_v_shape, long __pyx_v_seed, unsigned int __pyx_v_num_threads) { int __pyx_v_fail; npy_intp *__pyx_v_oshape; PyArrayObject *__pyx_v_out = 0; unsigned long *__pyx_v__ishape; unsigned long *__pyx_v__oshape; double *__pyx_v__out; double *__pyx_v__pfx; double *__pyx_v__pfy; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("make_frames", 0); __Pyx_INCREF((PyObject *)__pyx_v_pfx); __Pyx_INCREF((PyObject *)__pyx_v_pfy); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("pyrost.bin.simulation.make_frames", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_pfx); __Pyx_XDECREF((PyObject *)__pyx_v_pfy); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ 
__pyx_tuple__31 = PyTuple_Pack(15, __pyx_n_s_pfx, __pyx_n_s_pfy, __pyx_n_s_dx, __pyx_n_s_dy, __pyx_n_s_shape, __pyx_n_s_seed, __pyx_n_s_num_threads, __pyx_n_s_fail, __pyx_n_s_oshape, __pyx_n_s_out, __pyx_n_s_ishape, __pyx_n_s_oshape_2, __pyx_n_s_out_2, __pyx_n_s_pfx_2, __pyx_n_s_pfy_2); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 645, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); /* … */ __pyx_t_3 = __Pyx_PyDict_NewPresized(8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_pfx, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 645, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_pfy, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 645, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dx, __pyx_n_u_double) < 0) __PYX_ERR(0, 645, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dy, __pyx_n_u_double) < 0) __PYX_ERR(0, 645, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_shape, __pyx_n_u_tuple) < 0) __PYX_ERR(0, 645, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_seed, __pyx_n_u_long) < 0) __PYX_ERR(0, 645, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 645, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 645, __pyx_L1_error) __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_21make_frames, 0, __pyx_n_s_make_frames, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__32)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_5, __pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_make_frames, __pyx_t_5) < 0) __PYX_ERR(0, 645, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(7, 0, 15, 0, CO_OPTIMIZED|CO_NEWLOCALS, 
__pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_make_frames, 645, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(0, 645, __pyx_L1_error)
646: shape: tuple, seed: cython.long, num_threads: cython.uint) -> np.ndarray:
647: """Generate intensity frames from one-dimensional intensity profiles (`pfx`,
648: `pfy`) and whitefield profiles (`wfx`, `wfy`). Intensity profiles resized into
649: the shape of a frame. Poisson noise is applied if `seed` is non-negative.
650:
651: Parameters
652: ----------
653: pfx : numpy.ndarray
654: Intensity profile along the x axis.
655: pfy : numpy.ndarray
656: Intensity profile along the y axis.
657: dx : float
658: Sampling interval along the x axis [um].
659: dy : float
660: Sampling interval along the y axis [um].
661: shape : tuple
662: Shape of the detector array.
663: seed : int
664: Seed for pseudo-random number generation.
665: num_threads : int
666: Number of threads.
667:
668: Returns
669: -------
670: frames : numpy.ndarray
671: Intensity frames.
672: """
+673: pfx = check_array(pfx, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_pfx, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_pfx, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
+674: pfy = check_array(pfy, np.NPY_FLOAT64)
__pyx_t_1 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_pfy, NPY_FLOAT64)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 674, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF_SET(__pyx_v_pfy, ((PyArrayObject *)__pyx_t_1)); __pyx_t_1 = 0;
675:
+676: cdef int fail = 0
__pyx_v_fail = 0;
+677: cdef np.npy_intp *oshape = [pfx.shape[0], <np.npy_intp>(shape[0]), <np.npy_intp>(shape[1])]
if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(0, 677, __pyx_L1_error) } __pyx_t_2 = __Pyx_PyInt_As_Py_intptr_t(PyTuple_GET_ITEM(__pyx_v_shape, 0)); if (unlikely((__pyx_t_2 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 677, __pyx_L1_error) if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(0, 677, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyInt_As_Py_intptr_t(PyTuple_GET_ITEM(__pyx_v_shape, 1)); if (unlikely((__pyx_t_3 == ((npy_intp)-1)) && PyErr_Occurred())) __PYX_ERR(0, 677, __pyx_L1_error) __pyx_t_4[0] = (__pyx_v_pfx->dimensions[0]); __pyx_t_4[1] = ((npy_intp)__pyx_t_2); __pyx_t_4[2] = ((npy_intp)__pyx_t_3); __pyx_v_oshape = __pyx_t_4;
+678: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(3, oshape, np.NPY_FLOAT64)
__pyx_t_1 = PyArray_SimpleNew(3, __pyx_v_oshape, NPY_FLOAT64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 678, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __pyx_t_1; __Pyx_INCREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0;
+679: cdef unsigned long *_ishape = [<unsigned long>(pfx.shape[0]), <unsigned long>(pfy.shape[0]),
__pyx_t_6[0] = ((unsigned long)(__pyx_v_pfx->dimensions[0])); __pyx_t_6[1] = ((unsigned long)(__pyx_v_pfy->dimensions[0])); __pyx_t_6[2] = ((unsigned long)(__pyx_v_pfx->dimensions[1])); __pyx_v__ishape = __pyx_t_6;
680: <unsigned long>(pfx.shape[1])]
+681: cdef unsigned long *_oshape = [<unsigned long>(oshape[0]), <unsigned long>(oshape[1]), <unsigned long>(oshape[2])]
__pyx_t_7[0] = ((unsigned long)(__pyx_v_oshape[0])); __pyx_t_7[1] = ((unsigned long)(__pyx_v_oshape[1])); __pyx_t_7[2] = ((unsigned long)(__pyx_v_oshape[2])); __pyx_v__oshape = __pyx_t_7;
+682: cdef double *_out = <double *>np.PyArray_DATA(out)
__pyx_v__out = ((double *)PyArray_DATA(__pyx_v_out));
+683: cdef double *_pfx = <double *>np.PyArray_DATA(pfx)
__pyx_v__pfx = ((double *)PyArray_DATA(__pyx_v_pfx));
+684: cdef double *_pfy = <double *>np.PyArray_DATA(pfy)
__pyx_v__pfy = ((double *)PyArray_DATA(__pyx_v_pfy));
+685: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } }
+686: fail = frames(_out, _pfx, _pfy, dx, dy, _ishape, _oshape, seed, num_threads)
__pyx_v_fail = frames(__pyx_v__out, __pyx_v__pfx, __pyx_v__pfy, __pyx_v_dx, __pyx_v_dy, __pyx_v__ishape, __pyx_v__oshape, __pyx_v_seed, __pyx_v_num_threads); }
+687: if fail:
__pyx_t_8 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_8)) { /* … */ }
+688: raise RuntimeError('C backend exited with error.')
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 688, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 688, __pyx_L1_error)
+689: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
690:
+691: def median(data: np.ndarray, mask: np.ndarray=None, axis: cython.int=0,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_23median(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_22median[] = "median(ndarray data: np.ndarray, ndarray mask: np.ndarray = None, int axis: cython.int = 0, unsigned int num_threads: cython.uint = 1) -> np.ndarray\nCalculate a median along the `axis`.\n\n Parameters\n ----------\n data : numpy.ndarray\n Intensity frames.\n mask : numpy.ndarray, optional\n Bad pixel mask.\n axis : int, optional\n Array axis along which median values are calculated.\n num_threads : int, optional\n Number of threads.\n\n Returns\n -------\n wfield : numpy.ndarray\n Whitefield.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_23median = {"median", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_23median, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_22median}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_23median(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_data = 0; PyArrayObject *__pyx_v_mask = 0; int __pyx_v_axis; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("median (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_data,&__pyx_n_s_mask,&__pyx_n_s_axis,&__pyx_n_s_num_threads,0}; PyObject* values[4] = {0,0,0,0}; values[1] = (PyObject *)((PyArrayObject *)((PyObject *)Py_None)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto 
__pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask); if (value) { values[1] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_axis); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[3] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "median") < 0)) __PYX_ERR(0, 691, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_data = ((PyArrayObject *)values[0]); __pyx_v_mask = ((PyArrayObject *)values[1]); if (values[2]) { __pyx_v_axis = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_axis == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 691, __pyx_L3_error) } else { __pyx_v_axis = ((int)((int)0)); } if (values[3]) { __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[3]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 692, __pyx_L3_error) } else { __pyx_v_num_threads = ((unsigned int)((unsigned int)1)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("median", 0, 1, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 691, __pyx_L3_error) 
__pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.median", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_data), __pyx_ptype_5numpy_ndarray, 1, "data", 0))) __PYX_ERR(0, 691, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 691, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_22median(__pyx_self, __pyx_v_data, __pyx_v_mask, __pyx_v_axis, __pyx_v_num_threads); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_22median(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_data, PyArrayObject *__pyx_v_mask, int __pyx_v_axis, unsigned int __pyx_v_num_threads) { int __pyx_v_ndim; unsigned long *__pyx_v__dims; npy_intp *__pyx_v_odims; int __pyx_v_i; int __pyx_v_type_num; PyArrayObject *__pyx_v_out = 0; void *__pyx_v__out; void *__pyx_v__data; unsigned char *__pyx_v__mask; int __pyx_v_fail; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("median", 0); __Pyx_INCREF((PyObject *)__pyx_v_data); __Pyx_INCREF((PyObject *)__pyx_v_mask); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("pyrost.bin.simulation.median", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_data); __Pyx_XDECREF((PyObject *)__pyx_v_mask); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__33 = PyTuple_Pack(14, __pyx_n_s_data, __pyx_n_s_mask, __pyx_n_s_axis, __pyx_n_s_num_threads, __pyx_n_s_ndim, __pyx_n_s_dims_2, 
__pyx_n_s_odims, __pyx_n_s_i, __pyx_n_s_type_num, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_data_2, __pyx_n_s_mask_2, __pyx_n_s_fail); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(0, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__33); __Pyx_GIVEREF(__pyx_tuple__33); /* … */ __pyx_t_5 = __Pyx_PyInt_From_int(((int)0)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); /* … */ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_None)); __Pyx_GIVEREF(((PyObject *)Py_None)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_None)); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_3); __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_data, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 691, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_mask, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 691, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_axis, __pyx_n_u_int) < 0) __PYX_ERR(0, 691, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 691, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 691, __pyx_L1_error) __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_23median, 0, __pyx_n_s_median, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__34)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_t_1); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_5, __pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_median, __pyx_t_5) 
< 0) __PYX_ERR(0, 691, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_codeobj__34 = (PyObject*)__Pyx_PyCode_New(4, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pyrost_bin_simulation_pyx, __pyx_n_s_median, 691, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__34)) __PYX_ERR(0, 691, __pyx_L1_error)
+692: num_threads: cython.uint=1) -> np.ndarray:
__pyx_t_3 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 692, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3);
693: """Calculate a median along the `axis`.
694:
695: Parameters
696: ----------
697: data : numpy.ndarray
698: Intensity frames.
699: mask : numpy.ndarray, optional
700: Bad pixel mask.
701: axis : int, optional
702: Array axis along which median values are calculated.
703: num_threads : int, optional
704: Number of threads.
705:
706: Returns
707: -------
708: wfield : numpy.ndarray
709: Whitefield.
710: """
+711: if not np.PyArray_IS_C_CONTIGUOUS(data):
__pyx_t_1 = ((!(PyArray_IS_C_CONTIGUOUS(__pyx_v_data) != 0)) != 0);
if (__pyx_t_1) {
/* … */
}
+712: data = np.PyArray_GETCONTIGUOUS(data)
__pyx_t_2 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_data)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 712, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_data, ((PyArrayObject *)__pyx_t_2)); __pyx_t_2 = 0;
713:
+714: cdef int ndim = data.ndim
__pyx_t_3 = __pyx_v_data->nd; __pyx_v_ndim = __pyx_t_3;
+715: axis = axis if axis >= 0 else ndim + axis
if (((__pyx_v_axis >= 0) != 0)) { __pyx_t_3 = __pyx_v_axis; } else { __pyx_t_3 = (__pyx_v_ndim + __pyx_v_axis); } __pyx_v_axis = __pyx_t_3;
+716: axis = axis if axis <= ndim - 1 else ndim - 1
if (((__pyx_v_axis <= (__pyx_v_ndim - 1)) != 0)) { __pyx_t_4 = __pyx_v_axis; } else { __pyx_t_4 = (__pyx_v_ndim - 1); } __pyx_v_axis = __pyx_t_4;
717:
+718: if mask is None:
__pyx_t_1 = (((PyObject *)__pyx_v_mask) == Py_None); __pyx_t_5 = (__pyx_t_1 != 0); if (__pyx_t_5) { /* … */ goto __pyx_L4; }
+719: mask = <np.ndarray>np.PyArray_SimpleNew(ndim, data.shape, np.NPY_BOOL)
__pyx_t_2 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_data->dimensions, NPY_BOOL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 719, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __pyx_t_2; __Pyx_INCREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_mask, ((PyArrayObject *)__pyx_t_6)); __pyx_t_6 = 0;
+720: np.PyArray_FILLWBYTE(mask, 1)
PyArray_FILLWBYTE(((PyObject *)__pyx_v_mask), 1);
721: else:
+722: mask = check_array(mask, np.NPY_BOOL)
/*else*/ { __pyx_t_6 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_mask, NPY_BOOL)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 722, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_mask, ((PyArrayObject *)__pyx_t_6)); __pyx_t_6 = 0;
+723: if memcmp(data.shape, mask.shape, ndim * sizeof(np.npy_intp)):
__pyx_t_5 = (memcmp(__pyx_v_data->dimensions, __pyx_v_mask->dimensions, (__pyx_v_ndim * (sizeof(npy_intp)))) != 0); if (unlikely(__pyx_t_5)) { /* … */ } } __pyx_L4:;
+724: raise ValueError('mask and data arrays must have identical shapes')
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 724, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 724, __pyx_L1_error) /* … */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_mask_and_data_arrays_must_have_i); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 724, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6);
725:
+726: cdef unsigned long *_dims = <unsigned long *>data.shape
__pyx_v__dims = ((unsigned long *)__pyx_v_data->dimensions);
727:
+728: cdef np.npy_intp *odims = <np.npy_intp *>malloc((ndim - 1) * sizeof(np.npy_intp))
__pyx_v_odims = ((npy_intp *)malloc(((__pyx_v_ndim - 1) * (sizeof(npy_intp)))));
+729: if odims is NULL:
__pyx_t_5 = ((__pyx_v_odims == NULL) != 0); if (unlikely(__pyx_t_5)) { /* … */ }
+730: raise MemoryError('not enough memory')
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 730, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 730, __pyx_L1_error) /* … */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_not_enough_memory); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 730, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7);
731: cdef int i
+732: for i in range(axis):
__pyx_t_3 = __pyx_v_axis; __pyx_t_7 = __pyx_t_3; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8;
+733: odims[i] = data.shape[i]
(__pyx_v_odims[__pyx_v_i]) = (__pyx_v_data->dimensions[__pyx_v_i]); }
+734: for i in range(axis + 1, ndim):
__pyx_t_3 = __pyx_v_ndim; __pyx_t_7 = __pyx_t_3; for (__pyx_t_8 = (__pyx_v_axis + 1); __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8;
+735: odims[i - 1] = data.shape[i]
(__pyx_v_odims[(__pyx_v_i - 1)]) = (__pyx_v_data->dimensions[__pyx_v_i]); }
736:
+737: cdef int type_num = np.PyArray_TYPE(data)
__pyx_v_type_num = PyArray_TYPE(__pyx_v_data);
+738: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim - 1, odims, type_num)
__pyx_t_6 = PyArray_SimpleNew((__pyx_v_ndim - 1), __pyx_v_odims, __pyx_v_type_num); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 738, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0;
+739: cdef void *_out = <void *>np.PyArray_DATA(out)
__pyx_v__out = ((void *)PyArray_DATA(__pyx_v_out));
+740: cdef void *_data = <void *>np.PyArray_DATA(data)
__pyx_v__data = ((void *)PyArray_DATA(__pyx_v_data));
+741: cdef unsigned char *_mask = <unsigned char *>np.PyArray_DATA(mask)
__pyx_v__mask = ((unsigned char *)PyArray_DATA(__pyx_v_mask));
742:
+743: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L13; } __pyx_L12_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L13:; } }
+744: if type_num == np.NPY_FLOAT64:
switch (__pyx_v_type_num) { case NPY_FLOAT64: /* … */ break; case NPY_FLOAT32:
+745: fail = median_c(_out, _data, _mask, ndim, _dims, 8, axis, compare_double, num_threads)
__pyx_v_fail = median(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_ndim, __pyx_v__dims, 8, __pyx_v_axis, compare_double, __pyx_v_num_threads);
+746: elif type_num == np.NPY_FLOAT32:
break; case NPY_INT32:
+747: fail = median_c(_out, _data, _mask, ndim, _dims, 4, axis, compare_float, num_threads)
__pyx_v_fail = median(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_ndim, __pyx_v__dims, 4, __pyx_v_axis, compare_float, __pyx_v_num_threads);
+748: elif type_num == np.NPY_INT32:
break; case NPY_UINT32:
+749: fail = median_c(_out, _data, _mask, ndim, _dims, 4, axis, compare_int, num_threads)
__pyx_v_fail = median(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_ndim, __pyx_v__dims, 4, __pyx_v_axis, compare_int, __pyx_v_num_threads);
+750: elif type_num == np.NPY_UINT32:
break; default:
+751: fail = median_c(_out, _data, _mask, ndim, _dims, 4, axis, compare_uint, num_threads)
__pyx_v_fail = median(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_ndim, __pyx_v__dims, 4, __pyx_v_axis, compare_uint, __pyx_v_num_threads);
752: else:
+753: raise TypeError('data argument has incompatible type: {:s}'.format(data.dtype))
{ #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif /*try:*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_data_argument_has_incompatible_t, __pyx_n_s_format); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 753, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_data), __pyx_n_s_dtype); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 753, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_2 = (__pyx_t_10) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_10, __pyx_t_9) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 753, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 753, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 753, __pyx_L15_error) } /*finally:*/ { __pyx_L15_error: { #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L12_error; } } } break; } }
+754: if fail:
__pyx_t_5 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_5)) { /* … */ }
+755: raise RuntimeError('C backend exited with error.')
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 755, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 755, __pyx_L1_error)
756:
+757: free(odims)
free(__pyx_v_odims);
+758: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;
759:
+760: def median_filter(data: np.ndarray, size: object, mask: np.ndarray=None, mode: str='reflect', cval: cython.double=0.,
/* Python wrapper */ static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_25median_filter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6pyrost_3bin_10simulation_24median_filter[] = "median_filter(ndarray data: np.ndarray, size: object, ndarray mask: np.ndarray = None, unicode mode: str = u'reflect', double cval: cython.double = 0., unsigned int num_threads: cython.uint = 1) -> np.ndarray\nCalculate a median along the `axis`.\n\n Parameters\n ----------\n data : numpy.ndarray\n Intensity frames.\n size : numpy.ndarray\n Gives the shape that is taken from the input array, at every element position, to\n define the input to the filter function. We adjust size to the number of dimensions\n of the input array, so that, if the input array is shape (10,10,10), and size is 2,\n then the actual size used is (2,2,2).\n mask : numpy.ndarray, optional\n Bad pixel mask.\n mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional\n The mode parameter determines how the input array is extended when the filter\n overlaps a border. Default value is 'reflect'. The valid values and their behavior\n is as follows:\n\n * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all\n values beyond the edge with the same constant value, defined by the `cval`\n parameter.\n * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating\n the last pixel.\n * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting\n about the center of the last pixel. This mode is also sometimes referred to as\n whole-sample symmetric.\n * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting\n about the edge of the last pixel. 
This mode is also sometimes referred to as\n half-sample symmetric.\n * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around\n to the opposite edge.\n cval : float, optional\n Value to fill past edges of input if mode is \342\200\230constant""\342\200\231. Default is 0.0.\n num_threads : int, optional\n Number of threads.\n\n Returns\n -------\n wfield : numpy.ndarray\n Whitefield.\n "; static PyMethodDef __pyx_mdef_6pyrost_3bin_10simulation_25median_filter = {"median_filter", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6pyrost_3bin_10simulation_25median_filter, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6pyrost_3bin_10simulation_24median_filter}; static PyArrayObject *__pyx_pw_6pyrost_3bin_10simulation_25median_filter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_data = 0; PyObject *__pyx_v_size = 0; PyArrayObject *__pyx_v_mask = 0; PyObject *__pyx_v_mode = 0; double __pyx_v_cval; unsigned int __pyx_v_num_threads; PyArrayObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("median_filter (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_data,&__pyx_n_s_size,&__pyx_n_s_mask,&__pyx_n_s_mode,&__pyx_n_s_cval,&__pyx_n_s_num_threads,0}; PyObject* values[6] = {0,0,0,0,0,0}; values[2] = (PyObject *)((PyArrayObject *)((PyObject *)Py_None)); values[3] = ((PyObject*)((PyObject*)__pyx_n_u_reflect)); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 
0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_size)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("median_filter", 0, 2, 6, 1); __PYX_ERR(0, 760, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask); if (value) { values[2] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_cval); if (value) { values[4] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 5: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_num_threads); if (value) { values[5] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "median_filter") < 0)) __PYX_ERR(0, 760, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); CYTHON_FALLTHROUGH; case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_data = ((PyArrayObject *)values[0]); __pyx_v_size = values[1]; __pyx_v_mask = ((PyArrayObject *)values[2]); __pyx_v_mode = ((PyObject*)values[3]); if (values[4]) { __pyx_v_cval = 
__pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_cval == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 760, __pyx_L3_error) } else { __pyx_v_cval = ((double)((double)0.)); } if (values[5]) { __pyx_v_num_threads = __Pyx_PyInt_As_unsigned_int(values[5]); if (unlikely((__pyx_v_num_threads == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 761, __pyx_L3_error) } else { __pyx_v_num_threads = ((unsigned int)((unsigned int)1)); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("median_filter", 0, 2, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 760, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("pyrost.bin.simulation.median_filter", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_data), __pyx_ptype_5numpy_ndarray, 1, "data", 0))) __PYX_ERR(0, 760, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mask), __pyx_ptype_5numpy_ndarray, 1, "mask", 0))) __PYX_ERR(0, 760, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mode), (&PyUnicode_Type), 1, "mode", 1))) __PYX_ERR(0, 760, __pyx_L1_error) __pyx_r = __pyx_pf_6pyrost_3bin_10simulation_24median_filter(__pyx_self, __pyx_v_data, __pyx_v_size, __pyx_v_mask, __pyx_v_mode, __pyx_v_cval, __pyx_v_num_threads); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyArrayObject *__pyx_pf_6pyrost_3bin_10simulation_24median_filter(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_data, PyObject *__pyx_v_size, PyArrayObject *__pyx_v_mask, PyObject *__pyx_v_mode, double __pyx_v_cval, unsigned int __pyx_v_num_threads) { int __pyx_v_ndim; npy_intp *__pyx_v_dims; PyArrayObject *__pyx_v_fsize = 0; unsigned long *__pyx_v__fsize; unsigned long *__pyx_v__dims; int __pyx_v_type_num; PyArrayObject *__pyx_v_out = 0; void 
*__pyx_v__out; void *__pyx_v__data; unsigned char *__pyx_v__mask; int __pyx_v__mode; void *__pyx_v__cval; int __pyx_v_fail; PyArrayObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("median_filter", 0); __Pyx_INCREF((PyObject *)__pyx_v_data); __Pyx_INCREF((PyObject *)__pyx_v_mask); /* … */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("pyrost.bin.simulation.median_filter", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_fsize); __Pyx_XDECREF((PyObject *)__pyx_v_out); __Pyx_XDECREF((PyObject *)__pyx_v_data); __Pyx_XDECREF((PyObject *)__pyx_v_mask); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* … */ __pyx_tuple__35 = PyTuple_Pack(19, __pyx_n_s_data, __pyx_n_s_size, __pyx_n_s_mask, __pyx_n_s_mode, __pyx_n_s_cval, __pyx_n_s_num_threads, __pyx_n_s_ndim, __pyx_n_s_dims, __pyx_n_s_fsize, __pyx_n_s_fsize_2, __pyx_n_s_dims_2, __pyx_n_s_type_num, __pyx_n_s_out, __pyx_n_s_out_2, __pyx_n_s_data_2, __pyx_n_s_mask_2, __pyx_n_s_mode_2, __pyx_n_s_cval_2, __pyx_n_s_fail); if (unlikely(!__pyx_tuple__35)) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__35); __Pyx_GIVEREF(__pyx_tuple__35); /* … */ __pyx_t_5 = PyFloat_FromDouble(((double)0.)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); /* … */ __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_None)); __Pyx_GIVEREF(((PyObject *)Py_None)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_None)); __Pyx_INCREF(((PyObject*)__pyx_n_u_reflect)); __Pyx_GIVEREF(((PyObject*)__pyx_n_u_reflect)); PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject*)__pyx_n_u_reflect)); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); 
PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_t_3); __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyDict_NewPresized(7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_data, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 760, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_size, __pyx_n_u_object) < 0) __PYX_ERR(0, 760, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_mask, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 760, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_mode, __pyx_n_u_unicode) < 0) __PYX_ERR(0, 760, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_cval, __pyx_n_u_double) < 0) __PYX_ERR(0, 760, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_num_threads, __pyx_kp_u_unsigned_int) < 0) __PYX_ERR(0, 760, __pyx_L1_error) if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_return, __pyx_n_u_ndarray) < 0) __PYX_ERR(0, 760, __pyx_L1_error) __pyx_t_5 = __Pyx_CyFunction_New(&__pyx_mdef_6pyrost_3bin_10simulation_25median_filter, 0, __pyx_n_s_median_filter, NULL, __pyx_n_s_pyrost_bin_simulation, __pyx_d, ((PyObject *)__pyx_codeobj__36)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_5, __pyx_t_1); __Pyx_CyFunction_SetAnnotationsDict(__pyx_t_5, __pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_median_filter, __pyx_t_5) < 0) __PYX_ERR(0, 760, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+761: num_threads: cython.uint=1) -> np.ndarray:
__pyx_t_3 = __Pyx_PyInt_From_unsigned_int(((unsigned int)1)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3);
762: """Calculate a median along the `axis`.
763:
764: Parameters
765: ----------
766: data : numpy.ndarray
767: Intensity frames.
768: size : numpy.ndarray
769: Gives the shape that is taken from the input array, at every element position, to
770: define the input to the filter function. We adjust size to the number of dimensions
771: of the input array, so that, if the input array is shape (10,10,10), and size is 2,
772: then the actual size used is (2,2,2).
773: mask : numpy.ndarray, optional
774: Bad pixel mask.
775: mode : {'constant', 'nearest', 'mirror', 'reflect', 'wrap'}, optional
776: The mode parameter determines how the input array is extended when the filter
777: overlaps a border. Default value is 'reflect'. The valid values and their behavior
778: is as follows:
779:
780: * 'constant', (k k k k | a b c d | k k k k) : The input is extended by filling all
781: values beyond the edge with the same constant value, defined by the `cval`
782: parameter.
783: * 'nearest', (a a a a | a b c d | d d d d) : The input is extended by replicating
784: the last pixel.
785: * 'mirror', (c d c b | a b c d | c b a b) : The input is extended by reflecting
786: about the center of the last pixel. This mode is also sometimes referred to as
787: whole-sample symmetric.
788: * 'reflect', (d c b a | a b c d | d c b a) : The input is extended by reflecting
789: about the edge of the last pixel. This mode is also sometimes referred to as
790: half-sample symmetric.
791: * 'wrap', (a b c d | a b c d | a b c d) : The input is extended by wrapping around
792: to the opposite edge.
793: cval : float, optional
794: Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
795: num_threads : int, optional
796: Number of threads.
797:
798: Returns
799: -------
800: wfield : numpy.ndarray
801: Whitefield.
802: """
+803: if not np.PyArray_IS_C_CONTIGUOUS(data):
__pyx_t_1 = ((!(PyArray_IS_C_CONTIGUOUS(__pyx_v_data) != 0)) != 0);
if (__pyx_t_1) {
/* … */
}
+804: data = np.PyArray_GETCONTIGUOUS(data)
__pyx_t_2 = ((PyObject *)PyArray_GETCONTIGUOUS(__pyx_v_data)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF_SET(__pyx_v_data, ((PyArrayObject *)__pyx_t_2)); __pyx_t_2 = 0;
805:
+806: cdef int ndim = data.ndim
__pyx_t_3 = __pyx_v_data->nd; __pyx_v_ndim = __pyx_t_3;
+807: cdef np.npy_intp *dims = data.shape
__pyx_t_4 = __pyx_v_data->dimensions; __pyx_v_dims = __pyx_t_4;
808:
+809: if mask is None:
__pyx_t_1 = (((PyObject *)__pyx_v_mask) == Py_None); __pyx_t_5 = (__pyx_t_1 != 0); if (__pyx_t_5) { /* … */ goto __pyx_L4; }
+810: mask = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, np.NPY_BOOL)
__pyx_t_2 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, NPY_BOOL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 810, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __pyx_t_2; __Pyx_INCREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_mask, ((PyArrayObject *)__pyx_t_6)); __pyx_t_6 = 0;
+811: np.PyArray_FILLWBYTE(mask, 1)
PyArray_FILLWBYTE(((PyObject *)__pyx_v_mask), 1);
812: else:
+813: mask = check_array(mask, np.NPY_BOOL)
/*else*/ { __pyx_t_6 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_check_array(__pyx_v_mask, NPY_BOOL)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 813, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_mask, ((PyArrayObject *)__pyx_t_6)); __pyx_t_6 = 0; } __pyx_L4:;
814:
+815: cdef np.ndarray fsize = normalize_sequence(size, ndim, np.NPY_INTP)
__pyx_t_6 = ((PyObject *)__pyx_f_6pyrost_3bin_10simulation_normalize_sequence(__pyx_v_size, __pyx_v_ndim, NPY_INTP)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 815, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_v_fsize = ((PyArrayObject *)__pyx_t_6); __pyx_t_6 = 0;
+816: cdef unsigned long *_fsize = <unsigned long *>np.PyArray_DATA(fsize)
__pyx_v__fsize = ((unsigned long *)PyArray_DATA(__pyx_v_fsize));
817:
+818: cdef unsigned long *_dims = <unsigned long *>dims
__pyx_v__dims = ((unsigned long *)__pyx_v_dims);
+819: cdef int type_num = np.PyArray_TYPE(data)
__pyx_v_type_num = PyArray_TYPE(__pyx_v_data);
+820: cdef np.ndarray out = <np.ndarray>np.PyArray_SimpleNew(ndim, dims, type_num)
__pyx_t_6 = PyArray_SimpleNew(__pyx_v_ndim, __pyx_v_dims, __pyx_v_type_num); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_out = ((PyArrayObject *)__pyx_t_2); __pyx_t_2 = 0;
+821: cdef void *_out = <void *>np.PyArray_DATA(out)
__pyx_v__out = ((void *)PyArray_DATA(__pyx_v_out));
+822: cdef void *_data = <void *>np.PyArray_DATA(data)
__pyx_v__data = ((void *)PyArray_DATA(__pyx_v_data));
+823: cdef unsigned char *_mask = <unsigned char *>np.PyArray_DATA(mask)
__pyx_v__mask = ((unsigned char *)PyArray_DATA(__pyx_v_mask));
+824: cdef int _mode = extend_mode_to_code(mode)
__pyx_t_3 = __pyx_f_6pyrost_3bin_10simulation_extend_mode_to_code(__pyx_v_mode); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 824, __pyx_L1_error)
__pyx_v__mode = __pyx_t_3;
+825: cdef void *_cval = <void *>&cval
__pyx_v__cval = ((void *)(&__pyx_v_cval));
826:
+827: with nogil:
{ #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* … */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L7; } __pyx_L6_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L7:; } }
+828: if type_num == np.NPY_FLOAT64:
switch (__pyx_v_type_num) { case NPY_FLOAT64: /* … */ break; case NPY_FLOAT32:
+829: fail = median_filter_c(_out, _data, _mask, ndim, _dims, 8, _fsize, _mode, _cval, compare_double, num_threads)
__pyx_v_fail = median_filter(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_ndim, __pyx_v__dims, 8, __pyx_v__fsize, __pyx_v__mode, __pyx_v__cval, compare_double, __pyx_v_num_threads);
+830: elif type_num == np.NPY_FLOAT32:
break; case NPY_INT32:
+831: fail = median_filter_c(_out, _data, _mask, ndim, _dims, 4, _fsize, _mode, _cval, compare_float, num_threads)
__pyx_v_fail = median_filter(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_ndim, __pyx_v__dims, 4, __pyx_v__fsize, __pyx_v__mode, __pyx_v__cval, compare_float, __pyx_v_num_threads);
+832: elif type_num == np.NPY_INT32:
break; case NPY_UINT32:
+833: fail = median_filter_c(_out, _data, _mask, ndim, _dims, 4, _fsize, _mode, _cval, compare_int, num_threads)
__pyx_v_fail = median_filter(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_ndim, __pyx_v__dims, 4, __pyx_v__fsize, __pyx_v__mode, __pyx_v__cval, compare_int, __pyx_v_num_threads);
+834: elif type_num == np.NPY_UINT32:
break; default:
+835: fail = median_filter_c(_out, _data, _mask, ndim, _dims, 4, _fsize, _mode, _cval, compare_uint, num_threads)
__pyx_v_fail = median_filter(__pyx_v__out, __pyx_v__data, __pyx_v__mask, __pyx_v_ndim, __pyx_v__dims, 4, __pyx_v__fsize, __pyx_v__mode, __pyx_v__cval, compare_uint, __pyx_v_num_threads);
836: else:
+837: raise TypeError('data argument has incompatible type: {:s}'.format(data.dtype))
{ #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif /*try:*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_data_argument_has_incompatible_t, __pyx_n_s_format); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 837, __pyx_L9_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_data), __pyx_n_s_dtype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 837, __pyx_L9_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); } } __pyx_t_2 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_8, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 837, __pyx_L9_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 837, __pyx_L9_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 837, __pyx_L9_error) } /*finally:*/ { __pyx_L9_error: { #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L6_error; } } } break; } }
+838: if fail:
__pyx_t_5 = (__pyx_v_fail != 0); if (unlikely(__pyx_t_5)) { /* … */ }
+839: raise RuntimeError('C backend exited with error.')
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(0, 839, __pyx_L1_error)
840:
+841: return out
__Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_out)); __pyx_r = __pyx_v_out; goto __pyx_L0;