/// Boost.Python rvalue converter, stage 2: construct a MatType (Eigen matrix)
/// in the converter-provided storage from the NumPy array behind `obj_ptr`.
/// Requires a C-contiguous, aligned array; throws std::invalid_argument otherwise.
static void construct(PyObject* obj_ptr,
                      converter::rvalue_from_python_stage1_data* data)
{
  const int R = MatType::RowsAtCompileTime;
  const int C = MatType::ColsAtCompileTime;

  PyArrayObject* array = reinterpret_cast<PyArrayObject*>(obj_ptr);
  int flags = PyArray_FLAGS(array);
  // Mapping raw memory into an aligned Eigen Map requires contiguous,
  // aligned storage.
  if (!(flags & NPY_ARRAY_C_CONTIGUOUS) || !(flags & NPY_ARRAY_ALIGNED))
    throw std::invalid_argument("Contiguous and aligned array required!");

  const int ndims = PyArray_NDIM(array);
  const int dtype_size = (PyArray_DESCR(array))->elsize;
  // NumPy strides are in bytes; they are divided by the element size below.
  const int s1 = PyArray_STRIDE(array, 0),
            s2 = ndims > 1 ? PyArray_STRIDE(array, 1) : 0;

  int nrows = 1, ncols = 1;
  if (R == 1 || C == 1) {
    // Compile-time vector: one dimension is fixed to 1, the other takes
    // the full array size.
    nrows = R == 1 ? 1 : PyArray_SIZE2(array);
    ncols = C == 1 ? 1 : PyArray_SIZE2(array);
  }
  else {
    nrows = (R == Dynamic) ? PyArray_DIMS(array)[0] : R;
    if (ndims > 1)
      // BUGFIX: the column count must come from C / dims[1]; the original
      // copy-pasted the row computation and used R on both sides.
      ncols = (C == Dynamic) ? PyArray_DIMS(array)[1] : C;
  }

  T* raw_data = reinterpret_cast<T*>(PyArray_DATA(array));
  typedef Map<Matrix<T, Dynamic, Dynamic, RowMajor>, Aligned,
              Stride<Dynamic, Dynamic> > MapType;

  void* storage =
      ((converter::rvalue_from_python_storage<MatType>*)(data))->storage.bytes;
  new (storage) MatType;  // placement-construct, then assign from the map
  MatType* emat = (MatType*)storage;
  *emat = MapType(raw_data, nrows, ncols,
                  Stride<Dynamic, Dynamic>(s1 / dtype_size, s2 / dtype_size));
  data->convertible = storage;
}
/*NUMPY_API
 *
 * Get New ArrayFlagsObject
 */
NPY_NO_EXPORT PyObject *
PyArray_NewFlagsObject(PyObject *obj)
{
    int flags;
    PyObject *flagobj;

    if (obj != NULL) {
        if (!PyArray_Check(obj)) {
            PyErr_SetString(PyExc_ValueError,
                            "Need a NumPy array to create a flags object");
            return NULL;
        }
        flags = PyArray_FLAGS((PyArrayObject *)obj);
    }
    else {
        /* No array given: report the flags of a fresh well-behaved array. */
        flags = NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_OWNDATA |
                NPY_ARRAY_F_CONTIGUOUS | NPY_ARRAY_ALIGNED;
    }

    flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0);
    if (flagobj == NULL) {
        return NULL;
    }
    /* The flags object keeps a reference to the array it describes. */
    Py_XINCREF(obj);
    ((PyArrayFlagsObject *)flagobj)->arr = obj;
    ((PyArrayFlagsObject *)flagobj)->flags = flags;
    return flagobj;
}
/*
 * Decide whether the data of `ap` may be written to, by walking its chain
 * of base objects.  Returns NPY_TRUE when the array owns its data, when
 * the ultimate base is a writeable array or buffer, or when the base is a
 * string (kept for pickle support).  Never leaves an exception set.
 */
NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap)
{
    PyObject *base=PyArray_BASE(ap);
#if defined(NPY_PY3K)
    Py_buffer view;
#else
    void *dummy;
    Py_ssize_t n;
#endif

    /* If we own our own data, then no-problem */
    if ((base == NULL) || (PyArray_FLAGS(ap) & NPY_ARRAY_OWNDATA)) {
        return NPY_TRUE;
    }
    /*
     * Get to the final base object
     * If it is a writeable array, then return TRUE
     * If we can find an array object
     * or a writeable buffer object as the final base object
     * or a string object (for pickling support memory savings).
     * - this last could be removed if a proper pickleable
     * buffer was added to Python.
     *
     * MW: I think it would better to disallow switching from READONLY
     * to WRITEABLE like this...
     */
    while(PyArray_Check(base)) {
        /* Stop at the first array in the chain that actually owns data. */
        if (PyArray_CHKFLAGS((PyArrayObject *)base, NPY_ARRAY_OWNDATA)) {
            return (npy_bool) (PyArray_ISWRITEABLE((PyArrayObject *)base));
        }
        base = PyArray_BASE((PyArrayObject *)base);
    }

    /*
     * here so pickle support works seamlessly
     * and unpickled array can be set and reset writeable
     * -- could be abused --
     */
    if (PyString_Check(base)) {
        return NPY_TRUE;
    }
#if defined(NPY_PY3K)
    /* Probe for a writable buffer; failure means read-only or no buffer. */
    if (PyObject_GetBuffer(base, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
        PyErr_Clear();
        return NPY_FALSE;
    }
    PyBuffer_Release(&view);
#else
    /* Python 2 path: legacy write-buffer protocol probe. */
    if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) {
        PyErr_Clear();
        return NPY_FALSE;
    }
#endif
    return NPY_TRUE;
}
// the data should be FLOAT32 and should be ensured in the wrapper static PyObject *interp3(PyObject *self, PyObject *args) { PyArrayObject *volume, *result, *C, *R, *S; float *pr, *pc, *ps; float *pvol, *pvc; int xdim, ydim, zdim; // We expect 4 arguments of the PyArray_Type if(!PyArg_ParseTuple(args, "O!O!O!O!", &PyArray_Type, &volume, &PyArray_Type, &R, &PyArray_Type, &C, &PyArray_Type, &S)) return NULL; if ( NULL == volume ) return NULL; if ( NULL == C ) return NULL; if ( NULL == R ) return NULL; if ( NULL == S ) return NULL; // result matrix is the same size as C and is float result = (PyArrayObject*) PyArray_ZEROS(PyArray_NDIM(C), C->dimensions, NPY_FLOAT, 0); // This is for reference counting ( I think ) PyArray_FLAGS(result) |= NPY_OWNDATA; // massive use of iterators to progress through the data PyArrayIterObject *itr_v, *itr_r, *itr_c, *itr_s; itr_v = (PyArrayIterObject *) PyArray_IterNew(result); itr_r = (PyArrayIterObject *) PyArray_IterNew(R); itr_c = (PyArrayIterObject *) PyArray_IterNew(C); itr_s = (PyArrayIterObject *) PyArray_IterNew(S); pvol = (float *)PyArray_DATA(volume); xdim = PyArray_DIM(volume, 0); ydim = PyArray_DIM(volume, 1); zdim = PyArray_DIM(volume, 2); //printf("%f\n", pvol[4*20*30 + 11*30 + 15]); while(PyArray_ITER_NOTDONE(itr_v)) { pvc = (float *) PyArray_ITER_DATA(itr_v); pr = (float *) PyArray_ITER_DATA(itr_r); pc = (float *) PyArray_ITER_DATA(itr_c); ps = (float *) PyArray_ITER_DATA(itr_s); // The order is weird because the tricubic code below is // for Fortran ordering. Note that the xdim changes fast in // the code, whereas the rightmost dim should change fast // in C multidimensional arrays. *pvc = TriCubic(*ps, *pc, *pr, pvol, zdim, ydim, xdim); PyArray_ITER_NEXT(itr_v); PyArray_ITER_NEXT(itr_r); PyArray_ITER_NEXT(itr_c); PyArray_ITER_NEXT(itr_s); } return result; }
/// Wrap the data of the array view `A` in a new numpy array.
/// The numpy array is a *view*: it does not own its data (OWNDATA cleared)
/// and keeps a python reference to the underlying storage as its base, so
/// the storage stays alive as long as the numpy array does.  When `copy` is
/// true, an independent copy is returned instead.
PyObject * array_view_to_python ( ArrayViewType const & A, bool copy=false) {
 //_import_array();
 typedef typename ArrayViewType::value_type value_type;
 static const int rank = ArrayViewType::rank;
 const int elementsType (numpy_to_C_type<value_type>::arraytype);
 npy_intp dims[rank], strides[rank];
 // numpy strides are in bytes, the view's strides are in elements.
 for(size_t i =0; i<rank; ++i) { dims[i] = A.indexmap().lengths()[i]; strides[i] = A.indexmap().strides()[i]*sizeof(value_type); }
 const value_type * data = A.data_start();
 //int flags = NPY_ARRAY_BEHAVED & ~NPY_ARRAY_OWNDATA;;// for numpy2
 // Well-behaved (aligned, writeable) array that does NOT own its data;
 // the flag names differ across numpy versions, hence the two branches.
#ifdef TRIQS_NUMPY_VERSION_LT_17
 int flags = NPY_BEHAVED & ~NPY_OWNDATA;
#else
 int flags = NPY_ARRAY_BEHAVED & ~NPY_ARRAY_OWNDATA;
#endif
 PyObject* res = PyArray_NewFromDescr(&PyArray_Type, PyArray_DescrFromType(elementsType), (int) rank, dims, strides, (void*) data, flags, NULL);

 if (!res) {
  if (PyErr_Occurred()) {PyErr_Print();PyErr_Clear();}
  TRIQS_RUNTIME_ERROR<<" array_view_from_numpy : the python numpy object could not be build";
 }
 if (!PyArray_Check(res)) TRIQS_RUNTIME_ERROR<<" array_view_from_numpy : internal error : the python object is not a numpy";
 PyArrayObject * arr = (PyArrayObject *)(res);
 //PyArray_SetBaseObject(arr, A.storage().new_python_ref());
 // Install a new python reference to the storage as the array's base, so
 // the data outlives `A`.  numpy < 1.7 had no setter, so the field is
 // written directly in that branch.
#ifdef TRIQS_NUMPY_VERSION_LT_17
 arr->base = A.storage().new_python_ref();
 assert( arr->flags == (arr->flags & ~NPY_OWNDATA));
#else
 int r = PyArray_SetBaseObject(arr,A.storage().new_python_ref());
 if (r!=0) TRIQS_RUNTIME_ERROR << "Internal Error setting the guard in numpy !!!!";
 assert( PyArray_FLAGS(arr) == (PyArray_FLAGS(arr) & ~NPY_ARRAY_OWNDATA));
#endif
 if (copy) {
  // Replace the view with an independent copy and drop the view.
  PyObject * na = PyObject_CallMethod(res,(char*)"copy",NULL);
  Py_DECREF(res);
  // Port this for 1.7
  //assert(((PyArrayObject *)na)->base ==NULL);
  res = na;
 }
 return res;
}
/// Convert a 1-D or 2-D NumPy array of doubles into an Eigen MatrixXd.
/// A 1-D array of length n becomes an (n, 1) matrix.  The array must be
/// contiguous (C or Fortran order); throws Exception otherwise.
MatrixXd PyArray_ToMatrixXd(PyObject* array) {
	// BUGFIX: compare the type number directly.  The previous code called
	// PyArray_DescrFromType(NPY_DOUBLE), which returns a *new* reference
	// that was leaked on every invocation.
	if(PyArray_TYPE(array) != NPY_DOUBLE)
		throw Exception("Can only handle arrays of double values.");

	const int ndim = PyArray_NDIM(array);
	if(ndim != 1 && ndim != 2)
		throw Exception("Can only handle one- or two-dimensional arrays.");

	// Unify the 1-D and 2-D cases: a 1-D array is an (n, 1) matrix.
	const int rows = PyArray_DIM(array, 0);
	const int cols = (ndim == 2) ? PyArray_DIM(array, 1) : 1;
	double* data = reinterpret_cast<double*>(PyArray_DATA(array));

	// Pick the Eigen storage order matching the array's memory layout.
	if(PyArray_FLAGS(array) & NPY_F_CONTIGUOUS)
		return Map<Matrix<double, Dynamic, Dynamic, ColMajor> >(data, rows, cols);
	else if(PyArray_FLAGS(array) & NPY_C_CONTIGUOUS)
		return Map<Matrix<double, Dynamic, Dynamic, RowMajor> >(data, rows, cols);
	else
		throw Exception("Data must be stored in contiguous memory.");
}
/*
 * check if in "alhs @op@ orhs" that alhs is a temporary (refcnt == 1) so we
 * can do in-place operations instead of creating a new temporary
 * "cannot" is set to true if it cannot be done even with swapped arguments
 *
 * Returns non-zero (the result of check_callers) only when alhs is an
 * elision candidate AND orhs is compatible (matching dims or scalar,
 * safely castable to alhs's dtype).
 */
static int
can_elide_temp(PyArrayObject * alhs, PyObject * orhs, int * cannot)
{
    /*
     * to be a candidate the array needs to have reference count 1, be an exact
     * array of a basic type, own its data and size larger than threshold
     */
    if (Py_REFCNT(alhs) != 1 || !PyArray_CheckExact(alhs) ||
            PyArray_DESCR(alhs)->type_num >= NPY_OBJECT ||
            !(PyArray_FLAGS(alhs) & NPY_ARRAY_OWNDATA) ||
            PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) {
        return 0;
    }
    if (PyArray_CheckExact(orhs) || PyArray_CheckAnyScalar(orhs)) {
        PyArrayObject * arhs;

        /*
         * create array from right hand side; the INCREF balances the
         * reference PyArray_EnsureArray steals from its argument
         */
        Py_INCREF(orhs);
        arhs = (PyArrayObject *)PyArray_EnsureArray(orhs);
        if (arhs == NULL) {
            return 0;
        }

        /*
         * if rhs is not a scalar dimensions must match
         * TODO: one could allow broadcasting on equal types
         */
        if (!(PyArray_NDIM(arhs) == 0 ||
              (PyArray_NDIM(arhs) == PyArray_NDIM(alhs) &&
               PyArray_CompareLists(PyArray_DIMS(alhs), PyArray_DIMS(arhs),
                                    PyArray_NDIM(arhs))))) {
            Py_DECREF(arhs);
            return 0;
        }

        /* must be safe to cast (checks values for scalar in rhs) */
        if (PyArray_CanCastArrayTo(arhs, PyArray_DESCR(alhs),
                                   NPY_SAFE_CASTING)) {
            Py_DECREF(arhs);
            /* final gate: inspect the calling bytecode */
            return check_callers(cannot);
        }
        Py_DECREF(arhs);
    }

    return 0;
}
/* try elide unary temporary */
NPY_NO_EXPORT int
can_elide_temp_unary(PyArrayObject * m1)
{
    int cannot;

    /*
     * Candidate: a refcount-1 exact array of a non-void basic type that
     * owns its data and is large enough to make elision worthwhile.
     */
    const int is_candidate =
        Py_REFCNT(m1) == 1 &&
        PyArray_CheckExact(m1) &&
        PyArray_DESCR(m1)->type_num != NPY_VOID &&
        (PyArray_FLAGS(m1) & NPY_ARRAY_OWNDATA) &&
        PyArray_NBYTES(m1) >= NPY_MIN_ELIDE_BYTES;

    if (!is_candidate) {
        return 0;
    }
    if (!check_callers(&cannot)) {
        return 0;
    }
#if NPY_ELIDE_DEBUG != 0
    puts("elided temporary in unary op");
#endif
    return 1;
}
/*NUMPY_API
 *
 * Get New ArrayFlagsObject
 */
NPY_NO_EXPORT PyObject *
PyArray_NewFlagsObject(PyObject *obj)
{
    /* With no array, report the default flags of a fresh array. */
    int flags = (obj == NULL)
        ? (CONTIGUOUS | OWNDATA | FORTRAN | ALIGNED)
        : PyArray_FLAGS(obj);
    PyObject *flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0);

    if (flagobj == NULL) {
        return NULL;
    }
    /* The flags object keeps a reference to the array it describes. */
    Py_XINCREF(obj);
    ((PyArrayFlagsObject *)flagobj)->arr = obj;
    ((PyArrayFlagsObject *)flagobj)->flags = flags;
    return flagobj;
}
// Validate that `arr` is a C-contiguous, 4-d float32 array whose trailing
// dimensions are (channels, height, width); throws std::runtime_error with
// `name` in the message on the first violated condition.
void PyNet::check_contiguous_array(PyArrayObject* arr, string name,
    int channels, int height, int width) {
  if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS))
    throw std::runtime_error(name + " must be C contiguous");
  if (PyArray_NDIM(arr) != 4)
    throw std::runtime_error(name + " must be 4-d");
  if (PyArray_TYPE(arr) != NPY_FLOAT32)
    throw std::runtime_error(name + " must be float32");
  const npy_intp* dims = PyArray_DIMS(arr);
  if (dims[1] != channels)
    throw std::runtime_error(name + " has wrong number of channels");
  if (dims[2] != height)
    throw std::runtime_error(name + " has wrong height");
  if (dims[3] != width)
    throw std::runtime_error(name + " has wrong width");
}
/*
 * Conforms an output parameter 'out' to have 'ndim' dimensions
 * with dimensions of size one added in the appropriate places
 * indicated by 'axis_flags'.
 *
 * The return value is a view into 'out' (or, when need_copy is set, a
 * fresh array with 'out' installed as its writeback-if-copy base).
 * Returns NULL with an exception set when 'out' has an incompatible shape.
 */
static PyArrayObject *
conform_reduce_result(int ndim, npy_bool *axis_flags,
                      PyArrayObject *out, int keepdims, const char *funcname,
                      int need_copy)
{
    npy_intp strides[NPY_MAXDIMS], shape[NPY_MAXDIMS];
    npy_intp *strides_out = PyArray_STRIDES(out);
    npy_intp *shape_out = PyArray_DIMS(out);
    int idim, idim_out, ndim_out = PyArray_NDIM(out);
    PyArray_Descr *dtype;
    PyArrayObject_fields *ret;

    /*
     * If the 'keepdims' parameter is true, do a simpler validation and
     * return a new reference to 'out'.
     */
    if (keepdims) {
        if (PyArray_NDIM(out) != ndim) {
            PyErr_Format(PyExc_ValueError,
                    "output parameter for reduction operation %s "
                    "has the wrong number of dimensions (must match "
                    "the operand's when keepdims=True)", funcname);
            return NULL;
        }
        /* Every reduced axis must already have length one. */
        for (idim = 0; idim < ndim; ++idim) {
            if (axis_flags[idim]) {
                if (shape_out[idim] != 1) {
                    PyErr_Format(PyExc_ValueError,
                            "output parameter for reduction operation %s "
                            "has a reduction dimension not equal to one "
                            "(required when keepdims=True)", funcname);
                    return NULL;
                }
            }
        }
        Py_INCREF(out);
        return out;
    }

    /* Construct the strides and shape */
    idim_out = 0;
    for (idim = 0; idim < ndim; ++idim) {
        if (axis_flags[idim]) {
            /* Reduced axis: insert a broadcast dim (length 1, stride 0). */
            strides[idim] = 0;
            shape[idim] = 1;
        }
        else {
            /* Non-reduced axis: consume the next dimension of 'out'. */
            if (idim_out >= ndim_out) {
                PyErr_Format(PyExc_ValueError,
                        "output parameter for reduction operation %s "
                        "does not have enough dimensions", funcname);
                return NULL;
            }
            strides[idim] = strides_out[idim_out];
            shape[idim] = shape_out[idim_out];
            ++idim_out;
        }
    }
    if (idim_out != ndim_out) {
        PyErr_Format(PyExc_ValueError,
                "output parameter for reduction operation %s "
                "has too many dimensions", funcname);
        return NULL;
    }

    /* Allocate the view */
    dtype = PyArray_DESCR(out);
    Py_INCREF(dtype);   /* PyArray_NewFromDescr steals this reference */
    ret = (PyArrayObject_fields *)PyArray_NewFromDescr(&PyArray_Type,
                                                       dtype,
                                                       ndim, shape, strides,
                                                       PyArray_DATA(out),
                                                       PyArray_FLAGS(out),
                                                       NULL);
    if (ret == NULL) {
        return NULL;
    }

    /* Keep 'out' alive as the view's base (SetBaseObject steals the ref). */
    Py_INCREF(out);
    if (PyArray_SetBaseObject((PyArrayObject *)ret, (PyObject *)out) < 0) {
        Py_DECREF(ret);
        return NULL;
    }

    if (need_copy) {
        PyArrayObject *ret_copy;

        ret_copy = (PyArrayObject *)PyArray_NewLikeArray(
            (PyArrayObject *)ret, NPY_ANYORDER, NULL, 0);
        if (ret_copy == NULL) {
            Py_DECREF(ret);
            return NULL;
        }

        if (PyArray_CopyInto(ret_copy, (PyArrayObject *)ret) != 0) {
            Py_DECREF(ret);
            Py_DECREF(ret_copy);
            return NULL;
        }

        /*
         * Make the copy write its contents back to the view on resolve;
         * SetWritebackIfCopyBase steals the new reference to 'ret'.
         */
        Py_INCREF(ret);
        if (PyArray_SetWritebackIfCopyBase(ret_copy,
                                           (PyArrayObject *)ret) < 0) {
            Py_DECREF(ret);
            Py_DECREF(ret_copy);
            return NULL;
        }
        return ret_copy;
    }
    else {
        return (PyArrayObject *)ret;
    }
}
/*
 * digitize(x, bins, right=False) returns an array of integers the same length
 * as x. The values i returned are such that bins[i - 1] <= x < bins[i] if
 * bins is monotonically increasing, or bins[i - 1] > x >= bins[i] if bins
 * is monotonically decreasing. Beyond the bounds of bins, returns either
 * i = 0 or i = len(bins) as appropriate. If right == True the comparison
 * is bins [i - 1] < x <= bins[i] or bins [i - 1] >= x > bins[i]
 *
 * NOTE(review): the tail of this function (the `fail:` label targeted by
 * the gotos below and the final return) is not visible in this chunk.
 */
NPY_NO_EXPORT PyObject *
arr_digitize(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
    PyObject *obj_x = NULL;
    PyObject *obj_bins = NULL;
    PyArrayObject *arr_x = NULL;
    PyArrayObject *arr_bins = NULL;
    PyObject *ret = NULL;
    npy_intp len_bins;
    int monotonic, right = 0;
    NPY_BEGIN_THREADS_DEF

    static char *kwlist[] = {"x", "bins", "right", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist,
                                     &obj_x, &obj_bins, &right)) {
        goto fail;
    }

    /* PyArray_SearchSorted will make `x` contiguous even if we don't */
    arr_x = (PyArrayObject *)PyArray_FROMANY(obj_x, NPY_DOUBLE, 0, 0,
                                             NPY_ARRAY_CARRAY_RO);
    if (arr_x == NULL) {
        goto fail;
    }

    /* TODO: `bins` could be strided, needs change to check_array_monotonic */
    arr_bins = (PyArrayObject *)PyArray_FROMANY(obj_bins, NPY_DOUBLE, 1, 1,
                                                NPY_ARRAY_CARRAY_RO);
    if (arr_bins == NULL) {
        goto fail;
    }

    len_bins = PyArray_SIZE(arr_bins);
    if (len_bins == 0) {
        PyErr_SetString(PyExc_ValueError, "bins must have non-zero length");
        goto fail;
    }

    /* Release the GIL around the monotonicity scan for large inputs. */
    NPY_BEGIN_THREADS_THRESHOLDED(len_bins)
    monotonic = check_array_monotonic((const double *)PyArray_DATA(arr_bins),
                                      len_bins);
    NPY_END_THREADS

    if (monotonic == 0) {
        PyErr_SetString(PyExc_ValueError,
                        "bins must be monotonically increasing or decreasing");
        goto fail;
    }

    /* PyArray_SearchSorted needs an increasing array */
    if (monotonic == - 1) {
        PyArrayObject *arr_tmp = NULL;
        npy_intp shape = PyArray_DIM(arr_bins, 0);
        npy_intp stride = -PyArray_STRIDE(arr_bins, 0);
        /*
         * Point at the last element and use a negative stride: a reversed
         * (hence increasing) view of the decreasing bins, no copy needed.
         */
        void *data = (void *)(PyArray_BYTES(arr_bins) - stride * (shape - 1));

        arr_tmp = (PyArrayObject *)PyArray_New(&PyArray_Type, 1, &shape,
                                               NPY_DOUBLE, &stride, data, 0,
                                               PyArray_FLAGS(arr_bins), NULL);
        if (!arr_tmp) {
            goto fail;
        }
        /* The view keeps arr_bins alive (SetBaseObject steals the ref). */
        if (PyArray_SetBaseObject(arr_tmp, (PyObject *)arr_bins) < 0) {
            Py_DECREF(arr_tmp);
            goto fail;
        }
        arr_bins = arr_tmp;
    }

    ret = PyArray_SearchSorted(arr_bins, (PyObject *)arr_x,
                               right ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT, NULL);
    if (!ret) {
        goto fail;
    }

    /* If bins is decreasing, ret has bins from end, not start */
    if (monotonic == -1) {
        npy_intp *ret_data = (npy_intp *)PyArray_DATA((PyArrayObject *)ret);
        npy_intp len_ret = PyArray_SIZE((PyArrayObject *)ret);

        NPY_BEGIN_THREADS_THRESHOLDED(len_ret)
        while (len_ret--) {
            /* Map index in the reversed view back to the original order. */
            *ret_data = len_bins - *ret_data;
            ret_data++;
        }
        NPY_END_THREADS
    }
/*
 * Sequence-protocol slicing: return a view of self[ilow:ihigh] along
 * dimension 0.  Bounds are clamped Python-style; the result shares data
 * with (and holds a reference to) `self`, and also views its NA mask when
 * one is present.
 */
static PyObject *
array_slice(PyArrayObject *self, Py_ssize_t ilow, Py_ssize_t ihigh)
{
    PyArrayObject *ret;
    PyArray_Descr *dtype;
    Py_ssize_t dim0;
    char *data;
    npy_intp shape[NPY_MAXDIMS];

    if (PyArray_NDIM(self) == 0) {
        PyErr_SetString(PyExc_ValueError, "cannot slice a 0-d array");
        return NULL;
    }

    dim0 = PyArray_DIM(self, 0);
    /* Clamp bounds to [0, dim0] and force ihigh >= ilow (empty slice). */
    if (ilow < 0) {
        ilow = 0;
    }
    else if (ilow > dim0) {
        ilow = dim0;
    }
    if (ihigh < ilow) {
        ihigh = ilow;
    }
    else if (ihigh > dim0) {
        ihigh = dim0;
    }

    data = PyArray_DATA(self);
    /* Only advance the data pointer for a non-empty slice. */
    if (ilow < ihigh) {
        data += ilow * PyArray_STRIDE(self, 0);
    }

    /* Same shape except dimension 0 */
    shape[0] = ihigh - ilow;
    memcpy(shape+1, PyArray_DIMS(self) + 1,
           (PyArray_NDIM(self)-1)*sizeof(npy_intp));

    dtype = PyArray_DESCR(self);
    Py_INCREF(dtype);   /* PyArray_NewFromDescr steals this reference */
    ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(self), dtype,
                             PyArray_NDIM(self), shape,
                             PyArray_STRIDES(self), data,
                             /* the view never owns a mask of its own */
             PyArray_FLAGS(self) & ~(NPY_ARRAY_MASKNA | NPY_ARRAY_OWNMASKNA),
                             (PyObject *)self);
    if (ret == NULL) {
        return NULL;
    }
    /* The view keeps `self` alive (SetBaseObject steals the reference). */
    Py_INCREF(self);
    if (PyArray_SetBaseObject(ret, (PyObject *)self) < 0) {
        Py_DECREF(ret);
        return NULL;
    }
    PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL);

    /* Also take a view of the NA mask if it exists */
    if (PyArray_HASMASKNA(self)) {
        PyArrayObject_fields *fret = (PyArrayObject_fields *)ret;

        fret->maskna_dtype = PyArray_MASKNA_DTYPE(self);
        Py_INCREF(fret->maskna_dtype);

        data = PyArray_MASKNA_DATA(self);
        if (ilow < ihigh) {
            data += ilow * PyArray_MASKNA_STRIDES(self)[0];
        }
        fret->maskna_data = data;

        memcpy(fret->maskna_strides, PyArray_MASKNA_STRIDES(self),
               PyArray_NDIM(self) * sizeof(npy_intp));

        /* This view doesn't own the mask */
        fret->flags |= NPY_ARRAY_MASKNA;
        fret->flags &= ~NPY_ARRAY_OWNMASKNA;
    }

    return (PyObject *)ret;
}
/*!
@brief load an ANA f0 file data and header
@param [in] filename
@return [out] dict {'data': ndarray, 'header': {'size','dims','header'}},
        NULL on failure
*/
static PyObject *pyana_fzread(PyObject *self, PyObject *args) {
    // Function arguments
    char *filename;
    int debug=0;
    // Init ANA IO variables
    char *header = NULL;        // ANA header (comments)
    uint8_t *anaraw = NULL;     // Raw data
    int nd=-1, type=-1, *ds, size=-1, d; // Various properties
    // Data manipulation
    PyArrayObject *anadata;     // Final ndarray

    // Parse arguments
    if (!PyArg_ParseTuple(args, "s|i", &filename, &debug)) {
        return NULL;
    }

    // Read ANA file
    if (debug == 1)
        printf("pyana_fzread(): Reading in ANA file\n");
    anaraw = ana_fzread(filename, &ds, &nd, &header, &type, &size);

    if (NULL == anaraw) {
        PyErr_SetString(PyExc_ValueError, "In pyana_fzread: could not read ana file, data returned is NULL.");
        return NULL;
    }
    if (type == -1) {
        PyErr_SetString(PyExc_ValueError, "In pyana_fzread: could not read ana file, type invalid.");
        return NULL;
    }

    // Mold into numpy array
    npy_intp npy_dims[nd];      // Dimensions array
    int npy_type;               // Numpy datatype

    // ANA stores dimensions the other way around, so reverse them.
    if (debug == 1)
        printf("pyana_fzread(): Dimensions: ");
    for (d=0; d<nd; d++) {
        if (debug == 1)
            printf("%d ", ds[d]);
        npy_dims[nd-1-d] = ds[d];
    }
    if (debug == 1)
        printf("\npyana_fzread(): Datasize: %d\n", size);

    // Convert datatype from ANA type to PyArray type
    switch (type) {
        case (INT8): npy_type = PyArray_INT8; break;
        case (INT16): npy_type = PyArray_INT16; break;
        case (INT32): npy_type = PyArray_INT32; break;
        case (FLOAT32): npy_type = PyArray_FLOAT32; break;
        case (FLOAT64): npy_type = PyArray_FLOAT64; break;
        case (INT64): npy_type = PyArray_INT64; break;
        default:
            PyErr_SetString(PyExc_ValueError, "In pyana_fzread: datatype of ana file unknown/unsupported.");
            return NULL;
    }
    if (debug == 1)
        printf("pyana_fzread(): Read %d bytes, %d dimensions\n", size, nd);

    // Create numpy array from the data
    anadata = (PyArrayObject*) PyArray_SimpleNewFromData(nd, npy_dims,
        npy_type, (void *) anaraw);
    // BUGFIX: check for allocation failure before touching the flags
    if (NULL == anadata) {
        return NULL;
    }
    // Make sure Python owns the data, so it will free the data after use
    PyArray_FLAGS(anadata) |= NPY_OWNDATA;
    if (!PyArray_CHKFLAGS(anadata, NPY_OWNDATA)) {
        PyErr_SetString(PyExc_RuntimeError, "In pyana_fzread: unable to own the data, will cause memory leak. Aborting");
        Py_DECREF(anadata); // BUGFIX: the array object itself was leaked here
        return NULL;
    }

    // BUGFIX: the dims entry used to be hard-coded as the pair (ds[0],
    // ds[1]), which read out of bounds for 1-D files and silently dropped
    // dimensions for >2-D files.  Build a tuple covering all nd dims.
    PyObject *py_dims = PyTuple_New(nd);
    if (NULL == py_dims) {
        Py_DECREF(anadata);
        return NULL;
    }
    for (d=0; d<nd; d++)
        PyTuple_SET_ITEM(py_dims, d, PyLong_FromLong(ds[d]));

    // Return the data in a dict with some metainfo attached
    // NB: Use 'N' for PyArrayObject s, because when using 'O' it will create
    // another reference count such that the memory will never be deallocated.
    // See:
    // http://www.mail-archive.com/[email protected]/msg13354.html
    // ([Numpy-discussion] numpy CAPI questions)
    // 'z' (instead of 's') maps a NULL header to None instead of failing.
    return Py_BuildValue("{s:N,s:{s:i,s:N,s:z}}",
        "data", anadata,
        "header",
            "size", size,
            "dims", py_dims,
            "header", header);
}
/*
 * reconstruction_coeffs_py(xi, i, r, k, bndry, coeffs)
 *
 * Dispatch to the generated coeffs00k routine for order k (3..9), reading
 * boundary points from `bndry` and writing the reconstruction coefficients
 * into `coeffs` in place.  Both arrays must be contiguous and aligned
 * (NPY_IN_ARRAY).  Returns None, or NULL with an exception set.
 */
PyObject *
reconstruction_coeffs_py (PyObject * self, PyObject * args)
{
  double *x, *c, xi;
  long int i;
  int r, k;
  PyObject *bndry, *coeffs;

  /*
   * parse options
   */
  if (!PyArg_ParseTuple (args, "dliiOO", &xi, &i, &r, &k, &bndry, &coeffs))
    return NULL;

  if ((PyArray_FLAGS (bndry) & NPY_IN_ARRAY) != NPY_IN_ARRAY) {
      PyErr_SetString (PyExc_TypeError, "bndry is not contiguous and/or aligned");
      return NULL;
  }
  x = (double *) PyArray_DATA (bndry);

  if ((PyArray_FLAGS (coeffs) & NPY_IN_ARRAY) != NPY_IN_ARRAY) {
      PyErr_SetString (PyExc_TypeError, "coeffs is not contiguous and/or aligned");
      return NULL;
  }
  c = (double *) PyArray_DATA (coeffs);
  /*
   * BUGFIX: the Py_INCREFs previously taken on bndry/coeffs were never
   * released and leaked one reference per call; the borrowed references
   * from PyArg_ParseTuple remain valid for the duration of this call.
   */

  /*
   * dispatch
   */
  switch (k) {
    case 3: coeffs003 (xi, i, r, x, c); break;
    case 4: coeffs004 (xi, i, r, x, c); break;
    case 5: coeffs005 (xi, i, r, x, c); break;
    case 6: coeffs006 (xi, i, r, x, c); break;
    case 7: coeffs007 (xi, i, r, x, c); break;
    case 8: coeffs008 (xi, i, r, x, c); break;
    case 9: coeffs009 (xi, i, r, x, c); break;
    default:
      /*
       * BUGFIX: returning NULL without setting an exception used to raise
       * "SystemError: error return without exception set" in the caller.
       */
      PyErr_Format (PyExc_ValueError,
                    "reconstruction_coeffs: unsupported order k = %d", k);
      return NULL;
  }

  /*
   * done
   */
  Py_RETURN_NONE;
}