/*
 * Implements m1 >>= m2 for ndarrays.
 *
 * First applies the in-place deferral macro (presumably returns
 * Py_NotImplemented when m2's type should handle the op -- macro body
 * not visible here), then dispatches to the right_shift ufunc.
 */
static PyObject *
array_inplace_right_shift(PyArrayObject *m1, PyObject *m2)
{
    INPLACE_GIVE_UP_IF_NEEDED(m1, m2, nb_inplace_rshift,
                              array_inplace_right_shift);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.right_shift);
}
/*
 * Implements m1 ^= m2 for ndarrays.
 *
 * Defers to the other operand when the in-place deferral macro says so
 * (macro defined elsewhere), otherwise runs the bitwise_xor ufunc in place.
 */
static PyObject *
array_inplace_bitwise_xor(PyArrayObject *m1, PyObject *m2)
{
    INPLACE_GIVE_UP_IF_NEEDED(m1, m2, nb_inplace_xor,
                              array_inplace_bitwise_xor);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_xor);
}
/*
 * Implements m1 %= m2 for ndarrays.
 *
 * Applies the standard in-place deferral check, then dispatches to the
 * remainder ufunc stored in n_ops.
 */
static PyObject *
array_inplace_remainder(PyArrayObject *m1, PyObject *m2)
{
    INPLACE_GIVE_UP_IF_NEEDED(m1, m2, nb_inplace_remainder,
                              array_inplace_remainder);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.remainder);
}
/*
 * Implements m1 /= m2 (classic division slot) for ndarrays.
 *
 * Defers via the in-place deferral macro when needed, otherwise runs
 * the divide ufunc in place.
 */
static PyObject *
array_inplace_divide(PyArrayObject *m1, PyObject *m2)
{
    INPLACE_GIVE_UP_IF_NEEDED(m1, m2, nb_inplace_divide,
                              array_inplace_divide);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.divide);
}
/*
 * Implements m1 *= m2 for ndarrays.
 *
 * Deferral check first (macro defined elsewhere), then the multiply
 * ufunc applied in place.
 */
static PyObject *
array_inplace_multiply(PyArrayObject *m1, PyObject *m2)
{
    INPLACE_GIVE_UP_IF_NEEDED(m1, m2, nb_inplace_multiply,
                              array_inplace_multiply);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.multiply);
}
/*
 * Implements m1 -= m2 for ndarrays.
 *
 * Deferral check first, then dispatch to the subtract ufunc in place.
 */
static PyObject *
array_inplace_subtract(PyArrayObject *m1, PyObject *m2)
{
    INPLACE_GIVE_UP_IF_NEEDED(m1, m2, nb_inplace_subtract,
                              array_inplace_subtract);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.subtract);
}
/*
 * Implements m1 /= m2 (true division) for ndarrays.
 *
 * GIVE_UP_IF_HAS_RIGHT_BINOP presumably returns NotImplemented when the
 * right operand defines a reflected __rtruediv__ that should win -- macro
 * body not visible here.  Otherwise dispatches to the true_divide ufunc.
 */
static PyObject *
array_inplace_true_divide(PyArrayObject *m1, PyObject *m2)
{
    GIVE_UP_IF_HAS_RIGHT_BINOP(m1, m2, "__itruediv__", "__rtruediv__",
                               1, nb_inplace_true_divide);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.true_divide);
}
/*
 * optimize float array or complex array to a scalar power
 *
 * Fast paths for a1 ** o2 when o2 reduces to one of a handful of scalar
 * exponents (1, -1, 0, 0.5, 2).  Returns a new reference on success, or
 * NULL when no fast path applies so the caller can fall back to the
 * generic power ufunc.
 * NOTE(review): NULL is also what helpers like PyArray_Copy return on
 * error, so callers see "no fast path" and "error" the same way --
 * confirm the fallback path handles a pending exception correctly.
 */
static PyObject *
fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
{
    double exp;
    /* array_power_is_scalar presumably extracts a C double exponent from
       o2 when o2 is an appropriate scalar -- defined elsewhere. */
    if (PyArray_Check(a1) && array_power_is_scalar(o2, &exp)) {
        PyObject *fastop = NULL;
        if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) {
            if (exp == 1.0) {
                /* we have to do this one special, as the
                   "copy" method of array objects isn't set
                   up early enough to be added
                   by PyArray_SetNumericOps. */
                if (inplace) {
                    Py_INCREF(a1);
                    return (PyObject *)a1;
                }
                else {
                    return PyArray_Copy(a1);
                }
            }
            else if (exp == -1.0) {
                fastop = n_ops.reciprocal;
            }
            else if (exp == 0.0) {
                fastop = n_ops.ones_like;
            }
            else if (exp == 0.5) {
                fastop = n_ops.sqrt;
            }
            else if (exp == 2.0) {
                fastop = n_ops.square;
            }
            else {
                /* any other exponent: no fast path */
                return NULL;
            }
            if (inplace) {
                return PyArray_GenericInplaceUnaryFunction(a1, fastop);
            }
            else {
                return PyArray_GenericUnaryFunction(a1, fastop);
            }
        }
        /* non-float/complex arrays: only squaring is special-cased,
           implemented as a1 * a1 */
        else if (exp == 2.0) {
            fastop = n_ops.multiply;
            if (inplace) {
                return PyArray_GenericInplaceBinaryFunction
                    (a1, (PyObject *)a1, fastop);
            }
            else {
                return PyArray_GenericBinaryFunction
                    (a1, (PyObject *)a1, fastop);
            }
        }
    }
    return NULL;
}
/*
 * Implements a1 **= o2 for ndarrays.  The three-argument pow() modulo
 * operand is ignored, matching the comment in the original.
 *
 * Tries the scalar-exponent fast path first; a NULL result means no
 * fast path applied, so fall back to the generic power ufunc.
 */
static PyObject *
array_inplace_power(PyArrayObject *a1, PyObject *o2,
                    PyObject *NPY_UNUSED(modulo))
{
    /* modulo is ignored! */
    PyObject *res = fast_scalar_power(a1, o2, 1);
    if (res == NULL) {
        res = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power);
    }
    return res;
}
/*
 * Implements a1 **= o2 for ndarrays.  The three-argument pow() modulo
 * operand is ignored, matching the comment in the original.
 *
 * Defers to o2 when it carries a reflected __rpow__ (macro defined
 * elsewhere), then tries the scalar-exponent fast path before falling
 * back to the generic power ufunc.
 */
static PyObject *
array_inplace_power(PyArrayObject *a1, PyObject *o2,
                    PyObject *NPY_UNUSED(modulo))
{
    /* modulo is ignored! */
    PyObject *res;

    GIVE_UP_IF_HAS_RIGHT_BINOP(a1, o2, "__ipow__", "__rpow__", 1);
    res = fast_scalar_power(a1, o2, 1);
    if (res == NULL) {
        res = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power);
    }
    return res;
}
/*
 * Implements a1 **= o2 for ndarrays.  The three-argument pow() modulo
 * operand is ignored, matching the comment in the original.
 *
 * This variant of fast_scalar_power reports via its return code and an
 * out-parameter: nonzero means "no fast path taken", in which case we
 * fall back to the generic power ufunc.
 */
static PyObject *
array_inplace_power(PyArrayObject *a1, PyObject *o2,
                    PyObject *NPY_UNUSED(modulo))
{
    /* modulo is ignored! */
    PyObject *res = NULL;

    INPLACE_GIVE_UP_IF_NEEDED(a1, o2, nb_inplace_power,
                              array_inplace_power);
    if (fast_scalar_power(a1, o2, 1, &res) != 0) {
        res = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power);
    }
    return res;
}
/*
 * Implements m1 ^= m2 for ndarrays.
 *
 * Defers to m2's reflected __rxor__ when the deferral macro applies
 * (macro defined elsewhere), otherwise runs the bitwise_xor ufunc.
 */
static PyObject *
array_inplace_bitwise_xor(PyArrayObject *m1, PyObject *m2)
{
    GIVE_UP_IF_HAS_RIGHT_BINOP(m1, m2, "__ixor__", "__rxor__", 1);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_xor);
}
/*
 * Implements m1 >>= m2 for ndarrays.
 *
 * Defers to m2's reflected __rrshift__ when the deferral macro applies,
 * otherwise dispatches to the right_shift ufunc in place.
 */
static PyObject *
array_inplace_right_shift(PyArrayObject *m1, PyObject *m2)
{
    GIVE_UP_IF_HAS_RIGHT_BINOP(m1, m2, "__irshift__", "__rrshift__", 1);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.right_shift);
}
/*
 * Implements m1 %= m2 for ndarrays.
 *
 * Defers to m2's reflected __rmod__ when the deferral macro applies,
 * otherwise dispatches to the remainder ufunc in place.
 */
static PyObject *
array_inplace_remainder(PyArrayObject *m1, PyObject *m2)
{
    GIVE_UP_IF_HAS_RIGHT_BINOP(m1, m2, "__imod__", "__rmod__", 1);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.remainder);
}
/*
 * Implements m1 -= m2 for ndarrays.
 *
 * Defers to m2's reflected __rsub__ when the deferral macro applies,
 * otherwise dispatches to the subtract ufunc in place.
 */
static PyObject *
array_inplace_subtract(PyArrayObject *m1, PyObject *m2)
{
    GIVE_UP_IF_HAS_RIGHT_BINOP(m1, m2, "__isub__", "__rsub__", 1);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.subtract);
}
/*
 * Implements m1 *= m2 for ndarrays.
 *
 * Defers to m2's reflected __rmul__ when the deferral macro applies,
 * otherwise dispatches to the multiply ufunc in place.
 */
static PyObject *
array_inplace_multiply(PyArrayObject *m1, PyObject *m2)
{
    GIVE_UP_IF_HAS_RIGHT_BINOP(m1, m2, "__imul__", "__rmul__", 1);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.multiply);
}
/*
 * optimize float array or complex array to a scalar power
 *
 * Fast paths for a1 ** o2 when o2 reduces to a recognized scalar
 * exponent (1, -1, 0, 0.5, 2).  Returns a new reference on success, or
 * NULL when no fast path applies so the caller falls back to the power
 * ufunc.
 * NOTE(review): the a1 == NULL check after the cast is the only error
 * path that distinguishes failure from "no fast path"; elsewhere both
 * look like NULL -- confirm callers check for a pending exception.
 */
static PyObject *
fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
{
    double exponent;
    NPY_SCALARKIND kind;   /* NPY_NOSCALAR is not scalar */

    /* is_scalar_with_conversion presumably yields the scalar kind of o2
       and its value as a double, > 0 only for true scalars -- defined
       elsewhere. */
    if (PyArray_Check(a1) &&
            ((kind = is_scalar_with_conversion(o2, &exponent)) > 0)) {
        PyObject *fastop = NULL;
        if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) {
            if (exponent == 1.0) {
                /* we have to do this one special, as the
                   "copy" method of array objects isn't set
                   up early enough to be added
                   by PyArray_SetNumericOps. */
                if (inplace) {
                    Py_INCREF(a1);
                    return (PyObject *)a1;
                }
                else {
                    return PyArray_Copy(a1);
                }
            }
            else if (exponent == -1.0) {
                fastop = n_ops.reciprocal;
            }
            else if (exponent == 0.0) {
                fastop = n_ops._ones_like;
            }
            else if (exponent == 0.5) {
                fastop = n_ops.sqrt;
            }
            else if (exponent == 2.0) {
                fastop = n_ops.square;
            }
            else {
                /* any other exponent: no fast path */
                return NULL;
            }
            if (inplace) {
                return PyArray_GenericInplaceUnaryFunction(a1, fastop);
            }
            else {
                return PyArray_GenericUnaryFunction(a1, fastop);
            }
        }
        /* Because this is called with all arrays, we need to
         * change the output if the kind of the scalar is different
         * than that of the input and inplace is not on ---
         * (thus, the input should be up-cast)
         */
        else if (exponent == 2.0) {
            fastop = n_ops.multiply;
            if (inplace) {
                return PyArray_GenericInplaceBinaryFunction
                    (a1, (PyObject *)a1, fastop);
            }
            else {
                PyArray_Descr *dtype = NULL;
                PyObject *res;

                /* We only special-case the FLOAT_SCALAR and integer types */
                if (kind == NPY_FLOAT_SCALAR && PyArray_ISINTEGER(a1)) {
                    /* int ** float-scalar: upcast the base to double
                       before squaring.  CastToType returns a new array;
                       a1 is rebound to it so the Py_DECREF below pairs
                       with this new reference. */
                    dtype = PyArray_DescrFromType(NPY_DOUBLE);
                    a1 = (PyArrayObject *)PyArray_CastToType(a1, dtype,
                                                    PyArray_ISFORTRAN(a1));
                    if (a1 == NULL) {
                        return NULL;
                    }
                }
                else {
                    /* no cast: take a reference so both branches leave
                       a1 owned, balanced by the Py_DECREF below */
                    Py_INCREF(a1);
                }
                res = PyArray_GenericBinaryFunction(a1, (PyObject *)a1,
                                                    fastop);
                Py_DECREF(a1);
                return res;
            }
        }
    }
    return NULL;
}
/*
 * Implements m1 += m2 for ndarrays.
 *
 * Defers to m2's reflected __radd__ when the deferral macro applies,
 * otherwise dispatches to the add ufunc in place.
 */
static PyObject *
array_inplace_add(PyArrayObject *m1, PyObject *m2)
{
    GIVE_UP_IF_HAS_RIGHT_BINOP(m1, m2, "__iadd__", "__radd__", 1);
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.add);
}
/*
 * Implements m1 ^= m2 for ndarrays: straight dispatch to the
 * bitwise_xor ufunc applied in place (no deferral logic in this
 * variant).
 */
static PyObject *
array_inplace_bitwise_xor(PyArrayObject *m1, PyObject *m2)
{
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.bitwise_xor);
}
/*
 * Implements m1 /= m2 (true division) for ndarrays: straight dispatch
 * to the true_divide ufunc applied in place.
 */
static PyObject *
array_inplace_true_divide(PyArrayObject *m1, PyObject *m2)
{
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.true_divide);
}
/*
 * Implements m1 >>= m2 for ndarrays: straight dispatch to the
 * right_shift ufunc applied in place.
 */
static PyObject *
array_inplace_right_shift(PyArrayObject *m1, PyObject *m2)
{
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.right_shift);
}
/*
 * Implements m1 += m2 for ndarrays: straight dispatch to the add ufunc
 * applied in place.
 */
static PyObject *
array_inplace_add(PyArrayObject *m1, PyObject *m2)
{
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.add);
}
/*
 * Implements m1 %= m2 for ndarrays: straight dispatch to the remainder
 * ufunc applied in place.
 */
static PyObject *
array_inplace_remainder(PyArrayObject *m1, PyObject *m2)
{
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.remainder);
}
/*
 * Implements m1 *= m2 for ndarrays: straight dispatch to the multiply
 * ufunc applied in place.
 */
static PyObject *
array_inplace_multiply(PyArrayObject *m1, PyObject *m2)
{
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.multiply);
}
/*
 * Implements m1 -= m2 for ndarrays: straight dispatch to the subtract
 * ufunc applied in place.
 */
static PyObject *
array_inplace_subtract(PyArrayObject *m1, PyObject *m2)
{
    return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.subtract);
}