/* nb_floor_divide slot for mpz.
 *
 * Fast path when both operands are mpz/xmpz; otherwise dispatch to the
 * generic helpers in order of increasing numeric domain.  Returns a new
 * reference, NULL on error, or NotImplemented for unsupported types.
 */
static PyObject *
GMPy_MPZ_FloorDiv_Slot(PyObject *x, PyObject *y)
{
    if (CHECK_MPZANY(x) && CHECK_MPZANY(y)) {
        MPZ_Object *result;

        /* Divisor must be checked before calling mpz_fdiv_q. */
        if (mpz_sgn(MPZ(y)) == 0) {
            ZERO_ERROR("division or modulo by zero");
            return NULL;
        }

        result = GMPy_MPZ_New(NULL);
        if (result != NULL) {
            mpz_fdiv_q(result->z, MPZ(x), MPZ(y));
        }
        /* NULL here means allocation failed with the error already set. */
        return (PyObject*)result;
    }

    if (IS_INTEGER(x) && IS_INTEGER(y))
        return GMPy_Integer_FloorDiv(x, y, NULL);
    if (IS_RATIONAL(x) && IS_RATIONAL(y))
        return GMPy_Rational_FloorDiv(x, y, NULL);
    if (IS_REAL(x) && IS_REAL(y))
        return GMPy_Real_FloorDiv(x, y, NULL);
    if (IS_COMPLEX(x) && IS_COMPLEX(y))
        return GMPy_Complex_FloorDiv(x, y, NULL);

    Py_RETURN_NOTIMPLEMENTED;
}
/* nb_add slot for mpz.
 *
 * Adds two mpz/xmpz operands directly, or dispatches to the generic
 * type-coercing helpers.  Returns a new reference, NULL on error, or
 * NotImplemented for unsupported operand types.
 */
static PyObject *
GMPy_MPZ_Add_Slot(PyObject *x, PyObject *y)
{
    if (CHECK_MPZANY(x) && CHECK_MPZANY(y)) {
        MPZ_Object *result = GMPy_MPZ_New(NULL);

        if (result != NULL) {
            mpz_add(result->z, MPZ(x), MPZ(y));
        }
        /* NULL propagates an allocation failure. */
        return (PyObject*)result;
    }

    if (IS_INTEGER(x) && IS_INTEGER(y))
        return GMPy_Integer_Add(x, y, NULL);
    if (IS_RATIONAL(x) && IS_RATIONAL(y))
        return GMPy_Rational_Add(x, y, NULL);
    if (IS_REAL(x) && IS_REAL(y))
        return GMPy_Real_Add(x, y, NULL);
    if (IS_COMPLEX(x) && IS_COMPLEX(y))
        return GMPy_Complex_Add(x, y, NULL);

    Py_RETURN_NOTIMPLEMENTED;
}
/* nb_multiply slot for mpz.
 *
 * NOTE: the fast path uses MPZ_Check (strict mpz only), unlike the
 * add/floordiv slots which accept xmpz via CHECK_MPZANY; preserved as-is.
 */
static PyObject *
GMPy_MPZ_Mul_Slot(PyObject *x, PyObject *y)
{
    if (MPZ_Check(x) && MPZ_Check(y)) {
        MPZ_Object *result = GMPy_MPZ_New(NULL);

        if (result != NULL) {
            mpz_mul(result->z, MPZ(x), MPZ(y));
        }
        return (PyObject*)result;
    }

    if (IS_INTEGER(x) && IS_INTEGER(y))
        return GMPy_Integer_Mul(x, y, NULL);
    if (IS_RATIONAL(x) && IS_RATIONAL(y))
        return GMPy_Rational_Mul(x, y, NULL);
    if (IS_REAL(x) && IS_REAL(y))
        return GMPy_Real_Mul(x, y, NULL);
    if (IS_COMPLEX(x) && IS_COMPLEX(y))
        return GMPy_Complex_Mul(x, y, NULL);

    Py_RETURN_NOTIMPLEMENTED;
}
/* nb_add slot for mpfr.
 *
 * Fast path adds two mpfr operands under the current context's rounding
 * mode; otherwise falls through to the real/complex coercing helpers.
 */
static PyObject *
GMPy_MPFR_Add_Slot(PyObject *x, PyObject *y)
{
    if (MPFR_Check(x) && MPFR_Check(y)) {
        MPFR_Object *result;
        CTXT_Object *context = NULL;

        CHECK_CONTEXT(context);

        result = GMPy_MPFR_New(0, context);
        if (result != NULL) {
            mpfr_clear_flags();
            SET_MPFR_MPFR_WAS_NAN(context, x, y);
            /* rc records the MPFR ternary value for later flag handling. */
            result->rc = mpfr_add(result->f, MPFR(x), MPFR(y), GET_MPFR_ROUND(context));
            _GMPy_MPFR_Cleanup(&result, context);
        }
        return (PyObject*)result;
    }

    if (IS_REAL(x) && IS_REAL(y))
        return GMPy_Real_Add(x, y, NULL);
    if (IS_COMPLEX(x) && IS_COMPLEX(y))
        return GMPy_Complex_Add(x, y, NULL);

    Py_RETURN_NOTIMPLEMENTED;
}
/* nb_multiply slot for mpc: any complex-compatible pair is accepted. */
static PyObject *
GMPy_MPC_Mul_Slot(PyObject *x, PyObject *y)
{
    if (!(IS_COMPLEX(x) && IS_COMPLEX(y))) {
        Py_RETURN_NOTIMPLEMENTED;
    }
    return GMPy_Complex_Mul(x, y, NULL);
}
/* nb_add slot for mpfr: dispatch-only variant (no mpfr+mpfr fast path).
 * Tries the real helper first, then the complex helper.
 */
static PyObject *
GMPy_MPFR_Add_Slot(PyObject *x, PyObject *y)
{
    if (IS_REAL(x) && IS_REAL(y)) {
        return GMPy_Real_Add(x, y, NULL);
    }
    if (IS_COMPLEX(x) && IS_COMPLEX(y)) {
        return GMPy_Complex_Add(x, y, NULL);
    }
    Py_RETURN_NOTIMPLEMENTED;
}
/* cksiz - verify that source and destination objects have compatible sizes
 * for the operation class encoded in argtyp.
 *
 * Returns 0 if the sizes are acceptable, -1 (after printing a warning)
 * on an irreconcilable dimension mismatch.
 *
 * Mismatched dimensions are tolerated in three cases:
 *   - real->complex forward FFT / complex->real inverse FFT column counts
 *     (hermitian storage: N real columns <-> 1+N/2 complex columns),
 *   - source dimension of 1 (an "outer" op, e.g. vmul/vadd with broadcast),
 *   - destination dimension of 1 when the op can project (vsum, vmaxv, ...).
 */
int cksiz(QSP_ARG_DECL int argtyp,Data_Obj *src_dp,Data_Obj *dst_dp)
{
	int i;

	/* allow exception for vmaxg, etc.
	 * These functions return two scalars, the max value,
	 * and number of occurrences, PLUS a vector with the
	 * indices of the occurrences.
	 *
	 * But where do we check them? Do we need to do any checking for them?
	 */
	if( argtyp == V_SCALRET2 ) return(0);

	for(i=0;i<N_DIMENSIONS;i++){
		if( OBJ_TYPE_DIM(src_dp,i) != OBJ_TYPE_DIM(dst_dp,i) ){
			/* special case for real/cpx fft */
			/* dimension index 1 is the column (pixel) dimension here */
			if( i==1 ){
				if( (argtyp & FWD_FT) && IS_REAL(src_dp) && IS_COMPLEX(dst_dp) ){
					if( OBJ_COLS(dst_dp) == (1+OBJ_COLS(src_dp)/2) )
						continue;
				} else if( (argtyp & INV_FT) && IS_COMPLEX(src_dp) && IS_REAL(dst_dp) ){
					if( OBJ_COLS(src_dp) == (1+OBJ_COLS(dst_dp)/2) )
						continue;
				}
			}
			/* if we get to here, the dimensions don't match... */
			/* if the source dimension is 1, it may be an outer op */
			/* if the destination dimension is 1, it may be a projection op */
			if( OBJ_TYPE_DIM(src_dp,i) == 1 /* && (argtyp&VV_SOURCES) == VV_SOURCES */ ){
				/* vmul, vadd, vsub, vatan2 */
				/* vvm_gt etc also */
				/* don't need VV_SOURCES... */
				continue;
			} else if( OBJ_TYPE_DIM(dst_dp,i) == 1 && CAN_PROJECT(argtyp) ){
				/* vsum, vmaxv, vmainv, etc */
				continue;
			} else {
				/* if we get to here, we're not happy... */
				sprintf(ERROR_STRING,
					"cksiz: %s count mismatch, %s (%d) & %s (%d)",
					dimension_name[i],
					OBJ_NAME(src_dp),OBJ_TYPE_DIM(src_dp,i),
					OBJ_NAME(dst_dp),OBJ_TYPE_DIM(dst_dp,i));
				WARN(ERROR_STRING);
				return(-1);
			}
		}
	}
	return(0);
} /* end cksiz() */
/* nb_remainder slot for mpq: dispatch by numeric domain, narrowest first. */
static PyObject *
GMPy_MPQ_Mod_Slot(PyObject *x, PyObject *y)
{
    if (IS_RATIONAL(x) && IS_RATIONAL(y)) {
        return GMPy_Rational_Mod(x, y, NULL);
    }
    if (IS_REAL(x) && IS_REAL(y)) {
        return GMPy_Real_Mod(x, y, NULL);
    }
    if (IS_COMPLEX(x) && IS_COMPLEX(y)) {
        return GMPy_Complex_Mod(x, y, NULL);
    }
    Py_RETURN_NOTIMPLEMENTED;
}
/* Add two complex-compatible operands, returning a new mpc object.
 *
 * Fast path when both arguments are already mpc; otherwise each operand
 * is converted via GMPy_MPC_From_Complex.  Callers must have verified
 * IS_COMPLEX for both arguments; anything else is an internal error.
 * Returns a new reference or NULL with a Python exception set.
 */
static PyObject *
GMPy_Complex_Add(PyObject *x, PyObject *y, CTXT_Object *context)
{
    MPC_Object *result = NULL;

    CHECK_CONTEXT(context);

    if (!(result = GMPy_MPC_New(0, 0, context))) {
        /* LCOV_EXCL_START */
        return NULL;
        /* LCOV_EXCL_STOP */
    }

    if (MPC_Check(x) && MPC_Check(y)) {
        SET_MPC_MPC_WAS_NAN(context, x, y);
        result->rc = mpc_add(result->c, MPC(x), MPC(y), GET_MPC_ROUND(context));
        /* Cleanup applies context flag handling and may replace result. */
        _GMPy_MPC_Cleanup(&result, context);
        return (PyObject*)result;
    }

    if (IS_COMPLEX(x) && IS_COMPLEX(y)) {
        MPC_Object *tempx = NULL, *tempy = NULL;

        if (!(tempx = GMPy_MPC_From_Complex(x, 1, 1, context)) ||
            !(tempy = GMPy_MPC_From_Complex(y, 1, 1, context))) {
            /* LCOV_EXCL_START */
            /* XDECREF: tempy may be NULL when only tempx converted. */
            Py_XDECREF((PyObject*)tempx);
            Py_XDECREF((PyObject*)tempy);
            Py_DECREF((PyObject*)result);
            return NULL;
            /* LCOV_EXCL_STOP */
        }

        SET_MPC_MPC_WAS_NAN(context, tempx, tempy);
        result->rc = mpc_add(result->c, tempx->c, tempy->c, GET_MPC_ROUND(context));
        Py_DECREF((PyObject*)tempx);
        Py_DECREF((PyObject*)tempy);
        _GMPy_MPC_Cleanup(&result, context);
        return (PyObject*)result;
    }

    /* LCOV_EXCL_START */
    /* Unreachable when callers obey the IS_COMPLEX contract. */
    Py_DECREF((PyObject*)result);
    SYSTEM_ERROR("Internal error in GMPy_Complex_Add().");
    return NULL;
    /* LCOV_EXCL_STOP */
}
/* _real_cpx_objs_ok - validate a real/complex object pair for funcname.
 *
 * Checks, in order (first failure wins and determines the warning printed):
 *   cpx_dp is complex, real_dp is real, cpx_dp has floating-point
 *   precision, and both objects share the same machine precision.
 * Returns TRUE if all checks pass, FALSE (after WARN) otherwise.
 */
static int _real_cpx_objs_ok( QSP_ARG_DECL Data_Obj *real_dp,Data_Obj *cpx_dp, const char *funcname )
{
	if( ! IS_COMPLEX(cpx_dp) ){
		sprintf(ERROR_STRING,
			"%s: %s must be complex",funcname,OBJ_NAME(cpx_dp));
		WARN(ERROR_STRING);
		return FALSE;
	}
	if( ! IS_REAL(real_dp) ){
		sprintf(ERROR_STRING,
			"%s: %s must be real",funcname,OBJ_NAME(real_dp));
		WARN(ERROR_STRING);
		return FALSE;
	}
	if( ! FLOATING_OBJ( cpx_dp ) ){
		sprintf(ERROR_STRING,
			"%s: precision must be float or double",funcname);
		WARN(ERROR_STRING);
		return FALSE;
	}
	/* NOTE: only the complex object's precision is checked against
	 * FLOATING_OBJ above; this call enforces that the real object matches. */
	if( !dp_same_mach_prec(cpx_dp,real_dp,funcname) ){
		sprintf(ERROR_STRING,
			"%s: complex object (%s,%s) and target (%s,%s) must have same precision",
			funcname,OBJ_NAME(cpx_dp),OBJ_MACH_PREC_NAME(cpx_dp),
			OBJ_NAME(real_dp),OBJ_MACH_PREC_NAME(real_dp));
		WARN(ERROR_STRING);
		return FALSE;
	}
	return TRUE;
}
/* nb_power slot shared by the mp types.
 *
 * Dispatches on the base/exponent domain; the optional third argument
 * (three-argument pow modulus) is forwarded untouched to each helper.
 */
static PyObject *
GMPy_MPANY_Pow_Slot(PyObject *base, PyObject *exp, PyObject *mod)
{
    if (IS_INTEGER(base) && IS_INTEGER(exp)) {
        return GMPy_Integer_Pow(base, exp, mod, NULL);
    }
    if (IS_RATIONAL(base) && IS_RATIONAL(exp)) {
        return GMPy_Rational_Pow(base, exp, mod, NULL);
    }
    if (IS_REAL(base) && IS_REAL(exp)) {
        return GMPy_Real_Pow(base, exp, mod, NULL);
    }
    if (IS_COMPLEX(base) && IS_COMPLEX(exp)) {
        return GMPy_Complex_Pow(base, exp, mod, NULL);
    }
    Py_RETURN_NOTIMPLEMENTED;
}
/* Generic add entry point: route to the narrowest domain covering both
 * operands.  Unlike the number slots, unsupported types raise TypeError
 * rather than returning NotImplemented.
 */
static PyObject *
GMPy_Number_Add(PyObject *x, PyObject *y, CTXT_Object *context)
{
    if (IS_INTEGER(x) && IS_INTEGER(y)) {
        return GMPy_Integer_Add(x, y, context);
    }
    if (IS_RATIONAL(x) && IS_RATIONAL(y)) {
        return GMPy_Rational_Add(x, y, context);
    }
    if (IS_REAL(x) && IS_REAL(y)) {
        return GMPy_Real_Add(x, y, context);
    }
    if (IS_COMPLEX(x) && IS_COMPLEX(y)) {
        return GMPy_Complex_Add(x, y, context);
    }
    TYPE_ERROR("add() argument type not supported");
    return NULL;
}
/* realp - predicate: 1 if x denotes a real number, else 0.
 * Integers, floats, rationals and bignums are real; a complex value
 * counts as real only when its imaginary part is zero.
 */
int realp(int x)
{
    if (IS_INTEGER(x) || IS_FLOAT(x) || IS_RATIONAL(x) || IS_BIGNUM(x))
        return 1;
    if (IS_COMPLEX(x) && GET_IMAG_FLT(x) == 0)
        return 1;
    return 0;
}
/* context.digits(x[, base[, prec]]) implementation.
 *
 * Converts the first argument to the matching mp type and forwards the
 * remaining arguments (as a slice tuple) to that type's digits method.
 * Returns a new reference or NULL with an exception set.
 *
 * Fix: the original passed a NULL conversion result straight into the
 * *_Digits_Method call and then executed Py_DECREF(NULL) — a segfault
 * whenever GMPy_*_From_* failed (e.g. MemoryError).  Each conversion is
 * now checked before use.
 */
static PyObject *
GMPy_Context_Digits(PyObject *self, PyObject *args)
{
    PyObject *arg0, *tuple, *temp, *result;
    Py_ssize_t argc;

    argc = PyTuple_GET_SIZE(args);
    if (argc == 0) {
        TYPE_ERROR("digits() requires at least one argument");
        return NULL;
    }
    if (argc > 3) {
        TYPE_ERROR("digits() accepts at most three arguments");
        return NULL;
    }

    arg0 = PyTuple_GET_ITEM(args, 0);
    /* tuple holds the optional base/precision arguments. */
    if (!(tuple = PyTuple_GetSlice(args, 1, argc))) {
        return NULL;
    }

    if (IS_INTEGER(arg0)) {
        temp = (PyObject*)GMPy_MPZ_From_Integer(arg0, NULL);
        if (!temp) {
            Py_DECREF(tuple);
            return NULL;
        }
        result = GMPy_MPZ_Digits_Method(temp, tuple);
        Py_DECREF(temp);
        Py_DECREF(tuple);
        return result;
    }
    if (IS_RATIONAL(arg0)) {
        temp = (PyObject*)GMPy_MPQ_From_Rational(arg0, NULL);
        if (!temp) {
            Py_DECREF(tuple);
            return NULL;
        }
        result = GMPy_MPQ_Digits_Method(temp, tuple);
        Py_DECREF(temp);
        Py_DECREF(tuple);
        return result;
    }
    if (IS_REAL(arg0)) {
        temp = (PyObject*)GMPy_MPFR_From_Real(arg0, 1, NULL);
        if (!temp) {
            Py_DECREF(tuple);
            return NULL;
        }
        result = GMPy_MPFR_Digits_Method(temp, tuple);
        Py_DECREF(temp);
        Py_DECREF(tuple);
        return result;
    }
    if (IS_COMPLEX(arg0)) {
        temp = (PyObject*)GMPy_MPC_From_Complex(arg0, 1, 1, NULL);
        if (!temp) {
            Py_DECREF(tuple);
            return NULL;
        }
        result = GMPy_MPC_Digits_Method(temp, tuple);
        Py_DECREF(temp);
        Py_DECREF(tuple);
        return result;
    }

    Py_DECREF(tuple);
    TYPE_ERROR("digits() argument type not supported");
    return NULL;
}
/* prodimg - make the product image: dpto[i][j] = rowobj[j] * colobj[i].
 *
 * The target's columns must match the row object's columns, and the
 * target's rows must match the column object's rows; all three objects
 * must share the same pixel type.  The actual work is delegated to vmul
 * with the row and column as (presumably broadcast) sources.
 * Returns 0 on success, -1 (after a warning) on any mismatch.
 */
int prodimg(QSP_ARG_DECL Data_Obj *dpto,Data_Obj *rowobj,Data_Obj *colobj)
	/** make the product image */
{
	Vec_Obj_Args oa1, *oap=&oa1;

	if( OBJ_COLS(rowobj) != OBJ_COLS(dpto) ){
		sprintf(DEFAULT_ERROR_STRING,
			"prodimg: row size mismatch, target %s (%d) and row %s (%d)",
			OBJ_NAME(dpto),OBJ_COLS(dpto),OBJ_NAME(rowobj), OBJ_COLS(rowobj));
		NWARN(DEFAULT_ERROR_STRING);
		return(-1);
	} else if( OBJ_ROWS(colobj) != OBJ_ROWS(dpto) ){
		sprintf(DEFAULT_ERROR_STRING,
			"prodimg: column size mismatch, target %s (%d) and column %s (%d)",
			OBJ_NAME(dpto),OBJ_ROWS(dpto),OBJ_NAME(colobj), OBJ_ROWS(colobj));
		NWARN(DEFAULT_ERROR_STRING);
		return(-1);
	} else if( !same_pixel_type(QSP_ARG dpto,rowobj) ){
		NWARN("type/precision mismatch");
		return(-1);
	} else if( !same_pixel_type(QSP_ARG dpto,colobj) ){
		NWARN("type precision mismatch");
		return(-1);
	}
#ifdef FOOBAR
	/* Old restrictions, disabled: float-only and real-only checks. */
	else if( ! FLOATING_OBJ(dpto) ){
		NWARN("sorry, only float and double supported for prodimg");
		return(-1);
	} else if( IS_COMPLEX(dpto) || IS_COMPLEX(colobj) || IS_COMPLEX(rowobj) ){
		NWARN("Sorry, complex not supported");
		return(-1);
	}
#endif /* FOOBAR */

	setvarg3(oap,dpto,rowobj,colobj);
	vmul(QSP_ARG oap);
	return(0);
}
/* mul_2exp dispatcher: x * 2**y for real or complex x.
 * Only x's type is inspected here; the helpers validate y.
 */
static PyObject *
GMPy_Number_Mul_2exp(PyObject *x, PyObject *y, CTXT_Object *context)
{
    if (IS_REAL(x)) {
        return GMPy_Real_Mul_2exp(x, y, context);
    }
    if (IS_COMPLEX(x)) {
        return GMPy_Complex_Mul_2exp(x, y, context);
    }
    TYPE_ERROR("mul_2exp() argument type not supported");
    return NULL;
}
/* Subtract two complex-compatible operands, returning a new mpc object.
 *
 * Fast path when both arguments are already mpc; otherwise both are
 * coerced via GMPy_MPC_From_Complex.  Returns a new reference, NULL on
 * error, or NotImplemented when neither branch applies.
 */
static PyObject *
GMPy_Complex_Sub(PyObject *x, PyObject *y, CTXT_Object *context)
{
    MPC_Object *result = NULL;

    CHECK_CONTEXT(context);

    if (!(result = GMPy_MPC_New(0, 0, context)))
        return NULL;

    if (MPC_Check(x) && MPC_Check(y)) {
        result->rc = mpc_sub(result->c, MPC(x), MPC(y), GET_MPC_ROUND(context));
        goto done;
    }

    if (IS_COMPLEX(x) && IS_COMPLEX(y)) {
        MPC_Object *tempx, *tempy;

        tempx = GMPy_MPC_From_Complex(x, 1, 1, context);
        tempy = GMPy_MPC_From_Complex(y, 1, 1, context);
        if (!tempx || !tempy) {
            /* XDECREF: either temp may be NULL on conversion failure. */
            Py_XDECREF((PyObject*)tempx);
            Py_XDECREF((PyObject*)tempy);
            Py_DECREF((PyObject*)result);
            return NULL;
        }
        result->rc = mpc_sub(result->c, tempx->c, tempy->c, GET_MPC_ROUND(context));
        Py_DECREF((PyObject*)tempx);
        Py_DECREF((PyObject*)tempy);
        goto done;
    }

    /* Neither operand pair is supported: discard result, defer to caller. */
    Py_DECREF((PyObject*)result);
    Py_RETURN_NOTIMPLEMENTED;

  done:
    /* Applies context exception/flag policy; may replace result. */
    GMPY_MPC_CLEANUP(result, context, "subtraction");
    return (PyObject*)result;
}
/* cpx_fft_ok - verify dp is a complex object with FFT-compatible sizes.
 * Returns TRUE when both checks pass, FALSE (after a warning for the
 * complex check; fft_size_ok reports its own failures) otherwise.
 */
int cpx_fft_ok(QSP_ARG_DECL Data_Obj *dp, const char *funcname )
{
	if( ! IS_COMPLEX(dp) ){
		sprintf(ERROR_STRING,
			"%s: image %s is not complex",funcname,OBJ_NAME(dp));
		WARN(ERROR_STRING);
		return FALSE;
	}
	return ( fft_size_ok(dp, funcname ) < 0 ) ? FALSE : TRUE;
}
/* inner - matrix inner (dot) product: dpto = dpfr1 . dpfr2.
 *
 * Validates types/precisions and the usual (rows x cols) compatibility,
 * then computes each target element with vdot, walking the operands by
 * patching data pointers into reusable 1x1 / row-shaped argument objects.
 * Temporary objects (subscripts, the 1x1 target window, and the
 * "_transposed_column" equivalence) are deleted before returning.
 * Errors are reported via NWARN and cause an early return (void result).
 */
void inner(QSP_ARG_DECL Data_Obj *dpto,Data_Obj *dpfr1,Data_Obj *dpfr2)
{
	//dimension_t _n;	/* dot prod len */
	dimension_t i,j;
	Vec_Obj_Args oa1, *oap=&oa1;
	//Dimension_Set sizes={{1,1,1,1,1}};
	Dimension_Set *sizes;
	index_t dst_indices[N_DIMENSIONS]={0,0,0,0,0};
	index_t src1_indices[N_DIMENSIONS]={0,0,0,0,0};
	index_t src2_indices[N_DIMENSIONS]={0,0,0,0,0};
	Data_Obj *col_dp;

	sizes=NEW_DIMSET;
	for(i=0;i<N_DIMENSIONS;i++)
		SET_DIMENSION(sizes,i,1);

#ifdef CAUTIOUS
	clear_obj_args(oap);
#endif /* CAUTIOUS */

	/* The types and precisions should be whatever is allowed by vdot,
	 * which is float, double, real and complex...
	 */
	if( ! is_good_for_inner(dpto,"inner") ) return;
	if( ! is_good_for_inner(dpfr1,"inner") ) return;
	if( ! is_good_for_inner(dpfr2,"inner") ) return;

	/* we need to make sure that the types and precisions MATCH! */
	if( ! prec_and_type_match(dpto,dpfr1,"inner") ) return;
	if( ! prec_and_type_match(dpto,dpfr2,"inner") ) return;

	if( OBJ_ROWS(dpto) != OBJ_ROWS(dpfr1) ){
		sprintf(DEFAULT_ERROR_STRING,
			"inner: dpto %s (%d) and first operand %s (%d) must have same # rows",
			OBJ_NAME(dpto),OBJ_ROWS(dpto),OBJ_NAME(dpfr1),OBJ_ROWS(dpfr1));
		NWARN(DEFAULT_ERROR_STRING);
		return;
	}
	if( OBJ_COLS(dpto) != OBJ_COLS(dpfr2) ){
		sprintf(DEFAULT_ERROR_STRING,
			"inner: target %s (%d) and second operand %s (%d) must have same # columns",
			OBJ_NAME(dpto),OBJ_COLS(dpto),OBJ_NAME(dpfr2),OBJ_COLS(dpfr2));
		NWARN(DEFAULT_ERROR_STRING);
		return;
	}
	if( OBJ_COLS(dpfr1) != OBJ_ROWS(dpfr2) ){
		sprintf(DEFAULT_ERROR_STRING,
			"inner: # cols of operand %s (%d) must match # rows of operand %s (%d)",
			OBJ_NAME(dpfr1),OBJ_COLS(dpfr1),OBJ_NAME(dpfr2),OBJ_ROWS(dpfr2));
		NWARN(DEFAULT_ERROR_STRING);
		return;
	}

	//_n=OBJ_COLS(dpfr1);	/* the length of each dot product we will compute */

	if( IS_COMPLEX(dpto) ) SET_OA_ARGSTYPE(oap,COMPLEX_ARGS);
	else SET_OA_ARGSTYPE(oap,REAL_ARGS);

	/* vdot things it's inputs have the same shape, so if we are taking the inner
	 * product of a column vector with a row vector, we have to transpose one of
	 * the inputs...
	 */

	if( OBJ_ROWS(dpfr1) > 1 )
		SET_OA_SRC1(oap,d_subscript(QSP_ARG dpfr1,0) );	/* subscript first row */
	else
		SET_OA_SRC1(oap,dpfr1);	/* object is a row */

	if( OBJ_COLS(dpto) > 1 )
		col_dp=c_subscript(QSP_ARG dpfr2,0);
	else
		col_dp=dpfr2;

	/* 1x1 window into the target; its data pointer is re-aimed per element. */
	SET_OA_DEST(oap,mk_subimg(QSP_ARG dpto,0,0,"target pixel",1,1) );

	//[sizes setDimensionAtIndex : 1 withValue : OBJ_ROWS(col_dp) ];
	SET_DIMENSION(sizes,1,OBJ_ROWS(col_dp));
	SET_DIMENSION(sizes,0,OBJ_COMPS(col_dp));

	/* View the column as a row so vdot sees matching shapes. */
	SET_OA_SRC2(oap,make_equivalence(QSP_ARG "_transposed_column", col_dp,sizes,OBJ_PREC_PTR(col_dp)) );

	for(i=0;i<OBJ_ROWS(dpto);i++){
		src1_indices[2]=i;
		SET_OBJ_DATA_PTR( OA_SRC1(oap), multiply_indexed_data(dpfr1,src1_indices) );
		for(j=0;j<OBJ_COLS(dpto);j++){
			dst_indices[2]=i;	/* k_th component */
			dst_indices[1]=j;	/* k_th component */
			SET_OBJ_DATA_PTR( OA_DEST(oap), multiply_indexed_data(dpto,dst_indices) );
			src2_indices[1]=j;
			SET_OBJ_DATA_PTR( OA_SRC2(oap), multiply_indexed_data(dpfr2,src2_indices) );
			vdot(QSP_ARG oap);
		}
	}
	delvec(QSP_ARG OA_SRC2(oap) );	/* "_transposed_column" */
	if( OA_SRC1(oap) != dpfr1 ) delvec(QSP_ARG OA_SRC1(oap) );
	if( col_dp != dpfr2 ) delvec(QSP_ARG col_dp);
	delvec(QSP_ARG OA_DEST(oap) );
}
/* numberp - predicate: 1 if x denotes any numeric object
 * (integer, float, rational, bignum or complex), else 0.
 */
int numberp(int x)
{
    return (IS_INTEGER(x) || IS_FLOAT(x) || IS_RATIONAL(x) ||
            IS_BIGNUM(x) || IS_COMPLEX(x)) ? 1 : 0;
}
/* _ocl_offset_data - make a (sub)object's OpenCL data reference point at
 * the right place within its parent's buffer.
 *
 * Default build: share the parent's cl_mem and record an element offset
 * (complex counts as 1 element = 2 values, quaternion = 4, hence the
 * divisions) that kernels must apply themselves.
 * USE_OPENCL_SUBREGION build: create a cl_mem sub-buffer instead.
 *
 * Fix: the clCreateSubBuffer call contained a mojibake character
 * ("(R)" symbol) where the argument `&reg` belongs — an encoding
 * corruption of "&reg;" that would not compile if the branch were
 * enabled.  Restored to `&reg`.
 */
static void _ocl_offset_data(QSP_ARG_DECL Data_Obj *dp, index_t offset)
{
#ifndef USE_OPENCL_SUBREGION
	/* The original code used subBuffers, but overlapping subregions
	 * don't work...
	 * So instead we use a common memory buffer, but keep track
	 * of the starting offset (in elements).  This offset has
	 * to be passed to the kernels.
	 */
	//fprintf(stderr,"ocl_offset_data: obj %s, offset = %d\n",OBJ_NAME(dp),offset);
	//fprintf(stderr,"\tparent obj %s, parent offset = %d\n",OBJ_NAME(OBJ_PARENT(dp)),
	//OBJ_OFFSET(OBJ_PARENT(dp)));

	if( IS_COMPLEX(dp) ){
		/* offset arrives in scalar values; 2 values per complex element */
		assert( (offset & 1) == 0 );
		offset /= 2;
		//fprintf(stderr,"Adjusted offset (%d) for complex object %s\n",offset,OBJ_NAME(dp));
	} else if( IS_QUAT(dp) ){
		/* 4 values per quaternion element */
		assert( (offset & 3) == 0 );
		offset /= 4;
	}
	SET_OBJ_DATA_PTR(dp,OBJ_DATA_PTR(OBJ_PARENT(dp)));
	SET_OBJ_OFFSET( dp, OBJ_OFFSET(OBJ_PARENT(dp)) + offset );
#else // USE_OPENCL_SUBREGION
	cl_mem buf;
	cl_mem parent_buf;
	cl_buffer_region reg;
	cl_int status;
	int extra_offset;

	parent_buf = find_parent_buf(OBJ_PARENT(dp),&extra_offset);
	assert( parent_buf != NULL );

	reg.origin = (offset+extra_offset) * ELEMENT_SIZE(dp);

	// No - the region has to be big enough for all of the elements.
	// The safest thing is to include everything from the start
	// of the subregion to the end of the parent.  Note that this
	// cannot handle negative increments!?
	// reg.size = OBJ_N_MACH_ELTS(dp) * ELEMENT_SIZE(dp);
	//
	// p p p p p p p
	// p p c c c p p
	// p p p p p p p
	// p p c c c p p

	/* Span from the first to the last addressed element, inclusive. */
	reg.size =	OBJ_SEQ_INC(dp)*(OBJ_SEQS(dp)-1) +
			OBJ_FRM_INC(dp)*(OBJ_FRAMES(dp)-1) +
			OBJ_ROW_INC(dp)*(OBJ_ROWS(dp)-1) +
			OBJ_PXL_INC(dp)*(OBJ_COLS(dp)-1) +
			OBJ_COMP_INC(dp)*(OBJ_COMPS(dp)-1) +
			1;
	reg.size *= ELEMENT_SIZE(dp);

	//fprintf(stderr,"requesting subregion of %ld bytes at offset %ld\n",
	//reg.size,reg.origin);

	buf = clCreateSubBuffer( parent_buf,
				CL_MEM_READ_WRITE,
				CL_BUFFER_CREATE_TYPE_REGION,
				&reg,
				&status);
	if( status != CL_SUCCESS ){
		report_ocl_error(status, "clCreateSubBuffer");
		/* fall back to sharing the parent's buffer */
		SET_OBJ_DATA_PTR(dp,OBJ_DATA_PTR(OBJ_PARENT(dp)));
	} else {
		SET_OBJ_DATA_PTR(dp,buf);
	}
	// BUG - Because this object doesn't "own" the data, the sub-buffer
	// won't be released when the object is destroyed, a possible memory
	// leak...
	// We need to add a special case, or make data releasing a
	// platform-specific function...
#endif // USE_OPENCL_SUBREGION
}
/* Return 1 if obj is accepted as a complex-compatible number, else 0. */
static int
GMPy_isComplex(PyObject *obj)
{
    if (IS_COMPLEX(obj)) {
        return 1;
    }
    return 0;
}
/* corr_matrix - fill square target dpto with the correlation matrix of the
 * rows of dpfr: dpto[i][j] = dot(row_i, row_j).
 *
 * Only the upper triangle (j >= i) is computed with vdot; each result is
 * mirrored to the lower triangle through the dest/dest2 pointer pair,
 * which both start at the diagonal element [i][i] and then step by
 * pixel- and row-increment respectively.
 * NOTE(review): vdot presumably writes its result through *dest (the args
 * struct carries only sources here) — confirm against the Vec_Args API.
 */
void corr_matrix(Data_Obj *dpto,Data_Obj *dpfr)
{
	int had_err=0;
	float *op1, *op2;
	float *dest, *dest2;
	dimension_t i,j;
	Vec_Args args;

	if( ! is_real(dpto,"corr_matrix") ) return;
	if( ! is_real(dpfr,"corr_matrix") ) return;

	if( OBJ_COLS(dpto) != OBJ_ROWS(dpto) ){
		sprintf(ERROR_STRING,"target matrix %s (%dx%d) must be square",OBJ_NAME(dpto),
			OBJ_ROWS(dpto),OBJ_COLS(dpto));
		WARN(ERROR_STRING);
		had_err++;
	}
	if( OBJ_COLS(dpto) != OBJ_ROWS(dpfr) ){
		sprintf(ERROR_STRING,
			"target matrix %s size %d not equal to source matrix %s rows (%d)",
			OBJ_NAME(dpto),OBJ_COLS(dpto),OBJ_NAME(dpfr),OBJ_ROWS(dpfr));
		WARN(ERROR_STRING);
		had_err++;
	}
	if( had_err ) return;

	/* is_real() passed above, so this is expected to take the REAL branch;
	 * the complex case is kept for safety. */
	if( IS_COMPLEX(dpto) ) args.arg_argstype = COMPLEX_ARGS;
	else args.arg_argstype = REAL_ARGS;

	args.arg_inc1 = OBJ_PXL_INC(dpfr);
	args.arg_inc2 = OBJ_PXL_INC(dpfr);
	args.arg_n1 = OBJ_COLS(dpfr);
	args.arg_n2 = OBJ_COLS(dpfr);
	args.arg_prec1 = OBJ_PREC(dpfr);
	args.arg_prec2 = OBJ_PREC(dpfr);

	op1 = OBJ_DATA_PTR(dpfr);	/* row i of the source */
	for(i=0;i<OBJ_ROWS(dpfr);i++){
		/* dest walks along row i; dest2 walks down column i */
		dest = dest2 = OBJ_DATA_PTR(dpto);
		dest += i*OBJ_ROW_INC(dpto);
		dest += i*OBJ_PXL_INC(dpto);
		dest2 += i*OBJ_PXL_INC(dpto);
		dest2 += i*OBJ_ROW_INC(dpto);
		op2 = OBJ_DATA_PTR(dpfr);	/* row j of the source */
		op2 += i*OBJ_ROW_INC(dpfr);
		for(j=i;j<OBJ_ROWS(dpfr);j++){
			args.arg_v1 = op1;
			args.arg_v2 = op2;
			vdot(&args);
			*dest2 = *dest;	/* symmetric matrix */
			op2 += OBJ_ROW_INC(dpfr);
			dest += OBJ_PXL_INC(dpto);
			dest2 += OBJ_ROW_INC(dpto);
		}
		op1 += OBJ_ROW_INC(dpfr);
	}
} /* end corr_matrix() */
/* complexp - predicate: 1 if x denotes a complex number object, else 0. */
int complexp(int x)
{
    return IS_COMPLEX(x) ? 1 : 0;
}
/* is_complex - predicate: true iff the value is tagged complex. */
bool is_complex (Value* self) {
	return IS_COMPLEX(self);
}
/* insure_ram_obj - return a version of dp that lives in RAM.
 *
 * If dp is already a RAM object it is returned unchanged.  Otherwise a
 * RAM duplicate (DNAME_PREFIX + name) is created in ram_area_p and the
 * device data is downloaded into it.  Non-contiguous sources are first
 * copied (via FVMOV) into a contiguous temporary on the source device.
 * The returned object is flagged DT_VOLATILE so it can be reclaimed
 * later; returns NO_OBJ if the duplicate could not be created (e.g.
 * subscripted names containing illegal bracket characters).
 */
static Data_Obj *insure_ram_obj(QSP_ARG_DECL Data_Obj *dp)
{
	Data_Obj *tmp_dp;
	char *tname;
	Data_Area *save_ap;
	Data_Obj *c_dp=NULL;

	if( OBJ_IS_RAM(dp) ) return dp;

	// This object lives on a different platform.
	// We create a copy in RAM, and download the data
	// using the platform download function.

	/* Temporarily switch the current data area so dup_obj
	 * allocates in RAM. */
	save_ap = curr_ap;
	curr_ap = ram_area_p;
	tname = getbuf( strlen(OBJ_NAME(dp)) + strlen(DNAME_PREFIX) + 1 );
	sprintf(tname,"%s%s",DNAME_PREFIX,OBJ_NAME(dp));
	tmp_dp = dup_obj(QSP_ARG dp, tname);
	givbuf(tname);
	if( tmp_dp == NO_OBJ ){
		// This can happen if the object is subscripted,
		// as the bracket characters are illegal in names
		return NO_OBJ;
	}
	curr_ap = save_ap;

	// We can't download if the source data is not contiguous...
	//
	// We have a problem with bit precision, because the bits can
	// be non-contiguous when the long words are - any time the number of columns
	// is not evenly divided by the bits-per-word

	if( (! IS_CONTIGUOUS(dp)) && ! HAS_CONTIGUOUS_DATA(dp) ){
		Vec_Obj_Args oa1, *oap=&oa1;

		advise("object is not contiguous, and does not have contiguous data...");
		longlist(QSP_ARG dp);

		/* Allocate the contiguous copy in the source object's own area. */
		save_ap = curr_ap;
		curr_ap = OBJ_AREA( dp );
		tname = getbuf( strlen(OBJ_NAME(dp)) + strlen(CNAME_PREFIX) + 1 );
		sprintf(tname,"%s%s",CNAME_PREFIX,OBJ_NAME(dp));
		c_dp = dup_obj(QSP_ARG dp, tname );
		givbuf(tname);
		curr_ap = save_ap;

		// Now do the move...
		setvarg2(oap,c_dp,dp);
		if( IS_BITMAP(dp) ){
			SET_OA_SBM(oap,dp);
			SET_OA_SRC1(oap,NO_OBJ);
		}
		if( IS_REAL(dp) ) /* BUG case for QUAT too? */
			OA_ARGSTYPE(oap) = REAL_ARGS;
		else if( IS_COMPLEX(dp) ) /* BUG case for QUAT too? */
			OA_ARGSTYPE(oap) = COMPLEX_ARGS;
		else if( IS_QUAT(dp) ) /* BUG case for QUAT too? */
			OA_ARGSTYPE(oap) = QUATERNION_ARGS;
		else
			//ERROR1("CAUTIOUS: insure_ram_obj: bad argset type!?");
			assert( AERROR("insure_ram_obj: bad argset type!?") );

		//fprintf(stderr,"insure_ram_obj: moving remote data to a contiguous object\n");
		call_vfunc( QSP_ARG FIND_VEC_FUNC(FVMOV), oap );
		//fprintf(stderr,"insure_ram_obj: DONE moving remote data to a contiguous object\n");
		dp = c_dp;	/* download from the contiguous copy instead */
	}

	gen_obj_dnload(QSP_ARG tmp_dp, dp);

	if( c_dp != NO_OBJ )
		delvec(QSP_ARG c_dp);

	// BUG - when to delete?
	// We try using the VOLATILE flag. This will work as long as
	// the input object is not VOLATILE!?
	SET_OBJ_FLAG_BITS(tmp_dp, DT_VOLATILE ) ;

	return tmp_dp;
}