/* zv_mltadd -- complex scalar/vector multiplication and addition
	-- out = v1 + scale.v2 */
ZVEC	*zv_mltadd(const ZVEC *v1, const ZVEC *v2, complex scale, ZVEC *out)
{
    /* register unsigned int	dim, i; */
    /* complex	*out_ve, *v1_ve, *v2_ve; */

    if ( v1 == ZVNULL || v2 == ZVNULL )
	error(E_NULL,"zv_mltadd");
    if ( v1->dim != v2->dim )
	error(E_SIZES,"zv_mltadd");
    if ( scale.re == 0.0 && scale.im == 0.0 )
	return zv_copy(v1,out);
    if ( scale.re == 1.0 && scale.im == 0.0 )
	return zv_add(v1,v2,out);

    if ( v2 != out )
    {
	tracecatch(out = zv_copy(v1,out),"zv_mltadd");

	/* dim = v1->dim; */
	__zmltadd__(out->ve,v2->ve,scale,(int)(v1->dim),0);
    }
    else
    {
	tracecatch(out = zv_mlt(scale,v2,out),"zv_mltadd");
	out = zv_add(v1,out,out);
    }

    return (out);
}
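/* Illustrative usage sketch for zv_mltadd (not part of the library).
   It assumes the usual Meschach conventions: "zmatrix.h" declares the
   allocators zv_get/ZV_FREE, complex entries expose the .re/.im fields
   used above, and passing ZVNULL as the output argument asks the
   routine to allocate the result itself. */
#include "zmatrix.h"

static void demo_zv_mltadd(void)
{
    ZVEC	*x = zv_get(2), *y = zv_get(2), *out = ZVNULL;
    complex	s;

    x->ve[0].re = 1.0;	x->ve[0].im = 0.0;	/* x = (1, i)  */
    x->ve[1].re = 0.0;	x->ve[1].im = 1.0;
    y->ve[0].re = 2.0;	y->ve[0].im = 0.0;	/* y = (2, 2i) */
    y->ve[1].re = 0.0;	y->ve[1].im = 2.0;

    s.re = 0.0;	s.im = 1.0;			/* scale = i */

    out = zv_mltadd(x,y,s,out);		/* out = x + i*y = (1+2i, -2+i) */

    ZV_FREE(x);	ZV_FREE(y);	ZV_FREE(out);
}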
/* v_mltadd -- scalar/vector multiplication and addition
	-- out = v1 + scale.v2 */
VEC	*v_mltadd(VEC *v1, VEC *v2, double scale, VEC *out)
{
    /* register u_int	dim, i; */
    /* Real	*out_ve, *v1_ve, *v2_ve; */

    if ( v1 == (VEC *)NULL || v2 == (VEC *)NULL )
	error(E_NULL,"v_mltadd");
    if ( v1->dim != v2->dim )
	error(E_SIZES,"v_mltadd");
    if ( scale == 0.0 )
	return v_copy(v1,out);
    if ( scale == 1.0 )
	return v_add(v1,v2,out);

    if ( v2 != out )
    {
	tracecatch(out = v_copy(v1,out),"v_mltadd");

	/* dim = v1->dim; */
	__mltadd__(out->ve,v2->ve,scale,(int)(v1->dim));
    }
    else
    {
	tracecatch(out = sv_mlt(scale,v2,out),"v_mltadd");
	out = v_add(v1,out,out);
    }
    /************************************************************
    out_ve = out->ve;	v1_ve = v1->ve;		v2_ve = v2->ve;
    for ( i=0; i < dim ; i++ )
	out->ve[i] = v1->ve[i] + scale*v2->ve[i];
	(*out_ve++) = (*v1_ve++) + scale*(*v2_ve++);
    ************************************************************/

    return (out);
}
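/* Illustrative usage sketch for v_mltadd (not part of the library).
   It assumes "matrix.h" declares v_get/V_FREE and that passing VNULL
   as the output argument makes the routine allocate the result. */
#include "matrix.h"

static void demo_v_mltadd(void)
{
    VEC	*x = v_get(3), *y = v_get(3), *out = VNULL;
    int	i;

    for ( i = 0; i < 3; i++ )
    {
	x->ve[i] = i + 1.0;		/* x = (1, 2, 3) */
	y->ve[i] = 1.0;			/* y = (1, 1, 1) */
    }

    out = v_mltadd(x,y,0.5,out);	/* out = x + 0.5*y = (1.5, 2.5, 3.5) */

    V_FREE(x);	V_FREE(y);	V_FREE(out);
}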
/* vm_mltadd -- vector-matrix multiplication and addition
	-- out = v1 + alpha.v2.A, i.e. out[i] = v1[i] + alpha*sum_j v2[j]*A[j][i] */
VEC	*vm_mltadd(const VEC *v1, const VEC *v2, const MAT *A, double alpha, VEC *out)
{
    int		/* i, */ j, m, n;
    Real	tmp, /* *A_e, */ *out_ve;

    if ( ! v1 || ! v2 || ! A )
	error(E_NULL,"vm_mltadd");
    if ( v2 == out )
	error(E_INSITU,"vm_mltadd");
    if ( v1->dim != A->n || A->m != v2->dim )
	error(E_SIZES,"vm_mltadd");

    tracecatch(out = v_copy(v1,out),"vm_mltadd");

    out_ve = out->ve;
    m = A->m;	n = A->n;
    for ( j = 0; j < m; j++ )
    {
	tmp = v2->ve[j]*alpha;
	if ( tmp != 0.0 )
	    __mltadd__(out_ve,A->me[j],tmp,(int)n);
	/**************************************************
	A_e = A->me[j];
	for ( i = 0; i < n; i++ )
	    out_ve[i] += A_e[i]*tmp;
	**************************************************/
    }

    return out;
}
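/* Illustrative usage sketch for vm_mltadd (not part of the library).
   It assumes "matrix.h" declares v_get/m_get/V_FREE/M_FREE and that the
   allocators zero-fill, as Meschach's do.  Dimensions follow the checks
   above: v2->dim == A->m and v1->dim == out->dim == A->n. */
#include "matrix.h"

static void demo_vm_mltadd(void)
{
    MAT	*A  = m_get(2,3);		/* 2 x 3 matrix */
    VEC	*v1 = v_get(3), *v2 = v_get(2), *out = VNULL;
    int	i, j;

    for ( i = 0; i < 2; i++ )
	for ( j = 0; j < 3; j++ )
	    A->me[i][j] = i + j;	/* A = [0 1 2; 1 2 3] */
    v2->ve[0] = 1.0;	v2->ve[1] = 2.0;
    /* v1 stays zero, so out reduces to alpha * v2'A */

    out = vm_mltadd(v1,v2,A,1.0,out);	/* out = (2, 5, 8) */

    V_FREE(v1);	V_FREE(v2);	V_FREE(out);	M_FREE(A);
}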
/* zQRfactor -- forms the QR factorisation of A
	-- factorisation stored in compact form as described above
	   (not quite standard format) */
ZMAT	*zQRfactor(ZMAT *A, ZVEC *diag)
{
    unsigned int	k, limit;
    Real	beta;
    STATIC ZVEC	*tmp1 = ZVNULL, *w = ZVNULL;

    if ( ! A || ! diag )
	error(E_NULL,"zQRfactor");

    limit = min(A->m,A->n);
    if ( diag->dim < limit )
	error(E_SIZES,"zQRfactor");

    tmp1 = zv_resize(tmp1,A->m);
    w    = zv_resize(w,   A->n);
    MEM_STAT_REG(tmp1,TYPE_ZVEC);
    MEM_STAT_REG(w,   TYPE_ZVEC);

    for ( k = 0; k < limit; k++ )
    {
	/* get H/holder vector for the k-th column */
	zget_col(A,k,tmp1);
	zhhvec(tmp1,k,&beta,tmp1,&A->me[k][k]);
	diag->ve[k] = tmp1->ve[k];

	/* apply H/holder vector to remaining columns */
	tracecatch(_zhhtrcols(A,k,k+1,tmp1,beta,w),"zQRfactor");
    }

#ifdef THREADSAFE
    ZV_FREE(tmp1);	ZV_FREE(w);
#endif

    return (A);
}
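/* Illustrative usage sketch for zQRfactor (not part of the library).
   It assumes "zmatrix2.h" declares the complex factorisation routines
   and the allocators zm_get/zv_get/ZM_FREE/ZV_FREE; diag must have at
   least min(m,n) entries.  The compact factors left in A and diag are
   normally passed on to a solver such as zQRsolve. */
#include "zmatrix2.h"

static void demo_zQRfactor(void)
{
    ZMAT	*A    = zm_get(3,2);
    ZVEC	*diag = zv_get(2);	/* min(3,2) = 2 Householder entries */
    int	i, j;

    for ( i = 0; i < 3; i++ )
	for ( j = 0; j < 2; j++ )
	{
	    A->me[i][j].re = i + j + 1.0;
	    A->me[i][j].im = (Real)(i - j);
	}

    zQRfactor(A,diag);		/* A is overwritten with the compact QR form */

    ZM_FREE(A);	ZV_FREE(diag);
}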
/* m_inverse -- returns the inverse of A, provided A is not too rank deficient
	-- uses LU factorisation, solving for one column of the inverse at a time */
MAT	*m_inverse(const MAT *A, MAT *out)
{
    int		i;
    STATIC VEC	*tmp = VNULL, *tmp2 = VNULL;
    STATIC MAT	*A_cp = MNULL;
    STATIC PERM	*pivot = PNULL;

    if ( ! A )
	error(E_NULL,"m_inverse");
    if ( A->m != A->n )
	error(E_SQUARE,"m_inverse");
    if ( ! out || out->m < A->m || out->n < A->n )
	out = m_resize(out,A->m,A->n);

    A_cp = m_resize(A_cp,A->m,A->n);
    A_cp = m_copy(A,A_cp);
    tmp  = v_resize(tmp,A->m);
    tmp2 = v_resize(tmp2,A->m);
    pivot = px_resize(pivot,A->m);
    MEM_STAT_REG(A_cp,TYPE_MAT);
    MEM_STAT_REG(tmp, TYPE_VEC);
    MEM_STAT_REG(tmp2,TYPE_VEC);
    MEM_STAT_REG(pivot,TYPE_PERM);

    tracecatch(LUfactor(A_cp,pivot),"m_inverse");

    /* solve A x = e_i for each unit vector e_i; x is column i of inv(A) */
    for ( i = 0; i < A->n; i++ )
    {
	v_zero(tmp);
	tmp->ve[i] = 1.0;
	tracecatch(LUsolve(A_cp,pivot,tmp,tmp2),"m_inverse");
	set_col(out,i,tmp2);
    }

#ifdef THREADSAFE
    V_FREE(tmp);	V_FREE(tmp2);
    M_FREE(A_cp);	PX_FREE(pivot);
#endif

    return out;
}
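/* Illustrative usage sketch for m_inverse (not part of the library).
   It assumes "matrix2.h" declares the LU-based routines used above and
   "matrix.h" the allocators m_get/M_FREE; passing MNULL as the output
   argument asks m_inverse to allocate the result. */
#include "matrix2.h"

static void demo_m_inverse(void)
{
    MAT	*A = m_get(2,2), *Ainv = MNULL;

    A->me[0][0] = 4.0;	A->me[0][1] = 7.0;
    A->me[1][0] = 2.0;	A->me[1][1] = 6.0;	/* det(A) = 10 */

    Ainv = m_inverse(A,Ainv);	/* Ainv = [ 0.6 -0.7; -0.2 0.4 ] */

    M_FREE(A);	M_FREE(Ainv);
}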
/* mv_mltadd -- matrix-vector multiplication and addition
	-- out = v1 + alpha.A.v2 */
VEC	*mv_mltadd(const VEC *v1, const VEC *v2, const MAT *A, double alpha, VEC *out)
{
    /* register int	j; */
    int		i, m, n;
    Real	*v2_ve, *out_ve;

    if ( ! v1 || ! v2 || ! A )
	error(E_NULL,"mv_mltadd");
    if ( out == v2 )
	error(E_INSITU,"mv_mltadd");
    if ( v1->dim != A->m || v2->dim != A->n )
	error(E_SIZES,"mv_mltadd");

    tracecatch(out = v_copy(v1,out),"mv_mltadd");

    v2_ve = v2->ve;	out_ve = out->ve;
    m = A->m;	n = A->n;

    if ( alpha == 0.0 )
	return out;

    for ( i = 0; i < m; i++ )
    {
	out_ve[i] += alpha*__ip__(A->me[i],v2_ve,(int)n);
	/**************************************************
	A_e = A->me[i];
	sum = 0.0;
	for ( j = 0; j < n; j++ )
	    sum += A_e[j]*v2_ve[j];
	out_ve[i] = v1->ve[i] + alpha*sum;
	**************************************************/
    }

    return out;
}
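/* Illustrative usage sketch for mv_mltadd (not part of the library).
   It assumes "matrix.h" declares v_get/m_get/V_FREE/M_FREE; dimensions
   follow the checks above (v1->dim == A->m, v2->dim == A->n) and out
   must not alias v2. */
#include "matrix.h"

static void demo_mv_mltadd(void)
{
    MAT	*A  = m_get(2,3);
    VEC	*v1 = v_get(2), *v2 = v_get(3), *out = VNULL;
    int	i, j;

    for ( i = 0; i < 2; i++ )
	for ( j = 0; j < 3; j++ )
	    A->me[i][j] = 1.0;		/* A = all ones */
    v1->ve[0] = 10.0;	v1->ve[1] = 20.0;
    v2->ve[0] = 1.0;	v2->ve[1] = 2.0;	v2->ve[2] = 3.0;

    out = mv_mltadd(v1,v2,A,0.5,out);	/* out = v1 + 0.5*A*v2 = (13, 23) */

    V_FREE(v1);	V_FREE(v2);	V_FREE(out);	M_FREE(A);
}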
/* ms_mltadd -- matrix-scalar multiplication and addition
	-- out = A1 + s.A2 */
MAT	*ms_mltadd(const MAT *A1, const MAT *A2, double s, MAT *out)
{
    /* register Real	*A1_e, *A2_e, *out_e; */
    /* register int	j; */
    int		i, m, n;

    if ( ! A1 || ! A2 )
	error(E_NULL,"ms_mltadd");
    if ( A1->m != A2->m || A1->n != A2->n )
	error(E_SIZES,"ms_mltadd");

    if ( out != A1 && out != A2 )
	out = m_resize(out,A1->m,A1->n);

    if ( s == 0.0 )
	return m_copy(A1,out);
    if ( s == 1.0 )
	return m_add(A1,A2,out);

    tracecatch(out = m_copy(A1,out),"ms_mltadd");

    m = A1->m;	n = A1->n;
    for ( i = 0; i < m; i++ )
    {
	__mltadd__(out->me[i],A2->me[i],s,(int)n);
	/**************************************************
	A1_e = A1->me[i];	A2_e = A2->me[i];	out_e = out->me[i];
	for ( j = 0; j < n; j++ )
	    out_e[j] = A1_e[j] + s*A2_e[j];
	**************************************************/
    }

    return out;
}
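/* Illustrative usage sketch for ms_mltadd (not part of the library).
   It assumes "matrix.h" declares m_get/M_FREE; A1 and A2 must have
   identical dimensions, and MNULL as the output argument asks the
   routine to allocate the result. */
#include "matrix.h"

static void demo_ms_mltadd(void)
{
    MAT	*A1 = m_get(2,2), *A2 = m_get(2,2), *out = MNULL;
    int	i, j;

    for ( i = 0; i < 2; i++ )
	for ( j = 0; j < 2; j++ )
	{
	    A1->me[i][j] = 1.0;
	    A2->me[i][j] = (Real)(i + j);	/* A2 = [0 1; 1 2] */
	}

    out = ms_mltadd(A1,A2,2.0,out);	/* out = A1 + 2*A2 = [1 3; 3 5] */

    M_FREE(A1);	M_FREE(A2);	M_FREE(out);
}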