/**
 * Alternating Minimization
 *
 * Minimize the residual by calling each min_op in turn.
 *
 * @param iter     number of outer iterations
 * @param alpha    parameter passed to each min_op; divided by redu after every outer iteration
 * @param redu     reduction factor applied to alpha once per outer iteration
 * @param N        length of the residual vector r
 * @param vops     vector operations used here: allocate, clear, xpay, norm, del
 * @param NI       number of unknowns / minimization operators
 * @param op       forward operator F; called with args = { r, x[0], ..., x[NI-1] }
 * @param min_ops  one minimization operator per unknown, applied in order
 * @param x        array of NI unknowns, updated in place by the min_ops
 * @param y        data vector the residual is measured against
 * @param callback optional; invoked on x after each inner step when callback.fun != NULL
 */
void altmin(unsigned int iter, float alpha, float redu,
	long N,
	const struct vec_iter_s* vops,
	unsigned int NI,
	struct iter_nlop_s op,
	struct iter_op_p_s min_ops[__VLA(NI)],
	float* x[__VLA(NI)], const float* y,
	struct iter_nlop_s callback)
{
	float* r = vops->allocate(N);
	vops->clear(N, r);

	// args[0] is the operator output / residual buffer; args[1..NI] alias the unknowns
	float* args[1+NI];
	args[0] = r;

	for (long i = 0; i < NI; ++i)
		args[1+i] = x[i];

	for (unsigned int i = 0; i < iter; i++) {

		for (unsigned int j = 0; j < NI; ++j) {

			iter_nlop_call(op, 1+NI, args);		// r = F x
			vops->xpay(N, -1., r, y);		// r = y - F x

			// residual is logged BEFORE min_ops[j] updates x[j]
			debug_printf(DP_DEBUG2, "Step: %u, Res: %f\n", i, vops->norm(N, r));

			// minimize over unknown j with the current alpha
			// NOTE(review): min_ops[j] is handed y, not the residual r computed
			// above — presumably it forms its own residual internally; confirm
			// against the min_op implementations
			iter_op_p_call(min_ops[j], alpha, x[j], y);

			if (NULL != callback.fun)
				iter_nlop_call(callback, NI, x);
		}

		alpha /= redu;	// shrink alpha for the next outer iteration
	}

	vops->del(r);
}
/**
 * Centered inverse FFT with scaling ("unitary centered" variant).
 *
 * Performs the centered inverse FFT along the dimensions selected by flags,
 * then applies fftscale to the result in place.
 *
 * @param D          number of dimensions
 * @param dimensions array sizes per dimension
 * @param flags      bitmask selecting the dimensions to transform
 * @param dst        output array
 * @param src        input array
 */
void ifftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	// inverse centered transform into dst, then rescale dst in place
	ifftc(D, dimensions, flags, dst, src);
	fftscale(D, dimensions, flags, dst, dst);
}
/**
 * Centered forward FFT.
 *
 * Implements the centered transform as a modulation sandwich: fftmod on the
 * input, an in-place fft, and fftmod again on the output.
 *
 * @param D          number of dimensions
 * @param dimensions array sizes per dimension
 * @param flags      bitmask selecting the dimensions to transform
 * @param dst        output array
 * @param src        input array
 */
void fftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
	// pre-modulate src into dst, transform dst in place, post-modulate dst
	fftmod(D, dimensions, flags, dst, src);
	fft(D, dimensions, flags, dst, dst);
	fftmod(D, dimensions, flags, dst, dst);
}