static struct linop_s* linop_fft_create_priv(int N, const long dims[N], unsigned int flags, bool forward, bool center) { const struct operator_s* plan = fft_measure_create(N, dims, flags, true, false); const struct operator_s* iplan = fft_measure_create(N, dims, flags, true, true); PTR_ALLOC(struct fft_linop_s, data); SET_TYPEID(fft_linop_s, data); data->frw = plan; data->adj = iplan; data->N = N; data->center = center; data->dims = *TYPE_ALLOC(long[N]); md_copy_dims(N, data->dims, dims); data->strs = *TYPE_ALLOC(long[N]); md_calc_strides(N, data->strs, data->dims, CFL_SIZE); long fft_dims[N]; md_select_dims(N, flags, fft_dims, dims); data->nscale = (float)md_calc_size(N, fft_dims); lop_fun_t apply = forward ? fft_linop_apply : fft_linop_adjoint; lop_fun_t adjoint = forward ? fft_linop_adjoint : fft_linop_apply; struct linop_s* lop = linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), apply, adjoint, fft_linop_normal, NULL, fft_linop_free); if (center) { // FIXME: should only allocate flagged dims complex float* fftmod_mat = md_alloc(N, dims, CFL_SIZE); complex float* fftmodk_mat = md_alloc(N, dims, CFL_SIZE); // we need fftmodk only because we want to apply scaling only once complex float one[1] = { 1. }; md_fill(N, dims, fftmod_mat, one, CFL_SIZE); fftmod(N, dims, flags, fftmodk_mat, fftmod_mat); fftscale(N, dims, flags, fftmod_mat, fftmodk_mat); struct linop_s* mod = linop_cdiag_create(N, dims, ~0u, fftmod_mat); struct linop_s* modk = linop_cdiag_create(N, dims, ~0u, fftmodk_mat); struct linop_s* tmp = linop_chain(mod, lop); tmp = linop_chain(tmp, modk); linop_free(lop); linop_free(mod); linop_free(modk); lop = tmp; } return lop; }
/*
 * Build the SENSE reconstruction operator: a regularized least-squares
 * inversion of the given sense forward model, optionally weighted by the
 * square root of the sampling pattern and optionally constrained to
 * real-valued images (conf->rvc).
 *
 * Takes ownership of 'sense_op' when conf->rvc is set (it is chained and
 * freed); caller retains ownership otherwise — matches original behavior.
 */
const struct operator_s* sense_recon_create(const struct sense_conf* conf, const long dims[DIMS], const struct linop_s* sense_op, const long pat_dims[DIMS], const complex float* pattern, italgo_fun2_t italgo, iter_conf* iconf, unsigned int num_funs, const struct operator_p_s* thresh_op[num_funs], const struct linop_s* thresh_funs[num_funs], const long ksp_dims[DIMS], const struct operator_s* precond_op)
{
	struct lsqr_conf lsqr_conf = { conf->cclambda };

	long img_dims[DIMS];
	md_select_dims(DIMS, ~COIL_FLAG, img_dims, dims);

	// enforce a real-valued image by prepending a real-value constraint
	if (conf->rvc) {

		struct linop_s* rvc = rvc_create(DIMS, img_dims);
		struct linop_s* chained = linop_chain(rvc, sense_op);

		linop_free(rvc);
		linop_free(sense_op);
		sense_op = chained;
	}

	// reweighting iterations not supported here
	assert(1 == conf->rwiter);

	if (NULL == pattern)
		return lsqr2_create(&lsqr_conf, italgo, iconf, sense_op, precond_op,
					num_funs, thresh_op, thresh_funs);

	// weighted case: use the square root of the sampling pattern as weights
	complex float* weights = md_alloc(DIMS, pat_dims, CFL_SIZE);	// FIXME: GPU
#if 0
	// buggy
	// md_zsqrt(DIMS, pat_dims, weights, pattern);
#else
	// take the element-wise real sqrt by viewing the complex data as floats
	long dimsR[DIMS + 1];
	real_from_complex_dims(DIMS, dimsR, pat_dims);
	md_sqrt(DIMS + 1, dimsR, (float*)weights, (const float*)pattern);
#endif

	// FIXME: check pat_dims
	struct linop_s* weights_op = linop_cdiag_create(DIMS, ksp_dims, FFT_FLAGS, weights);

	return wlsqr2_create(&lsqr_conf, italgo, iconf, sense_op, weights_op, precond_op,
				num_funs, thresh_op, thresh_funs);
}