static struct linop_s* linop_gdiag_create(unsigned int N, const long dims[N], unsigned int flags, const complex float* diag, bool rdiag)
{
	PTR_ALLOC(struct cdiag_s, data);
	SET_TYPEID(cdiag_s, data);

	data->rmul = rdiag;

	data->N = N;
	PTR_ALLOC(long[N], dims2);
	PTR_ALLOC(long[N], dstrs);
	PTR_ALLOC(long[N], strs);

	long ddims[N];
	md_select_dims(N, flags, ddims, dims);
	md_copy_dims(N, *dims2, dims);
	md_calc_strides(N, *strs, dims, CFL_SIZE);
	md_calc_strides(N, *dstrs, ddims, CFL_SIZE);

	data->dims = *PTR_PASS(dims2);
	data->strs = *PTR_PASS(strs);
	data->dstrs = *PTR_PASS(dstrs);
	data->diag = diag;	// make a copy?

#ifdef USE_CUDA
	data->gpu_diag = NULL;
#endif

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)),
			cdiag_apply, cdiag_adjoint, cdiag_normal, NULL, cdiag_free);
}
const struct linop_s* linop_zfinitediff_create(unsigned int D, const long dims[D], long diffdim, bool circular)
{
	PTR_ALLOC(struct zfinitediff_data, data);
	SET_TYPEID(zfinitediff_data, data);

	data->D = D;
	data->dim_diff = diffdim;
	data->do_circdiff = circular;

	data->dims_in = *TYPE_ALLOC(long[D]);
	data->dims_adj = *TYPE_ALLOC(long[D]);
	data->strides_in = *TYPE_ALLOC(long[D]);
	data->strides_adj = *TYPE_ALLOC(long[D]);

	md_copy_dims(D, data->dims_in, dims);
	md_copy_dims(D, data->dims_adj, dims);

	md_calc_strides(D, data->strides_in, data->dims_in, CFL_SIZE);

	if (!data->do_circdiff)
		data->dims_adj[data->dim_diff] -= 1;

	md_calc_strides(D, data->strides_adj, data->dims_adj, CFL_SIZE);

	const long* dims_adj = data->dims_adj;
	const long* dims_in = data->dims_in;

	return linop_create(D, dims_adj, D, dims_in, CAST_UP(PTR_PASS(data)),
			zfinitediff_apply, zfinitediff_adjoint, zfinitediff_normal,
			NULL, zfinitediff_del);
}
/**
 * Create undersampled/weighted fft operator
 */
const struct linop_s* ufft_create(const long ksp_dims[DIMS], const long pat_dims[DIMS], const complex float* pat, unsigned int flags, bool use_gpu)
{
	struct ufft_data* data = ufft_create_data(ksp_dims, pat_dims, pat, flags, use_gpu);

	// Create operator interface
	return linop_create(DIMS, data->ksp_dims, DIMS, data->ksp_dims, &data->base,
			ufft_apply, ufft_apply_adjoint, ufft_apply_normal, ufft_apply_pinverse,
			ufft_free_data);
}
/**
 * Operator interface for a true matrix:
 * out = mat * in
 * in:	[x x x x 1 x x K x x]
 * mat:	[x x x x T x x K x x]
 * out:	[x x x x T x x 1 x x]
 * where the x's are arbitrary dimensions and T and K may be transposed
 *
 * @param N number of dimensions
 * @param out_dims output dimensions after applying the matrix (codomain)
 * @param in_dims input dimensions to apply the matrix (domain)
 * @param matrix_dims dimensions of the matrix
 * @param matrix matrix data
 */
struct linop_s* linop_matrix_create(unsigned int N, const long out_dims[N], const long in_dims[N], const long matrix_dims[N], const complex float* matrix)
{
	struct operator_matrix_s* data = linop_matrix_priv(N, out_dims, in_dims, matrix_dims, matrix);

	return linop_create(N, out_dims, N, in_dims, CAST_UP(data),
			linop_matrix_apply, linop_matrix_apply_adjoint,
			linop_matrix_apply_normal, NULL, linop_matrix_del);
}
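/*
 * Illustrative sketch (not part of the original source): a 3x2 matrix acting
 * along dim 0 (rows, T) and dim 1 (columns, K), following the dimension
 * layout in the comment above: the input keeps T singleton, the output keeps
 * K singleton. Assumes the usual BART md_ helpers (md_alloc, md_gaussian_rand).
 */
static void example_matrix_usage(void)
{
	enum { N = 2 };

	long mat_dims[N] = { 3, 2 };	// T = 3, K = 2
	long in_dims[N] = { 1, 2 };	// [1 K]
	long out_dims[N] = { 3, 1 };	// [T 1]

	complex float* mat = md_alloc(N, mat_dims, CFL_SIZE);
	md_gaussian_rand(N, mat_dims, mat);

	struct linop_s* frw = linop_matrix_create(N, out_dims, in_dims, mat_dims, mat);

	// ... apply with linop_forward / linop_adjoint ...

	linop_free(frw);
	md_free(mat);
}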
static struct linop_s* linop_fft_create_priv(int N, const long dims[N], unsigned int flags, bool forward, bool center)
{
	const struct operator_s* plan = fft_measure_create(N, dims, flags, true, false);
	const struct operator_s* iplan = fft_measure_create(N, dims, flags, true, true);

	PTR_ALLOC(struct fft_linop_s, data);
	SET_TYPEID(fft_linop_s, data);

	data->frw = plan;
	data->adj = iplan;
	data->N = N;

	data->center = center;

	data->dims = *TYPE_ALLOC(long[N]);
	md_copy_dims(N, data->dims, dims);

	data->strs = *TYPE_ALLOC(long[N]);
	md_calc_strides(N, data->strs, data->dims, CFL_SIZE);

	long fft_dims[N];
	md_select_dims(N, flags, fft_dims, dims);
	data->nscale = (float)md_calc_size(N, fft_dims);

	lop_fun_t apply = forward ? fft_linop_apply : fft_linop_adjoint;
	lop_fun_t adjoint = forward ? fft_linop_adjoint : fft_linop_apply;

	struct linop_s* lop = linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)),
			apply, adjoint, fft_linop_normal, NULL, fft_linop_free);

	if (center) {

		// FIXME: should only allocate flagged dims

		complex float* fftmod_mat = md_alloc(N, dims, CFL_SIZE);
		complex float* fftmodk_mat = md_alloc(N, dims, CFL_SIZE);

		// we need fftmodk only because we want to apply scaling only once

		complex float one[1] = { 1. };
		md_fill(N, dims, fftmod_mat, one, CFL_SIZE);
		fftmod(N, dims, flags, fftmodk_mat, fftmod_mat);
		fftscale(N, dims, flags, fftmod_mat, fftmodk_mat);

		struct linop_s* mod = linop_cdiag_create(N, dims, ~0u, fftmod_mat);
		struct linop_s* modk = linop_cdiag_create(N, dims, ~0u, fftmodk_mat);

		struct linop_s* tmp = linop_chain(mod, lop);
		tmp = linop_chain(tmp, modk);

		linop_free(lop);
		linop_free(mod);
		linop_free(modk);

		lop = tmp;
	}

	return lop;
}
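/*
 * Illustrative note (assumption, not part of the original source): in the
 * centered case above, the returned operator is the chain D_k F D_x, where
 * D_x is the fftmod phase pattern with the FFT scaling folded in and D_k the
 * unscaled k-space pattern. A minimal forward, centered 2D FFT:
 */
static struct linop_s* example_centered_fft(void)
{
	long dims[3] = { 128, 128, 1 };

	return linop_fft_create_priv(3, dims, FFT_FLAGS, true, true);
}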
/**
 * Create an Identity linear operator: I x
 * @param N number of dimensions
 * @param dims dimensions of input (domain)
 */
struct linop_s* linop_identity_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct identity_data_s, data);
	SET_TYPEID(identity_data_s, data);

	data->domain = iovec_create(N, dims, CFL_SIZE);

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)),
			identity_apply, identity_apply, identity_apply, NULL, identity_free);
}
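/*
 * Illustrative sketch (assumption, not part of the original source): applying
 * the identity operator, assuming the usual BART md_ and linop_ helpers.
 */
static void example_identity_usage(void)
{
	long dims[2] = { 8, 8 };

	struct linop_s* id = linop_identity_create(2, dims);

	complex float* x = md_alloc(2, dims, CFL_SIZE);
	complex float* y = md_alloc(2, dims, CFL_SIZE);

	md_gaussian_rand(2, dims, x);

	linop_forward(id, 2, dims, y, 2, dims, x);	// y = I x = x

	md_free(x);
	md_free(y);
	linop_free(id);
}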
/**
 * Create sum operator
 */
const struct linop_s* sum_create(const long imgd_dims[DIMS], bool use_gpu)
{
	struct sum_data* data = sum_create_data(imgd_dims, use_gpu);

	// create operator interface
	return linop_create(DIMS, data->img_dims, DIMS, data->imgd_dims, data,
			sum_apply, sum_apply_adjoint, sum_apply_normal, sum_apply_pinverse,
			sum_free_data);
}
/**
 * Convolution operator
 *
 * @param N number of dimensions
 * @param flags bitmask of the dimensions to apply convolution
 * @param ctype type of convolution (see enum conv_type)
 * @param cmode mode of convolution (see enum conv_mode)
 * @param odims output dimensions
 * @param idims input dimensions
 * @param kdims kernel dimensions
 * @param krn convolution kernel
 */
struct linop_s* linop_conv_create(int N, unsigned int flags, enum conv_type ctype, enum conv_mode cmode,
		const long odims[N], const long idims[N], const long kdims[N], const complex float* krn)
{
	PTR_ALLOC(struct conv_data_s, data);
	SET_TYPEID(conv_data_s, data);

	data->plan = conv_plan(N, flags, ctype, cmode, odims, idims, kdims, krn);

	return linop_create(N, odims, N, idims, CAST_UP(PTR_PASS(data)),
			linop_conv_forward, linop_conv_adjoint, NULL, NULL, linop_conv_free);
}
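/*
 * Illustrative sketch (assumption, not part of the original source): a cyclic
 * convolution over the first two dimensions. The enum values CONV_CYCLIC and
 * CONV_SYMMETRIC are assumed from the conv module.
 */
static struct linop_s* example_conv(const long dims[2], const long kdims[2], const complex float* krn)
{
	// cyclic convolution: output dims equal input dims
	return linop_conv_create(2, MD_BIT(0) | MD_BIT(1), CONV_CYCLIC, CONV_SYMMETRIC,
			dims, dims, kdims, krn);
}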
/**
 * Create maps operator, m = S x
 *
 * @param max_dims maximal dimensions across all data structures
 * @param sens_flags active map dimensions
 * @param sens sensitivities
 * @param gpu TRUE if using gpu
 */
struct linop_s* maps_create(const long max_dims[DIMS], unsigned int sens_flags, const complex float* sens, bool gpu)
{
	struct maps_data* data = maps_create_data(max_dims, sens_flags, sens, gpu);

	// scale the sensitivity maps by the FFT scale factor
	fftscale(DIMS, data->mps_dims, FFT_FLAGS, data->sens, data->sens);

	return linop_create(DIMS, data->ksp_dims, DIMS, data->img_dims, data,
			maps_apply, maps_apply_adjoint, maps_apply_normal, maps_apply_pinverse,
			maps_free_data);
}
struct linop_s* sampling_create(const long dims[DIMS], const long pat_dims[DIMS], const complex float* pattern)
{
	struct sampling_data_s* data = xmalloc(sizeof(struct sampling_data_s));

	md_select_dims(DIMS, ~MAPS_FLAG, data->dims, dims);	// dimensions of kspace
	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);
	md_calc_strides(DIMS, data->pat_strs, pat_dims, CFL_SIZE);

	data->pattern = pattern;

	return linop_create(DIMS, data->dims, DIMS, data->dims, data,
			sampling_apply, sampling_apply, sampling_apply, NULL, sampling_free);
}
struct linop_s* rvc_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct rvc_s, data);

	PTR_ALLOC(long[N], dims2);
	md_copy_dims(N, *dims2, dims);

	data->N = N;
	data->dims = *dims2;

	return linop_create(N, dims, N, dims, (void*)data,
			rvc_apply, rvc_apply, rvc_apply, NULL, rvc_free);
}
struct linop_s* linop_realval_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct rvc_s, data);
	SET_TYPEID(rvc_s, data);

	PTR_ALLOC(long[N], dims2);
	md_copy_dims(N, *dims2, dims);

	data->N = N;
	data->dims = *PTR_PASS(dims2);

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)),
			rvc_apply, rvc_apply, rvc_apply, NULL, rvc_free);
}
struct linop_s* tv_init(long N, const long dims[N], unsigned int flags)
{
	struct tv_s* data = xmalloc(sizeof(struct tv_s));

	data->N = N + 1;
	data->dims = xmalloc((N + 1) * sizeof(long));
	data->flags = flags;

	md_copy_dims(N, data->dims, dims);
	data->dims[N] = bitcount(flags);

	return linop_create(N + 1, data->dims, N, dims, data,
			tv_op_apply, tv_op_adjoint, tv_op_normal, NULL, tv_op_free);
}
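/*
 * Illustrative note (assumption, not part of the original source): the
 * codomain gains one extra dimension of size bitcount(flags), holding one
 * finite-difference image per flagged axis. For example:
 */
static struct linop_s* example_tv(void)
{
	long dims[2] = { 64, 64 };

	// flags = 3 selects dims 0 and 1, so the codomain is 64 x 64 x 2
	return tv_init(2, dims, 3u);
}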
/**
 * Create a resize linear operator: y = M x,
 * where M either crops or expands the input dimensions to match the output dimensions.
 * Uses centered zero-padding and centered cropping
 *
 * @param N number of dimensions
 * @param out_dims output dimensions
 * @param in_dims input dimensions
 */
struct linop_s* linop_resize_create(unsigned int N, const long out_dims[N], const long in_dims[N])
{
	struct resize_op_s* data = xmalloc(sizeof(struct resize_op_s));

	data->N = N;
	data->out_dims = xmalloc(N * sizeof(long));
	data->in_dims = xmalloc(N * sizeof(long));

	md_copy_dims(N, (long*)data->out_dims, out_dims);
	md_copy_dims(N, (long*)data->in_dims, in_dims);

	return linop_create(N, out_dims, N, in_dims, data,
			resize_forward, resize_adjoint, resize_normal, NULL, resize_free);
}
/**
 * Wavelet CDF 9/7 transform operator
 *
 * @param N number of dimensions
 * @param dims dimensions of input
 * @param flags bitmask of the dimensions to apply the wavelet transform
 */
struct linop_s* linop_cdf97_create(int N, const long dims[N], unsigned int flags)
{
	PTR_ALLOC(struct linop_cdf97_s, data);
	SET_TYPEID(linop_cdf97_s, data);

	PTR_ALLOC(long[N], ndims);
	md_copy_dims(N, *ndims, dims);

	data->N = N;
	data->dims = *ndims;
	data->flags = flags;

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)),
			linop_cdf97_apply, linop_cdf97_adjoint, linop_cdf97_normal, NULL, linop_cdf97_free);
}
/**
 * Create a resize linear operator: y = M x,
 * where M either crops or expands the input dimensions to match the output dimensions.
 * Uses centered zero-padding and centered cropping
 *
 * @param N number of dimensions
 * @param out_dims output dimensions
 * @param in_dims input dimensions
 */
struct linop_s* linop_resize_create(unsigned int N, const long out_dims[N], const long in_dims[N])
{
	PTR_ALLOC(struct resize_op_s, data);
	SET_TYPEID(resize_op_s, data);

	data->N = N;
	data->out_dims = *TYPE_ALLOC(long[N]);
	data->in_dims = *TYPE_ALLOC(long[N]);

	md_copy_dims(N, (long*)data->out_dims, out_dims);
	md_copy_dims(N, (long*)data->in_dims, in_dims);

	return linop_create(N, out_dims, N, in_dims, CAST_UP(PTR_PASS(data)),
			resize_forward, resize_adjoint, resize_normal, NULL, resize_free);
}
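/*
 * Illustrative sketch (assumption, not part of the original source): centered
 * zero-padding of a 64x64 input to a 128x128 output; the adjoint performs the
 * matching centered crop.
 */
static struct linop_s* example_resize(void)
{
	long in_dims[2] = { 64, 64 };
	long out_dims[2] = { 128, 128 };

	return linop_resize_create(2, out_dims, in_dims);
}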
/**
 * Wavelet linear operator
 *
 * @param numdims number of dimensions
 * @param imSize dimensions of x
 * @param wave_flags bitmask for wavelet transform
 * @param minSize minimum size of coarse wavelet scale
 * @param randshift apply random shift before wavelet transforming
 * @param use_gpu true if using gpu
 */
const struct linop_s* wavelet_create(int numdims, const long imSize[numdims], unsigned int wave_flags, const long minSize[numdims], bool randshift, bool use_gpu)
{
	PTR_ALLOC(struct wavelet_data_s, data);
	SET_TYPEID(wavelet_data_s, data);

	data->plan = prepare_wavelet_plan(numdims, imSize, wave_flags, minSize, use_gpu);

	data->plan->randshift = randshift;

	long coeff_dims[numdims];
	md_select_dims(numdims, ~wave_flags, coeff_dims, imSize);
	coeff_dims[0] = data->plan->numCoeff_tr;
	coeff_dims[1] = 1;
	coeff_dims[2] = 1;

	return linop_create(numdims, coeff_dims, numdims, imSize, CAST_UP(PTR_PASS(data)),
			wavelet_forward, wavelet_inverse, wavelet_normal, NULL, wavelet_del);
}
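/*
 * Illustrative sketch (assumption, not part of the original source): a 2D
 * wavelet transform of a 128x128 image with random shifting enabled and a
 * 16x16 coarse scale; DIMS and FFT_FLAGS are used as elsewhere in this file.
 */
static const struct linop_s* example_wavelet(void)
{
	long dims[DIMS];
	long minSize[DIMS];

	md_singleton_dims(DIMS, dims);
	md_singleton_dims(DIMS, minSize);

	dims[0] = 128;
	dims[1] = 128;

	minSize[0] = 16;
	minSize[1] = 16;

	return wavelet_create(DIMS, dims, FFT_FLAGS, minSize, true, false);
}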
struct linop_s* linop_grad_create(long N, const long dims[N], unsigned int flags)
{
	PTR_ALLOC(struct grad_s, data);
	SET_TYPEID(grad_s, data);

	long dims2[N + 1];
	grad_dims(N, dims2, flags, dims);

	data->N = N + 1;
	data->flags = flags;

	data->dims = *TYPE_ALLOC(long[N + 1]);
	md_copy_dims(N + 1, data->dims, dims2);

	return linop_create(N + 1, dims2, N, dims, CAST_UP(PTR_PASS(data)),
			grad_op_apply, grad_op_adjoint, grad_op_normal, NULL, grad_op_free);
}
struct linop_s* linop_sampling_create(const long dims[DIMS], const long pat_dims[DIMS], const complex float* pattern)
{
	PTR_ALLOC(struct sampling_data_s, data);
	SET_TYPEID(sampling_data_s, data);

	md_copy_dims(DIMS, data->pat_dims, pat_dims);
	md_select_dims(DIMS, ~MAPS_FLAG, data->dims, dims);	// dimensions of kspace

	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);
	md_calc_strides(DIMS, data->pat_strs, data->pat_dims, CFL_SIZE);

	data->pattern = (complex float*)pattern;

#ifdef USE_CUDA
	data->gpu_pattern = NULL;
#endif

	const long* dims2 = data->dims;

	return linop_create(DIMS, dims2, DIMS, dims2, CAST_UP(PTR_PASS(data)),
			sampling_apply, sampling_apply, sampling_apply, NULL, sampling_free);
}
/**
 * Initialize finite difference operator
 *
 * @param D number of dimensions
 * @param dim input dimensions
 * @param flags bitmask for applying operator
 * @param snip true: clear the initial entry (initial condition); false: keep it
 *
 * Returns a pointer to the finite difference operator
 */
extern const struct linop_s* linop_finitediff_create(unsigned int D, const long dim[D], const unsigned long flags, bool snip)
{
	PTR_ALLOC(struct fdiff_s, data);
	SET_TYPEID(fdiff_s, data);

	data->D = D;
	data->flags = flags;
	data->order = 1;
	data->snip = snip;

	data->dims = *TYPE_ALLOC(long[D]);
	md_copy_dims(D, data->dims, dim);

	data->str = *TYPE_ALLOC(long[D]);
	md_calc_strides(D, data->str, data->dims, CFL_SIZE);

	return linop_create(D, dim, D, dim, CAST_UP(PTR_PASS(data)),
			fdiff_apply, fdiff_apply_adjoint, NULL, cumsum_apply, finite_diff_del);
}
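/*
 * Illustrative sketch (assumption, not part of the original source): first-order
 * forward differences along dim 0. Note that the pseudo-inverse slot above is
 * filled with cumsum_apply, since cumulative summation undoes the difference.
 */
static const struct linop_s* example_finitediff(void)
{
	long dims[1] = { 128 };

	return linop_finitediff_create(1, dims, MD_BIT(0), false);
}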
const struct linop_s* linop_fmac_create(unsigned int N, const long dims[N],
		unsigned int oflags, unsigned int iflags, unsigned int tflags, const complex float* tensor)
{
	PTR_ALLOC(struct fmac_data, data);
	SET_TYPEID(fmac_data, data);

	data->N = N;

	data->dims = *TYPE_ALLOC(long[N]);
	md_copy_dims(N, data->dims, dims);

	data->idims = *TYPE_ALLOC(long[N]);
	data->istrs = *TYPE_ALLOC(long[N]);

	md_select_dims(N, ~iflags, data->idims, dims);
	md_calc_strides(N, data->istrs, data->idims, CFL_SIZE);

	data->odims = *TYPE_ALLOC(long[N]);
	data->ostrs = *TYPE_ALLOC(long[N]);

	md_select_dims(N, ~oflags, data->odims, dims);
	md_calc_strides(N, data->ostrs, data->odims, CFL_SIZE);

	data->tstrs = *TYPE_ALLOC(long[N]);
	data->tdims = *TYPE_ALLOC(long[N]);

	md_select_dims(N, ~tflags, data->tdims, dims);
	md_calc_strides(N, data->tstrs, data->tdims, CFL_SIZE);

	data->tensor = tensor;

#ifdef USE_CUDA
	data->gpu_tensor = NULL;
#endif

	long odims[N];
	md_copy_dims(N, odims, data->odims);

	long idims[N];
	md_copy_dims(N, idims, data->idims);

	return linop_create(N, odims, N, idims, CAST_UP(PTR_PASS(data)),
			fmac_apply, fmac_adjoint, NULL, NULL, fmac_free_data);
}
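/*
 * Illustrative sketch (assumption, not part of the original source): each flag
 * argument selects which entries of dims are removed from the corresponding
 * tensor. A matrix-vector product out[t] = sum_k mat[t][k] * in[k] then reads:
 */
static const struct linop_s* example_fmac_matvec(const complex float* mat, long T, long K)
{
	long dims[2] = { T, K };

	// input lives on [1 K], output on [T 1], the tensor on [T K]
	return linop_fmac_create(2, dims, MD_BIT(1), MD_BIT(0), 0u, mat);
}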
struct linop_s* maps2_create(const long coilim_dims[DIMS], const long maps_dims[DIMS], const long img_dims[DIMS],
		const complex float* maps, bool use_gpu)
{
	long max_dims[DIMS];

	unsigned int sens_flags = 0;

	for (unsigned int i = 0; i < DIMS; i++)
		if (1 != maps_dims[i])
			sens_flags = MD_SET(sens_flags, i);

	assert(1 == coilim_dims[MAPS_DIM]);
	assert(1 == img_dims[COIL_DIM]);
	assert(maps_dims[COIL_DIM] == coilim_dims[COIL_DIM]);
	assert(maps_dims[MAPS_DIM] == img_dims[MAPS_DIM]);

	for (unsigned int i = 0; i < DIMS; i++)
		max_dims[i] = MAX(coilim_dims[i], MAX(maps_dims[i], img_dims[i]));

	struct maps_data* data = maps_create_data(max_dims, sens_flags, maps, use_gpu);

	return linop_create(DIMS, coilim_dims, DIMS, img_dims, data,
			maps_apply, maps_apply_adjoint, maps_apply_normal, maps_apply_pinverse,
			maps_free_data);
}
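/*
 * Illustrative sketch (assumption, not part of the original source): a SENSE
 * model with 8 coils and a single map set on a 128x128 grid, assuming the
 * usual BART convention for COIL_DIM and MAPS_DIM used in the asserts above.
 */
static struct linop_s* example_maps2(const complex float* sens)
{
	long img_dims[DIMS];
	long maps_dims[DIMS];
	long coilim_dims[DIMS];

	md_singleton_dims(DIMS, img_dims);
	md_singleton_dims(DIMS, maps_dims);
	md_singleton_dims(DIMS, coilim_dims);

	img_dims[0] = 128;
	img_dims[1] = 128;

	coilim_dims[0] = 128;
	coilim_dims[1] = 128;
	coilim_dims[COIL_DIM] = 8;

	maps_dims[0] = 128;
	maps_dims[1] = 128;
	maps_dims[COIL_DIM] = 8;

	return maps2_create(coilim_dims, maps_dims, img_dims, sens, false);
}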
static struct linop_s* linop_gdiag_create(unsigned int N, const long dims[N], unsigned int flags, const _Complex float* diag, bool rdiag)
{
	struct cdiag_s* data = xmalloc(sizeof(struct cdiag_s));

	data->rmul = rdiag;

	data->N = N;
	long* dims2 = xmalloc(N * sizeof(long));
	long* dstrs = xmalloc(N * sizeof(long));
	long* strs = xmalloc(N * sizeof(long));

	long ddims[N];
	md_select_dims(N, flags, ddims, dims);
	md_copy_dims(N, dims2, dims);
	md_calc_strides(N, strs, dims, CFL_SIZE);
	md_calc_strides(N, dstrs, ddims, CFL_SIZE);

	data->dims = dims2;
	data->strs = strs;
	data->dstrs = dstrs;
	data->diag = diag;	// make a copy?

	return linop_create(N, dims, N, dims, data,
			cdiag_apply, cdiag_adjoint, cdiag_normal, NULL, cdiag_free);
}
/**
 * Efficiently chain two matrix linops by multiplying the actual matrices together.
 * Stores a copy of the new matrix.
 * Returns: C = B A
 *
 * @param a first matrix (applied to input)
 * @param b second matrix (applied to output of first matrix)
 */
struct linop_s* linop_matrix_chain(const struct linop_s* a, const struct linop_s* b)
{
	const struct operator_matrix_s* a_data = CAST_DOWN(operator_matrix_s, linop_get_data(a));
	const struct operator_matrix_s* b_data = CAST_DOWN(operator_matrix_s, linop_get_data(b));

	// check compatibility
	assert(linop_codomain(a)->N == linop_domain(b)->N);
	assert(md_check_compat(linop_codomain(a)->N, 0u, linop_codomain(a)->dims, linop_domain(b)->dims));

	unsigned int D = linop_domain(a)->N;

	unsigned long outB_flags = md_nontriv_dims(D, linop_codomain(b)->dims);
	unsigned long inB_flags = md_nontriv_dims(D, linop_domain(b)->dims);

	unsigned long delB_flags = inB_flags & ~outB_flags;

	unsigned int N = a_data->N;
	assert(N == 2 * D);

	long in_dims[N];
	md_copy_dims(N, in_dims, a_data->in_dims);

	long matA_dims[N];
	md_copy_dims(N, matA_dims, a_data->mat_dims);

	long matB_dims[N];
	md_copy_dims(N, matB_dims, b_data->mat_dims);

	long out_dims[N];
	md_copy_dims(N, out_dims, b_data->out_dims);

	for (unsigned int i = 0; i < D; i++) {

		if (MD_IS_SET(delB_flags, i)) {

			matA_dims[2 * i + 0] = a_data->mat_dims[2 * i + 1];
			matA_dims[2 * i + 1] = a_data->mat_dims[2 * i + 0];

			in_dims[2 * i + 0] = a_data->in_dims[2 * i + 1];
			in_dims[2 * i + 1] = a_data->in_dims[2 * i + 0];
		}
	}

	long matrix_dims[N];
	md_singleton_dims(N, matrix_dims);

	unsigned long iflags = md_nontriv_dims(N, in_dims);
	unsigned long oflags = md_nontriv_dims(N, out_dims);
	unsigned long flags = iflags | oflags;

	// we combine a and b and sum over dims not in input or output
	md_max_dims(N, flags, matrix_dims, matA_dims, matB_dims);

	debug_printf(DP_DEBUG1, "tensor chain: %ld x %ld -> %ld\n",
			md_calc_size(N, matA_dims), md_calc_size(N, matB_dims), md_calc_size(N, matrix_dims));

	complex float* matrix = md_alloc(N, matrix_dims, CFL_SIZE);

	debug_print_dims(DP_DEBUG2, N, matrix_dims);
	debug_print_dims(DP_DEBUG2, N, in_dims);
	debug_print_dims(DP_DEBUG2, N, matA_dims);
	debug_print_dims(DP_DEBUG2, N, matB_dims);
	debug_print_dims(DP_DEBUG2, N, out_dims);

	md_ztenmul(N, matrix_dims, matrix, matA_dims, a_data->mat, matB_dims, b_data->mat);

	// priv2 takes our doubled dimensions
	struct operator_matrix_s* data = linop_matrix_priv2(N, out_dims, in_dims, matrix_dims, matrix);

	/* although we internally use different dimensions we define the
	 * correct interface
	 */
	struct linop_s* c = linop_create(linop_codomain(b)->N, linop_codomain(b)->dims,
			linop_domain(a)->N, linop_domain(a)->dims, CAST_UP(data),
			linop_matrix_apply, linop_matrix_apply_adjoint,
			linop_matrix_apply_normal, NULL, linop_matrix_del);

	md_free(matrix);

	return c;
}
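/*
 * Illustrative sketch (assumption, not part of the original source): chaining
 * a 3x4 matrix A with a 2x3 matrix B so that C = B A is applied as a single
 * precomputed 2x4 matrix instead of two multiplies. Assumes the usual BART
 * md_ helpers; dimension placement follows the [T K] layout described above.
 */
static void example_matrix_chain_usage(void)
{
	enum { N = 2 };

	// A: [1 4] -> [3 1] with a 3x4 matrix; B: [3 1] -> [1 2] with a 2x3 matrix
	long aI[N] = { 1, 4 }, aO[N] = { 3, 1 }, aM[N] = { 3, 4 };
	long bI[N] = { 3, 1 }, bO[N] = { 1, 2 }, bM[N] = { 3, 2 };

	complex float* ma = md_alloc(N, aM, CFL_SIZE);
	complex float* mb = md_alloc(N, bM, CFL_SIZE);

	md_gaussian_rand(N, aM, ma);
	md_gaussian_rand(N, bM, mb);

	struct linop_s* a = linop_matrix_create(N, aO, aI, aM, ma);
	struct linop_s* b = linop_matrix_create(N, bO, bI, bM, mb);

	// same mapping as linop_chain(a, b), but with the matrices multiplied up front
	struct linop_s* c = linop_matrix_chain(a, b);

	linop_free(a);
	linop_free(b);
	linop_free(c);

	md_free(ma);
	md_free(mb);
}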
/**
 * Operator interface for a true matrix:
 * out = mat * in
 * in:	[x x x x 1 x x K x x]
 * mat:	[x x x x T x x K x x]
 * out:	[x x x x T x x 1 x x]
 * where the x's are arbitrary dimensions and T and K may be transposed
 *
 * use this interface if K == 1 or T == 1
 *
 * @param N number of dimensions
 * @param out_dims output dimensions after applying the matrix (codomain)
 * @param in_dims input dimensions to apply the matrix (domain)
 * @param T_dim dimension corresponding to the rows of A
 * @param K_dim dimension corresponding to the columns of A
 * @param matrix matrix data
 */
struct linop_s* linop_matrix_altcreate(unsigned int N, const long out_dims[N], const long in_dims[N],
		const unsigned int T_dim, const unsigned int K_dim, const complex float* matrix)
{
	long matrix_dims[N];
	md_singleton_dims(N, matrix_dims);

	matrix_dims[K_dim] = in_dims[K_dim];
	matrix_dims[T_dim] = out_dims[T_dim];

	unsigned int T = out_dims[T_dim];
	unsigned int K = in_dims[K_dim];

	PTR_ALLOC(long[N], max_dims);

	for (unsigned int i = 0; i < N; i++) {

		if ((in_dims[i] > 1) && (out_dims[i] == 1)) {

			(*max_dims)[i] = in_dims[i];

		} else if ((in_dims[i] == 1) && (out_dims[i] > 1)) {

			(*max_dims)[i] = out_dims[i];

		} else {

			assert(in_dims[i] == out_dims[i]);

			(*max_dims)[i] = in_dims[i];
		}
	}

	complex float* mat = md_alloc_sameplace(N, matrix_dims, CFL_SIZE, matrix);
	complex float* matc = md_alloc_sameplace(N, matrix_dims, CFL_SIZE, matrix);

	md_copy(N, matrix_dims, mat, matrix, CFL_SIZE);
	md_zconj(N, matrix_dims, matc, mat);

	complex float* gram = NULL;
	const struct iovec_s* gram_iovec = compute_gram_matrix(N, T_dim, T, K_dim, K, &gram, matrix_dims, matrix);

	PTR_ALLOC(struct operator_matrix_s, data);
	SET_TYPEID(operator_matrix_s, data);

	data->mat_iovec = iovec_create(N, matrix_dims, CFL_SIZE);
	data->mat_gram_iovec = gram_iovec;

	data->max_dims = *max_dims;

	data->mat = mat;
	data->mat_conj = matc;
	data->mat_gram = gram;

	data->K_dim = K_dim;
	data->T_dim = T_dim;
	data->K = K;
	data->T = T;

	data->domain_iovec = iovec_create(N, in_dims, CFL_SIZE);
	data->codomain_iovec = iovec_create(N, out_dims, CFL_SIZE);

	return linop_create(N, out_dims, N, in_dims, CAST_UP(PTR_PASS(data)),
			linop_matrix_apply, linop_matrix_apply_adjoint,
			linop_matrix_apply_normal, NULL, linop_matrix_del);
}
/**
 * NUFFT operator initialization
 *
 * @param N - number of dimensions
 * @param ksp_dims - kspace dimension
 * @param cim_dims - coil images dimension
 * @param traj_dims - trajectory dimension
 * @param traj - trajectory
 * @param weights - k-space weighting (may be NULL)
 * @param conf - configuration options
 * @param use_gpu - use gpu boolean
 */
struct linop_s* nufft_create(unsigned int N, const long ksp_dims[N], const long cim_dims[N], const long traj_dims[N],
		const complex float* traj, const complex float* weights, struct nufft_conf_s conf, bool use_gpu)
{
	struct nufft_data* data = (struct nufft_data*)xmalloc(sizeof(struct nufft_data));

	data->N = N;
	data->use_gpu = use_gpu;
	data->traj = traj;
	data->conf = conf;

	data->width = 3.;
	data->beta = calc_beta(2., data->width);

	// get dims
	assert(md_check_compat(N - 3, 0, ksp_dims + 3, cim_dims + 3));

	unsigned int ND = N + 3;

	data->ksp_dims = xmalloc(ND * sizeof(long));
	data->cim_dims = xmalloc(ND * sizeof(long));
	data->cml_dims = xmalloc(ND * sizeof(long));
	data->img_dims = xmalloc(ND * sizeof(long));
	data->trj_dims = xmalloc(ND * sizeof(long));
	data->lph_dims = xmalloc(ND * sizeof(long));
	data->psf_dims = xmalloc(ND * sizeof(long));
	data->wgh_dims = xmalloc(ND * sizeof(long));

	data->ksp_strs = xmalloc(ND * sizeof(long));
	data->cim_strs = xmalloc(ND * sizeof(long));
	data->cml_strs = xmalloc(ND * sizeof(long));
	data->img_strs = xmalloc(ND * sizeof(long));
	data->trj_strs = xmalloc(ND * sizeof(long));
	data->lph_strs = xmalloc(ND * sizeof(long));
	data->psf_strs = xmalloc(ND * sizeof(long));
	data->wgh_strs = xmalloc(ND * sizeof(long));

	md_singleton_dims(ND, data->cim_dims);
	md_singleton_dims(ND, data->ksp_dims);

	md_copy_dims(N, data->cim_dims, cim_dims);
	md_copy_dims(N, data->ksp_dims, ksp_dims);

	md_select_dims(ND, FFT_FLAGS, data->img_dims, data->cim_dims);

	assert(3 == traj_dims[0]);
	assert(traj_dims[1] == ksp_dims[1]);
	assert(traj_dims[2] == ksp_dims[2]);
	assert(md_check_compat(N - 3, ~0, traj_dims + 3, ksp_dims + 3));
	assert(md_check_bounds(N - 3, ~0, traj_dims + 3, ksp_dims + 3));

	md_singleton_dims(ND, data->trj_dims);
	md_copy_dims(N, data->trj_dims, traj_dims);

	// get strides
	md_calc_strides(ND, data->cim_strs, data->cim_dims, CFL_SIZE);
	md_calc_strides(ND, data->img_strs, data->img_dims, CFL_SIZE);
	md_calc_strides(ND, data->trj_strs, data->trj_dims, CFL_SIZE);
	md_calc_strides(ND, data->ksp_strs, data->ksp_dims, CFL_SIZE);

	data->weights = NULL;

	if (NULL != weights) {

		md_singleton_dims(ND, data->wgh_dims);
		md_select_dims(N, ~MD_BIT(0), data->wgh_dims, data->trj_dims);
		md_calc_strides(ND, data->wgh_strs, data->wgh_dims, CFL_SIZE);

		complex float* tmp = md_alloc(ND, data->wgh_dims, CFL_SIZE);
		md_copy(ND, data->wgh_dims, tmp, weights, CFL_SIZE);
		data->weights = tmp;
	}

	complex float* roll = md_alloc(ND, data->img_dims, CFL_SIZE);
	rolloff_correction(2., data->width, data->beta, data->img_dims, roll);
	data->roll = roll;

	complex float* linphase = compute_linphases(N, data->lph_dims, data->img_dims);

	md_calc_strides(ND, data->lph_strs, data->lph_dims, CFL_SIZE);

	if (!conf.toeplitz)
		md_zmul2(ND, data->lph_dims, data->lph_strs, linphase, data->lph_strs, linphase, data->img_strs, data->roll);

	fftmod(ND, data->lph_dims, FFT_FLAGS, linphase, linphase);
	fftscale(ND, data->lph_dims, FFT_FLAGS, linphase, linphase);
	// md_zsmul(ND, data->lph_dims, linphase, linphase, 1. / (float)(data->trj_dims[1] * data->trj_dims[2]));

	complex float* fftm = md_alloc(ND, data->img_dims, CFL_SIZE);
	md_zfill(ND, data->img_dims, fftm, 1.);
	fftmod(ND, data->img_dims, FFT_FLAGS, fftm, fftm);
	data->fftmod = fftm;

	data->linphase = linphase;

	data->psf = NULL;

	if (conf.toeplitz) {

#if 0
		md_copy_dims(ND, data->psf_dims, data->lph_dims);
#else
		md_copy_dims(3, data->psf_dims, data->lph_dims);
		md_copy_dims(ND - 3, data->psf_dims + 3, data->trj_dims + 3);
		data->psf_dims[N] = data->lph_dims[N];
#endif
		md_calc_strides(ND, data->psf_strs, data->psf_dims, CFL_SIZE);
		data->psf = compute_psf2(N, data->psf_dims, data->trj_dims, data->traj, data->weights);
	}

	md_copy_dims(ND, data->cml_dims, data->cim_dims);
	data->cml_dims[N + 0] = data->lph_dims[N + 0];

	md_calc_strides(ND, data->cml_strs, data->cml_dims, CFL_SIZE);

	data->cm2_dims = xmalloc(ND * sizeof(long));	// !

	md_copy_dims(ND, data->cm2_dims, data->cim_dims);

	for (int i = 0; i < 3; i++)
		data->cm2_dims[i] = (1 == cim_dims[i]) ? 1 : (2 * cim_dims[i]);

	data->grid = md_alloc(ND, data->cml_dims, CFL_SIZE);

	data->fft_op = linop_fft_create(ND, data->cml_dims, FFT_FLAGS, use_gpu);

	return linop_create(N, ksp_dims, N, cim_dims, data,
			nufft_apply, nufft_apply_adjoint, nufft_apply_normal, NULL, nufft_free_data);
}
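/*
 * Illustrative sketch (assumption, not part of the original source): a NUFFT
 * for a single-coil 2D radial acquisition, 128 spokes of 256 samples. Dim 0
 * of the trajectory holds (kx, ky, kz), matching the asserts above; weights
 * are omitted and the non-Toeplitz normal operator is used.
 */
static struct linop_s* example_nufft(const complex float* traj)
{
	long ksp_dims[DIMS];
	long cim_dims[DIMS];
	long trj_dims[DIMS];

	md_singleton_dims(DIMS, ksp_dims);
	md_singleton_dims(DIMS, cim_dims);
	md_singleton_dims(DIMS, trj_dims);

	ksp_dims[1] = 256;	// samples per spoke
	ksp_dims[2] = 128;	// spokes

	cim_dims[0] = 256;	// image grid
	cim_dims[1] = 256;

	trj_dims[0] = 3;	// (kx, ky, kz)
	trj_dims[1] = 256;
	trj_dims[2] = 128;

	struct nufft_conf_s conf = { .toeplitz = false };

	return nufft_create(DIMS, ksp_dims, cim_dims, trj_dims, traj, NULL, conf, false);
}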
/**
 * Create an Identity linear operator: I x
 * @param N number of dimensions
 * @param dims dimensions of input (domain)
 */
struct linop_s* linop_identity_create(unsigned int N, const long dims[N])
{
	const struct iovec_s* domain = iovec_create(N, dims, CFL_SIZE);

	return linop_create(N, dims, N, dims, (void*)domain,
			identity_apply, identity_apply, identity_apply, NULL, identity_free);
}