/**
 * Proximal operator for l1-norm with unitary transform: f(x) = lambda || T x ||_1
 *
 * @param D number of dimensions
 * @param unitary_op unitary linear operator
 * @param lambda threshold parameter
 * @param flags bitmask for joint soft-thresholding
 */
extern const struct operator_p_s* prox_unithresh_create(unsigned int D, const struct linop_s* unitary_op, const float lambda, const unsigned long flags)
{
	PTR_ALLOC(struct thresh_s, data);
	SET_TYPEID(thresh_s, data);

	data->lambda = lambda;
	data->D = D;
	data->flags = flags;
	data->unitary_op = unitary_op;

	const long* dims = linop_domain(unitary_op)->dims;

	PTR_ALLOC(long[D], ndim);
	md_copy_dims(D, *ndim, dims);
	data->dim = *PTR_PASS(ndim);

	PTR_ALLOC(long[D], nstr);
	md_calc_strides(D, *nstr, data->dim, CFL_SIZE);
	data->str = *PTR_PASS(nstr);

	// norm dimensions are the flagged transform dimensions
	// FIXME should use linop_codomain(unitary_op)->N
	PTR_ALLOC(long[D], norm_dim);
	md_select_dims(D, ~flags, *norm_dim, linop_codomain(unitary_op)->dims);
	data->norm_dim = *PTR_PASS(norm_dim);

	return operator_p_create(D, dims, D, dims, CAST_UP(PTR_PASS(data)), unisoftthresh_apply, thresh_del);
}
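/*
 * Usage sketch (added for illustration, not part of the original source):
 * any unitary linop can be passed in; here the identity operator defined
 * further below stands in for a real transform such as a wavelet.
 * operator_p_apply(), operator_p_free(), md_alloc(), md_gaussian_rand() and
 * md_free() are assumed to have their usual BART signatures.
 */
static void example_prox_unithresh(void)
{
	enum { N = 2 };
	long dims[N] = { 128, 128 };

	struct linop_s* id = linop_identity_create(N, dims);
	const struct operator_p_s* prox = prox_unithresh_create(N, id, 0.1f, 0UL);

	complex float* x = md_alloc(N, dims, CFL_SIZE);
	complex float* y = md_alloc(N, dims, CFL_SIZE);
	md_gaussian_rand(N, dims, x);

	// y = prox_{mu * lambda || T . ||_1}(x) with mu = 1.
	operator_p_apply(prox, 1., N, dims, y, N, dims, x);

	md_free(x);
	md_free(y);
	operator_p_free(prox);
	linop_free(id);
}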
/**
 * Thresholding operator for l0-norm: f(x) = || x ||_0 <= k, as used in the NIHT algorithm.
 * y = HT(x, k) (hard thresholding, i.e. keeping the k largest elements).
 *
 * @param D number of dimensions
 * @param dim dimensions of x
 * @param k threshold parameter (number of non-zero elements to keep)
 * @param flags bitmask for joint thresholding
 */
const struct operator_p_s* prox_niht_thresh_create(unsigned int D, const long dim[D], const unsigned int k, const unsigned long flags)
{
	PTR_ALLOC(struct thresh_s, data);
	SET_TYPEID(thresh_s, data);

	data->lambda = 0.;
	data->k = k;
	data->D = D;
	data->flags = flags;
	data->unitary_op = NULL;

	PTR_ALLOC(long[D], ndim);
	md_copy_dims(D, *ndim, dim);
	data->dim = *PTR_PASS(ndim);

	// norm dimensions are the flagged input dimensions
	PTR_ALLOC(long[D], norm_dim);
	md_select_dims(D, ~flags, *norm_dim, data->dim);
	data->norm_dim = *PTR_PASS(norm_dim);

	PTR_ALLOC(long[D], nstr);
	md_calc_strides(D, *nstr, data->dim, CFL_SIZE);
	data->str = *PTR_PASS(nstr);

	return operator_p_create(D, dim, D, dim, CAST_UP(PTR_PASS(data)), hardthresh_apply, thresh_del);
}
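/*
 * For intuition (a standalone sketch, not the library's implementation):
 * hardthresh_apply presumably keeps the k largest-magnitude entries and
 * zeroes the rest. On a plain array this can be written as below; ties at
 * the k-th magnitude are all kept, and 1 <= k <= n is assumed.
 */
#include <complex.h>
#include <stdlib.h>

static void hard_threshold(long n, unsigned int k, complex float* dst, const complex float* src)
{
	float* mag = malloc(sizeof(float) * (size_t)n);

	for (long i = 0; i < n; i++)
		mag[i] = cabsf(src[i]);

	// select the k-th largest magnitude by repeated maximum search (O(n * k))
	float kth = 0.f;

	for (unsigned int j = 0; j < k; j++) {

		long imax = 0;

		for (long i = 1; i < n; i++)
			if (mag[i] > mag[imax])
				imax = i;

		kth = mag[imax];
		mag[imax] = -1.f;	// exclude from further rounds
	}

	for (long i = 0; i < n; i++)
		dst[i] = (cabsf(src[i]) >= kth) ? src[i] : 0.f;

	free(mag);
}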
static struct linop_s* linop_gdiag_create(unsigned int N, const long dims[N], unsigned int flags, const complex float* diag, bool rdiag)
{
	PTR_ALLOC(struct cdiag_s, data);
	SET_TYPEID(cdiag_s, data);

	data->rmul = rdiag;
	data->N = N;

	PTR_ALLOC(long[N], dims2);
	PTR_ALLOC(long[N], dstrs);
	PTR_ALLOC(long[N], strs);

	long ddims[N];
	md_select_dims(N, flags, ddims, dims);
	md_copy_dims(N, *dims2, dims);
	md_calc_strides(N, *strs, dims, CFL_SIZE);
	md_calc_strides(N, *dstrs, ddims, CFL_SIZE);

	data->dims = *PTR_PASS(dims2);
	data->strs = *PTR_PASS(strs);
	data->dstrs = *PTR_PASS(dstrs);

	data->diag = diag;	// make a copy?

#ifdef USE_CUDA
	data->gpu_diag = NULL;
#endif

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), cdiag_apply, cdiag_adjoint, cdiag_normal, NULL, cdiag_free);
}
const struct operator_s* fft_create2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, ostrides, dst, istrides, src, backwards, false);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, ostrides, istrides, backwards);
#else
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, istrides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, ostrides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif

	return operator_create2(D, dimensions, ostrides, D, dimensions, istrides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
struct linop_s* linop_realval_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct rvc_s, data);
	SET_TYPEID(rvc_s, data);

	PTR_ALLOC(long[N], dims2);
	md_copy_dims(N, *dims2, dims);

	data->N = N;
	data->dims = *PTR_PASS(dims2);

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), rvc_apply, rvc_apply, rvc_apply, NULL, rvc_free);
}
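/*
 * Usage sketch (an assumption, not from the original source): rvc_apply
 * presumably projects onto the real part, so after a forward application
 * the imaginary component of y should be zero. linop_forward() is assumed
 * to have its usual BART signature.
 */
static void example_realval(void)
{
	enum { N = 1 };
	long dims[N] = { 8 };

	struct linop_s* rvc = linop_realval_create(N, dims);

	complex float* x = md_alloc(N, dims, CFL_SIZE);
	complex float* y = md_alloc(N, dims, CFL_SIZE);
	md_gaussian_rand(N, dims, x);

	linop_forward(rvc, N, dims, y, N, dims, x);	// expect y[i] == crealf(x[i])

	md_free(x);
	md_free(y);
	linop_free(rvc);
}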
const struct operator_s* nufft_precond_create(const struct linop_s* nufft_op)
{
	const auto data = CAST_DOWN(nufft_data, linop_get_data(nufft_op));

	PTR_ALLOC(struct nufft_precond_data, pdata);
	SET_TYPEID(nufft_precond_data, pdata);

	assert(data->conf.toeplitz);

	int N = data->N;
	int ND = N + 1;

	pdata->N = N;
	pdata->cim_dims = *TYPE_ALLOC(long[ND]);
	pdata->pre_dims = *TYPE_ALLOC(long[ND]);
	pdata->cim_strs = *TYPE_ALLOC(long[ND]);
	pdata->pre_strs = *TYPE_ALLOC(long[ND]);

	md_copy_dims(ND, pdata->cim_dims, data->cim_dims);
	md_select_dims(ND, data->flags, pdata->pre_dims, pdata->cim_dims);

	md_calc_strides(ND, pdata->cim_strs, pdata->cim_dims, CFL_SIZE);
	md_calc_strides(ND, pdata->pre_strs, pdata->pre_dims, CFL_SIZE);

	pdata->pre = compute_precond(pdata->N, pdata->pre_dims, pdata->pre_strs, data->psf_dims, data->psf_strs, data->psf, data->linphase);

	pdata->fft_op = linop_fft_create(pdata->N, pdata->cim_dims, data->flags);

	const long* cim_dims = pdata->cim_dims;	// need to dereference pdata before PTR_PASS

	return operator_create(N, cim_dims, N, cim_dims, CAST_UP(PTR_PASS(pdata)), nufft_precond_apply, nufft_precond_del);
}
/**
 * Initialize lrthresh data
 *
 * @param dims_decom - dimensions with levels at LEVEL_DIMS
 * @param randshift - randshift boolean
 * @param mflags - selects which dimensions get reshaped as the first dimension in matrix
 * @param blkdims - contains block dimensions for all levels
 */
static struct lrthresh_data_s* lrthresh_create_data(const long dims_decom[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean)
{
	PTR_ALLOC(struct lrthresh_data_s, data);
	SET_TYPEID(lrthresh_data_s, data);

	data->randshift = randshift;
	data->mflags = mflags;
	data->lambda = lambda;
	data->noise = noise;
	data->remove_mean = remove_mean;

	// level dimensions
	md_copy_dims(DIMS, data->dims_decom, dims_decom);
	md_calc_strides(DIMS, data->strs_lev, dims_decom, CFL_SIZE);

	// image dimensions
	data->levels = dims_decom[LEVEL_DIM];
	md_select_dims(DIMS, ~LEVEL_FLAG, data->dims, dims_decom);
	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);

	// blkdims
	for (long l = 0; l < data->levels; l++) {

		for (long i = 0; i < DIMS; i++)
			data->blkdims[l][i] = blkdims[l][i];
	}

	return PTR_PASS(data);
}
const struct linop_s* linop_zfinitediff_create(unsigned int D, const long dims[D], long diffdim, bool circular)
{
	PTR_ALLOC(struct zfinitediff_data, data);
	SET_TYPEID(zfinitediff_data, data);

	data->D = D;
	data->dim_diff = diffdim;
	data->do_circdiff = circular;

	data->dims_in = *TYPE_ALLOC(long[D]);
	data->dims_adj = *TYPE_ALLOC(long[D]);
	data->strides_in = *TYPE_ALLOC(long[D]);
	data->strides_adj = *TYPE_ALLOC(long[D]);

	md_copy_dims(D, data->dims_in, dims);
	md_copy_dims(D, data->dims_adj, dims);
	md_calc_strides(D, data->strides_in, data->dims_in, CFL_SIZE);

	if (!data->do_circdiff)
		data->dims_adj[data->dim_diff] -= 1;

	md_calc_strides(D, data->strides_adj, data->dims_adj, CFL_SIZE);

	const long* dims_adj = data->dims_adj;
	const long* dims_in = data->dims_in;

	return linop_create(D, dims_adj, D, dims_in, CAST_UP(PTR_PASS(data)), zfinitediff_apply, zfinitediff_adjoint, zfinitediff_normal, NULL, zfinitediff_del);
}
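/*
 * For intuition (standalone 1D sketch; the semantics are an assumption):
 * along dim_diff the forward operator presumably takes adjacent differences;
 * without wrap-around the output is one element shorter, which matches the
 * dims_adj[dim_diff] -= 1 above.
 */
#include <complex.h>

static void finite_diff_1d(long n, bool circular, complex float* dst, const complex float* src)
{
	long n_out = circular ? n : n - 1;

	for (long i = 0; i < n_out; i++)
		dst[i] = src[(i + 1) % n] - src[i];
}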
struct nlop_s* nlop_chain(const struct nlop_s* a, const struct nlop_s* b)
{
	assert(1 == nlop_get_nr_in_args(a));
	assert(1 == nlop_get_nr_out_args(a));
	assert(1 == nlop_get_nr_in_args(b));
	assert(1 == nlop_get_nr_out_args(b));

	const struct linop_s* la = linop_from_nlop(a);
	const struct linop_s* lb = linop_from_nlop(b);

	if ((NULL != la) && (NULL != lb))
		return nlop_from_linop(linop_chain(la, lb));

	PTR_ALLOC(struct nlop_s, n);

	const struct linop_s* (*der)[1][1] = TYPE_ALLOC(const struct linop_s*[1][1]);
	n->derivative = &(*der)[0][0];

	if (NULL == la)
		la = a->derivative[0];

	if (NULL == lb)
		lb = b->derivative[0];

	n->op = operator_chain(a->op, b->op);
	n->derivative[0] = linop_chain(la, lb);

	return PTR_PASS(n);
}
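/*
 * Usage sketch (illustrative, not from the original source): two trivially
 * linear nlops are chained; since both wrap linops, nlop_chain() takes the
 * fast path via linop_chain(). nlop_free() is assumed to exist with the
 * obvious signature; linop ownership/cleanup is elided for brevity.
 */
static void example_nlop_chain(void)
{
	long dims[1] = { 16 };

	struct nlop_s* a = nlop_from_linop(linop_identity_create(1, dims));
	struct nlop_s* b = nlop_from_linop(linop_identity_create(1, dims));

	struct nlop_s* c = nlop_chain(a, b);	// c(x) = b(a(x)); derivative is the chain rule

	nlop_free(c);
	nlop_free(a);
	nlop_free(b);
}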
static struct ufft_data* ufft_create_data(const long ksp_dims[DIMS], const long pat_dims[DIMS], const complex float* pat, unsigned int flags, bool use_gpu)
{
	PTR_ALLOC(struct ufft_data, data);
	SET_TYPEID(ufft_data, data);

	data->flags = flags;
	data->use_gpu = use_gpu;

	md_copy_dims(DIMS, data->pat_dims, pat_dims);
	md_copy_dims(DIMS, data->ksp_dims, ksp_dims);
	md_calc_strides(DIMS, data->pat_strs, pat_dims, CFL_SIZE);
	md_calc_strides(DIMS, data->ksp_strs, ksp_dims, CFL_SIZE);

#ifdef USE_CUDA
	data->pat = (use_gpu ? md_alloc_gpu : md_alloc)(DIMS, data->pat_dims, CFL_SIZE);
#else
	data->pat = md_alloc(DIMS, data->pat_dims, CFL_SIZE);
#endif
	md_copy(DIMS, data->pat_dims, data->pat, pat, CFL_SIZE);

	data->fft_op = linop_fftc_create(DIMS, ksp_dims, flags);

	return PTR_PASS(data);
}
/*
 * Proximal function for real-valued constraint
 */
const struct operator_p_s* prox_rvc_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct prox_rvc_data, pdata);
	SET_TYPEID(prox_rvc_data, pdata);

	pdata->size = md_calc_size(N, dims);

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_rvc_apply, prox_rvc_del);
}
static struct linop_s* linop_fft_create_priv(int N, const long dims[N], unsigned int flags, bool forward, bool center)
{
	const struct operator_s* plan = fft_measure_create(N, dims, flags, true, false);
	const struct operator_s* iplan = fft_measure_create(N, dims, flags, true, true);

	PTR_ALLOC(struct fft_linop_s, data);
	SET_TYPEID(fft_linop_s, data);

	data->frw = plan;
	data->adj = iplan;
	data->N = N;
	data->center = center;

	data->dims = *TYPE_ALLOC(long[N]);
	md_copy_dims(N, data->dims, dims);

	data->strs = *TYPE_ALLOC(long[N]);
	md_calc_strides(N, data->strs, data->dims, CFL_SIZE);

	long fft_dims[N];
	md_select_dims(N, flags, fft_dims, dims);
	data->nscale = (float)md_calc_size(N, fft_dims);

	lop_fun_t apply = forward ? fft_linop_apply : fft_linop_adjoint;
	lop_fun_t adjoint = forward ? fft_linop_adjoint : fft_linop_apply;

	struct linop_s* lop = linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), apply, adjoint, fft_linop_normal, NULL, fft_linop_free);

	if (center) {

		// FIXME: should only allocate flagged dims
		complex float* fftmod_mat = md_alloc(N, dims, CFL_SIZE);
		complex float* fftmodk_mat = md_alloc(N, dims, CFL_SIZE);

		// we need fftmodk only because we want to apply scaling only once
		complex float one[1] = { 1. };
		md_fill(N, dims, fftmod_mat, one, CFL_SIZE);
		fftmod(N, dims, flags, fftmodk_mat, fftmod_mat);
		fftscale(N, dims, flags, fftmod_mat, fftmodk_mat);

		struct linop_s* mod = linop_cdiag_create(N, dims, ~0u, fftmod_mat);
		struct linop_s* modk = linop_cdiag_create(N, dims, ~0u, fftmodk_mat);

		struct linop_s* tmp = linop_chain(mod, lop);
		tmp = linop_chain(tmp, modk);

		linop_free(lop);
		linop_free(mod);
		linop_free(modk);

		lop = tmp;
	}

	return lop;
}
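/*
 * Background note (a sketch of the convention, hedged): for even-length
 * dimensions, the centered FFT can be written as W . FFT(W . x) with
 * modulation weights W[k] = (-1)^k, which shifts the spectrum by n/2
 * without an explicit fftshift; fftmod() generalizes this (including the
 * phase convention for odd sizes), and fftscale() folds the normalization
 * into one of the two diagonals so it is applied only once, as the comment
 * above notes.
 */
#include <complex.h>

static void fftmod_weights_even(long n, complex float* w)
{
	for (long k = 0; k < n; k++)
		w[k] = (0 == k % 2) ? 1.f : -1.f;	// (-1)^k
}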
/**
 * Create an Identity linear operator: I x
 *
 * @param N number of dimensions
 * @param dims dimensions of input (domain)
 */
struct linop_s* linop_identity_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct identity_data_s, data);
	SET_TYPEID(identity_data_s, data);

	data->domain = iovec_create(N, dims, CFL_SIZE);

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), identity_apply, identity_apply, identity_apply, NULL, identity_free);
}
const struct operator_p_s* prox_l2norm_create(unsigned int N, const long dims[N], float lambda)
{
	PTR_ALLOC(struct prox_l2norm_data, pdata);
	SET_TYPEID(prox_l2norm_data, pdata);

	pdata->lambda = lambda;
	pdata->size = md_calc_size(N, dims) * 2;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_l2norm_apply, prox_l2norm_del);
}
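/*
 * The proximal map of f(x) = lambda ||x||_2 has the well-known closed form
 * prox_f(x) = max(0, 1 - lambda / ||x||_2) . x (block soft-thresholding);
 * presumably prox_l2norm_apply computes this on the interleaved real
 * representation, hence size = 2 * md_calc_size(...). Standalone sketch:
 */
#include <math.h>

static void prox_l2norm(long size, float lambda, float* dst, const float* src)
{
	float norm = 0.f;

	for (long i = 0; i < size; i++)
		norm += src[i] * src[i];

	norm = sqrtf(norm);

	float scale = (norm > lambda) ? (1.f - lambda / norm) : 0.f;

	for (long i = 0; i < size; i++)
		dst[i] = scale * src[i];
}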
/**
 * Convolution operator
 *
 * @param N number of dimensions
 * @param flags bitmask of the dimensions to apply convolution
 * @param ctype convolution type (enum conv_type)
 * @param cmode convolution mode (enum conv_mode)
 * @param odims output dimensions
 * @param idims input dimensions
 * @param kdims kernel dimensions
 * @param krn convolution kernel
 */
struct linop_s* linop_conv_create(int N, unsigned int flags, enum conv_type ctype, enum conv_mode cmode, const long odims[N], const long idims[N], const long kdims[N], const complex float* krn)
{
	PTR_ALLOC(struct conv_data_s, data);
	SET_TYPEID(conv_data_s, data);

	data->plan = conv_plan(N, flags, ctype, cmode, odims, idims, kdims, krn);

	return linop_create(N, odims, N, idims, CAST_UP(PTR_PASS(data)), linop_conv_forward, linop_conv_adjoint, NULL, NULL, linop_conv_free);
}
const struct operator_s* fft_measure_create(unsigned int D, const long dimensions[D], unsigned long flags, bool inplace, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	complex float* src = md_alloc(D, dimensions, CFL_SIZE);
	complex float* dst = inplace ? src : md_alloc(D, dimensions, CFL_SIZE);

	long strides[D];
	md_calc_strides(D, strides, dimensions, CFL_SIZE);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, strides, dst, strides, src, backwards, true);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, strides, strides, backwards);
#else
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, strides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, strides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif

	// free scratch buffers only after planning (and after the device check above)
	md_free(src);

	if (!inplace)
		md_free(dst);

	return operator_create2(D, dimensions, strides, D, dimensions, strides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
const struct cg_data_s* cg_data_init(long N, const struct vec_iter_s* vops)
{
	PTR_ALLOC(struct cg_data_s, cgdata);

	cgdata->r = vops->allocate(N);
	cgdata->p = vops->allocate(N);
	cgdata->Ap = vops->allocate(N);

	return PTR_PASS(cgdata);
}
static const struct operator_p_s* prox_ineq_create(unsigned int N, const long dims[N], const complex float* b, bool positive)
{
	PTR_ALLOC(struct prox_ineq_data, pdata);
	SET_TYPEID(prox_ineq_data, pdata);

	pdata->size = md_calc_size(N, dims) * 2;
	pdata->b = (const float*)b;
	pdata->positive = positive;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_ineq_apply, prox_ineq_del);
}
const struct operator_p_s* prox_l2ball_create(unsigned int N, const long dims[N], float eps, const complex float* center)
{
	PTR_ALLOC(struct prox_l2ball_data, pdata);
	SET_TYPEID(prox_l2ball_data, pdata);

	pdata->center = (const float*)center;
	pdata->eps = eps;
	pdata->size = md_calc_size(N, dims) * 2;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_l2ball_apply, prox_l2ball_del);
}
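/*
 * The projection onto the l2-ball { x : ||x - c||_2 <= eps } is
 * x -> c + min(1, eps / ||x - c||_2) . (x - c); presumably prox_l2ball_apply
 * computes this on the interleaved real representation. Standalone sketch
 * (a NULL center is treated as the origin):
 */
#include <math.h>
#include <stddef.h>

static void proj_l2ball(long size, float eps, const float* center, float* dst, const float* src)
{
	float norm = 0.f;

	for (long i = 0; i < size; i++) {

		float d = src[i] - ((NULL != center) ? center[i] : 0.f);
		norm += d * d;
	}

	norm = sqrtf(norm);

	float scale = (norm > eps) ? (eps / norm) : 1.f;

	for (long i = 0; i < size; i++) {

		float c = (NULL != center) ? center[i] : 0.f;
		dst[i] = c + scale * (src[i] - c);
	}
}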
/**
 * Create an operator (with strides)
 */
const struct operator_s* operator_generic_create2(unsigned int N, const unsigned int D[N], const long* dims[N], const long* strs[N], operator_data_t* data, operator_fun_t apply, operator_del_t del)
{
	PTR_ALLOC(struct operator_s, op);
	PTR_ALLOC(const struct iovec_s*[N], dom);

	for (unsigned int i = 0; i < N; i++)
		(*dom)[i] = iovec_create2(D[i], dims[i], strs[i], CFL_SIZE);

	op->N = N;
	op->domain = *PTR_PASS(dom);
	op->data = data;
	op->apply = apply;
	op->refcount = 1;
	op->del = del;

	return PTR_PASS(op);
}
const struct operator_p_s* prox_leastsquares_create(unsigned int N, const long dims[N], float lambda, const complex float* y)
{
	PTR_ALLOC(struct prox_leastsquares_data, pdata);
	SET_TYPEID(prox_leastsquares_data, pdata);

	pdata->y = (const float*)y;
	pdata->lambda = lambda;
	pdata->size = md_calc_size(N, dims) * 2;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_leastsquares_apply, prox_leastsquares_del);
}
const struct operator_p_s* prox_thresh_create(unsigned int N, const long dims[N], float lambda, void (*thresh)(void* _data, float lambda, float* _dst, const float* _src), void* data)
{
	PTR_ALLOC(struct prox_thresh_data, pdata);
	SET_TYPEID(prox_thresh_data, pdata);

	pdata->thresh = thresh;
	pdata->lambda = lambda;
	pdata->data = data;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_thresh_apply, prox_thresh_del);
}
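/*
 * Usage sketch (hypothetical callback, not from the original source): the
 * callback sees the data as a plain float array (interleaved real/imaginary
 * parts), so the element count has to travel through the opaque data pointer.
 */
static void my_shrink(void* _data, float lambda, float* dst, const float* src)
{
	long size = *(long*)_data;

	for (long i = 0; i < size; i++)
		dst[i] = src[i] / (1.f + lambda);	// simple shrinkage as a stand-in rule
}

// at the call site (sketch; size must outlive the operator):
//	static long size;
//	size = 2 * md_calc_size(N, dims);
//	const struct operator_p_s* p = prox_thresh_create(N, dims, lambda, my_shrink, &size);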
const struct operator_p_s* prox_normaleq_create(const struct linop_s* op, const complex float* y)
{
	PTR_ALLOC(struct prox_normaleq_data, pdata);
	SET_TYPEID(prox_normaleq_data, pdata);

	PTR_ALLOC(struct iter_conjgrad_conf, cgconf);
	*cgconf = iter_conjgrad_defaults;
	cgconf->maxiter = 10;
	cgconf->l2lambda = 0;

	pdata->cgconf = PTR_PASS(cgconf);
	pdata->op = op;

	pdata->size = 2 * md_calc_size(linop_domain(op)->N, linop_domain(op)->dims);
	pdata->adj = md_alloc_sameplace(1, &(pdata->size), FL_SIZE, y);
	linop_adjoint_iter((struct linop_s*)op, pdata->adj, (const float*)y);

	return operator_p_create(linop_domain(op)->N, linop_domain(op)->dims, linop_domain(op)->N, linop_domain(op)->dims, CAST_UP(PTR_PASS(pdata)), prox_normaleq_apply, prox_normaleq_del);
}
/**
 * Proximal operator for l1-norm with Wavelet transform: f(x) = lambda || W x ||_1
 *
 * @param numdims number of dimensions
 * @param imSize dimensions of x
 * @param wave_flags bitmask for Wavelet transform
 * @param minSize minimum size of coarse Wavelet scale
 * @param lambda threshold parameter
 * @param randshift apply random shift before Wavelet transforming
 * @param use_gpu true if using gpu
 */
const struct operator_p_s* prox_wavethresh_create(int numdims, const long imSize[numdims], unsigned int wave_flags, const long minSize[numdims], float lambda, bool randshift, bool use_gpu)
{
	PTR_ALLOC(struct wave_prox_s, data);
	SET_TYPEID(wave_prox_s, data);

	data->plan = prepare_wavelet_plan(numdims, imSize, wave_flags, minSize, use_gpu);

	data->plan->randshift = randshift;
	data->plan->lambda = lambda;

	return operator_p_create(numdims, imSize, numdims, imSize, CAST_UP(PTR_PASS(data)), wavelet_thresh, wavelet_prox_del);
}
/**
 * Wavelet CDF 9/7 transform operator
 *
 * @param N number of dimensions
 * @param dims dimensions of input
 * @param flags bitmask of the dimensions to apply the wavelet transform
 */
struct linop_s* linop_cdf97_create(int N, const long dims[N], unsigned int flags)
{
	PTR_ALLOC(struct linop_cdf97_s, data);
	SET_TYPEID(linop_cdf97_s, data);

	PTR_ALLOC(long[N], ndims);
	md_copy_dims(N, *ndims, dims);

	data->N = N;
	data->dims = *PTR_PASS(ndims);
	data->flags = flags;

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), linop_cdf97_apply, linop_cdf97_adjoint, linop_cdf97_normal, NULL, linop_cdf97_free);
}
/**
 * Create a resize linear operator: y = M x,
 * where M either crops or expands the input dimensions to match the output dimensions.
 * Uses centered zero-padding and centered cropping.
 *
 * @param N number of dimensions
 * @param out_dims output dimensions
 * @param in_dims input dimensions
 */
struct linop_s* linop_resize_create(unsigned int N, const long out_dims[N], const long in_dims[N])
{
	PTR_ALLOC(struct resize_op_s, data);
	SET_TYPEID(resize_op_s, data);

	data->N = N;
	data->out_dims = *TYPE_ALLOC(long[N]);
	data->in_dims = *TYPE_ALLOC(long[N]);

	md_copy_dims(N, (long*)data->out_dims, out_dims);
	md_copy_dims(N, (long*)data->in_dims, in_dims);

	return linop_create(N, out_dims, N, in_dims, CAST_UP(PTR_PASS(data)), resize_forward, resize_adjoint, resize_normal, NULL, resize_free);
}
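/*
 * Usage sketch (not from the original source): centered zero-padding of a
 * 64x64 image to 128x128; the adjoint of the same operator is the centered
 * crop. linop_forward() is assumed to have its usual BART signature.
 */
static void example_resize(void)
{
	enum { N = 2 };
	long in_dims[N] = { 64, 64 };
	long out_dims[N] = { 128, 128 };

	struct linop_s* pad = linop_resize_create(N, out_dims, in_dims);

	complex float* small = md_alloc(N, in_dims, CFL_SIZE);
	complex float* big = md_alloc(N, out_dims, CFL_SIZE);
	md_gaussian_rand(N, in_dims, small);

	linop_forward(pad, N, out_dims, big, N, in_dims, small);

	md_free(small);
	md_free(big);
	linop_free(pad);
}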
const struct operator_p_s* prox_lineq_create(const struct linop_s* op, const complex float* y)
{
	PTR_ALLOC(struct prox_lineq_data, pdata);
	SET_TYPEID(prox_lineq_data, pdata);

	unsigned int N = linop_domain(op)->N;
	const long* dims = linop_domain(op)->dims;

	pdata->op = op;

	pdata->adj = md_alloc_sameplace(N, dims, CFL_SIZE, y);
	linop_adjoint(op, N, dims, pdata->adj, N, linop_codomain(op)->dims, y);

	pdata->tmp = md_alloc_sameplace(N, dims, CFL_SIZE, y);

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_lineq_apply, prox_lineq_del);
}
struct linop_s* linop_grad_create(long N, const long dims[N], unsigned int flags)
{
	PTR_ALLOC(struct grad_s, data);
	SET_TYPEID(grad_s, data);

	long dims2[N + 1];
	grad_dims(N, dims2, flags, dims);

	data->N = N + 1;
	data->flags = flags;

	data->dims = *TYPE_ALLOC(long[N + 1]);
	md_copy_dims(N + 1, data->dims, dims2);

	return linop_create(N + 1, dims2, N, dims, CAST_UP(PTR_PASS(data)), grad_op_apply, grad_op_adjoint, grad_op_normal, NULL, grad_op_free);
}
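/*
 * Usage sketch (the codomain layout is an assumption): grad_dims() presumably
 * appends one extra dimension holding the partial derivatives selected by
 * flags, so for a 2D image differentiated along both axes the codomain is
 * { 64, 64, 2 }.
 */
static void example_grad(void)
{
	enum { N = 2 };
	long dims[N] = { 64, 64 };
	long gdims[N + 1] = { 64, 64, 2 };	// two partial derivatives stacked in dim N

	struct linop_s* grad = linop_grad_create(N, dims, 3u);	// flags: dims 0 and 1

	complex float* x = md_alloc(N, dims, CFL_SIZE);
	complex float* g = md_alloc(N + 1, gdims, CFL_SIZE);
	md_gaussian_rand(N, dims, x);

	linop_forward(grad, N + 1, gdims, g, N, dims, x);

	md_free(x);
	md_free(g);
	linop_free(grad);
}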
/**
 * Wavelet linear operator
 *
 * @param numdims number of dimensions
 * @param imSize dimensions of x
 * @param wave_flags bitmask for Wavelet transform
 * @param minSize minimum size of coarse Wavelet scale
 * @param randshift apply random shift before Wavelet transforming
 * @param use_gpu true if using gpu
 */
const struct linop_s* wavelet_create(int numdims, const long imSize[numdims], unsigned int wave_flags, const long minSize[numdims], bool randshift, bool use_gpu)
{
	PTR_ALLOC(struct wavelet_data_s, data);
	SET_TYPEID(wavelet_data_s, data);

	data->plan = prepare_wavelet_plan(numdims, imSize, wave_flags, minSize, use_gpu);
	data->plan->randshift = randshift;

	long coeff_dims[numdims];
	md_select_dims(numdims, ~wave_flags, coeff_dims, imSize);
	coeff_dims[0] = data->plan->numCoeff_tr;
	coeff_dims[1] = 1;
	coeff_dims[2] = 1;

	return linop_create(numdims, coeff_dims, numdims, imSize, CAST_UP(PTR_PASS(data)), wavelet_forward, wavelet_inverse, wavelet_normal, NULL, wavelet_del);
}