/**
 * Proximal operator for l1-norm with unitary transform: f(x) = lambda || T x ||_1
 *
 * @param D number of dimensions
 * @param unitary_op unitary linear operator
 * @param lambda threshold parameter
 * @param flags bitmask for joint soft-thresholding
 */
extern const struct operator_p_s* prox_unithresh_create(unsigned int D, const struct linop_s* unitary_op, const float lambda, const unsigned long flags)
{
	PTR_ALLOC(struct thresh_s, data);
	SET_TYPEID(thresh_s, data);

	data->lambda = lambda;
	data->D = D;
	data->flags = flags;
	data->unitary_op = unitary_op;

	const long* dims = linop_domain(unitary_op)->dims;

	PTR_ALLOC(long[D], ndim);
	md_copy_dims(D, *ndim, dims);
	data->dim = *PTR_PASS(ndim);

	PTR_ALLOC(long[D], nstr);
	md_calc_strides(D, *nstr, data->dim, CFL_SIZE);
	data->str = *PTR_PASS(nstr);

	// norm dimensions are the flagged transform dimensions
	// FIXME should use linop_codomain(unitary_op)->N
	PTR_ALLOC(long[D], norm_dim);
	md_select_dims(D, ~flags, *norm_dim, linop_codomain(unitary_op)->dims);
	data->norm_dim = *PTR_PASS(norm_dim);

	return operator_p_create(D, dims, D, dims, CAST_UP(PTR_PASS(data)), unisoftthresh_apply, thresh_del);
}
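/*
 * Illustrative usage sketch (added, not part of the original file): with the
 * identity as the (trivially unitary) transform, prox_unithresh_create reduces
 * to plain soft-thresholding. Assumes BART's usual helpers (md_alloc,
 * md_gaussian_rand, operator_p_apply with the (op, mu, out, in) convention);
 * the function name example_prox_unithresh is made up for this sketch.
 */
static void example_prox_unithresh(void)
{
	enum { N = 3 };
	long dims[N] = { 8, 8, 4 };

	struct linop_s* id = linop_identity_create(N, dims);
	const struct operator_p_s* p = prox_unithresh_create(N, id, 0.1f, 0ul);

	complex float* x = md_alloc(N, dims, CFL_SIZE);
	md_gaussian_rand(N, dims, x);

	// x <- SoftThresh_{mu * lambda}(x); in-place use is assumed safe here
	operator_p_apply(p, 1., N, dims, x, N, dims, x);

	md_free(x);
	operator_p_free(p);
	linop_free(id);
}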
/**
 * Thresholding operator for l0-norm: f(x) = || x ||_0 <= k, as used in the NIHT algorithm.
 * y = HT(x, k) (hard thresholding, i.e. keeping the k largest elements).
 *
 * @param D number of dimensions
 * @param dim dimensions of x
 * @param k threshold parameter (non-zero elements to keep)
 * @param flags bitmask for joint thresholding
 */
const struct operator_p_s* prox_niht_thresh_create(unsigned int D, const long dim[D], const unsigned int k, const unsigned long flags)
{
	PTR_ALLOC(struct thresh_s, data);
	SET_TYPEID(thresh_s, data);

	data->lambda = 0.;
	data->k = k;
	data->D = D;
	data->flags = flags;
	data->unitary_op = NULL;

	PTR_ALLOC(long[D], ndim);
	md_copy_dims(D, *ndim, dim);
	data->dim = *PTR_PASS(ndim);

	// norm dimensions are the flagged input dimensions
	PTR_ALLOC(long[D], norm_dim);
	md_select_dims(D, ~flags, *norm_dim, data->dim);
	data->norm_dim = *PTR_PASS(norm_dim);

	PTR_ALLOC(long[D], nstr);
	md_calc_strides(D, *nstr, data->dim, CFL_SIZE);
	data->str = *PTR_PASS(nstr);

	return operator_p_create(D, dim, D, dim, CAST_UP(PTR_PASS(data)), hardthresh_apply, thresh_del);
}
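/*
 * Illustrative sketch (added; example_* identifiers are made up): keep the
 * k = 100 largest-magnitude entries of a 2D array with no joint thresholding
 * (flags = 0). The scaling parameter mu plays no role for an indicator
 * function, so 1. is passed as a placeholder.
 */
static void example_prox_niht(void)
{
	enum { D = 2 };
	long dim[D] = { 64, 64 };

	const struct operator_p_s* p = prox_niht_thresh_create(D, dim, 100, 0ul);

	complex float* x = md_alloc(D, dim, CFL_SIZE);
	md_gaussian_rand(D, dim, x);

	operator_p_apply(p, 1., D, dim, x, D, dim, x);	// x <- HT(x, 100)

	md_free(x);
	operator_p_free(p);
}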
static struct linop_s* linop_gdiag_create(unsigned int N, const long dims[N], unsigned int flags, const complex float* diag, bool rdiag)
{
	PTR_ALLOC(struct cdiag_s, data);
	SET_TYPEID(cdiag_s, data);

	data->rmul = rdiag;
	data->N = N;

	PTR_ALLOC(long[N], dims2);
	PTR_ALLOC(long[N], dstrs);
	PTR_ALLOC(long[N], strs);

	long ddims[N];
	md_select_dims(N, flags, ddims, dims);
	md_copy_dims(N, *dims2, dims);
	md_calc_strides(N, *strs, dims, CFL_SIZE);
	md_calc_strides(N, *dstrs, ddims, CFL_SIZE);

	data->dims = *PTR_PASS(dims2);
	data->strs = *PTR_PASS(strs);
	data->dstrs = *PTR_PASS(dstrs);
	data->diag = diag;	// make a copy?

#ifdef USE_CUDA
	data->gpu_diag = NULL;
#endif

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), cdiag_apply, cdiag_adjoint, cdiag_normal, NULL, cdiag_free);
}
const struct operator_s* nufft_precond_create(const struct linop_s* nufft_op)
{
	const auto data = CAST_DOWN(nufft_data, linop_get_data(nufft_op));

	PTR_ALLOC(struct nufft_precond_data, pdata);
	SET_TYPEID(nufft_precond_data, pdata);

	assert(data->conf.toeplitz);

	int N = data->N;
	int ND = N + 1;

	pdata->N = N;
	pdata->cim_dims = *TYPE_ALLOC(long[ND]);
	pdata->pre_dims = *TYPE_ALLOC(long[ND]);
	pdata->cim_strs = *TYPE_ALLOC(long[ND]);
	pdata->pre_strs = *TYPE_ALLOC(long[ND]);

	md_copy_dims(ND, pdata->cim_dims, data->cim_dims);
	md_select_dims(ND, data->flags, pdata->pre_dims, pdata->cim_dims);

	md_calc_strides(ND, pdata->cim_strs, pdata->cim_dims, CFL_SIZE);
	md_calc_strides(ND, pdata->pre_strs, pdata->pre_dims, CFL_SIZE);

	pdata->pre = compute_precond(pdata->N, pdata->pre_dims, pdata->pre_strs, data->psf_dims, data->psf_strs, data->psf, data->linphase);

	pdata->fft_op = linop_fft_create(pdata->N, pdata->cim_dims, data->flags);

	const long* cim_dims = pdata->cim_dims;	// need to dereference pdata before PTR_PASS

	return operator_create(N, cim_dims, N, cim_dims, CAST_UP(PTR_PASS(pdata)), nufft_precond_apply, nufft_precond_del);
}
const struct operator_s* fft_create2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, ostrides, dst, istrides, src, backwards, false);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, ostrides, istrides, backwards);
#else
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, istrides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, ostrides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif

	return operator_create2(D, dimensions, ostrides, D, dimensions, istrides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
const struct linop_s* linop_zfinitediff_create(unsigned int D, const long dims[D], long diffdim, bool circular)
{
	PTR_ALLOC(struct zfinitediff_data, data);
	SET_TYPEID(zfinitediff_data, data);

	data->D = D;
	data->dim_diff = diffdim;
	data->do_circdiff = circular;

	data->dims_in = *TYPE_ALLOC(long[D]);
	data->dims_adj = *TYPE_ALLOC(long[D]);
	data->strides_in = *TYPE_ALLOC(long[D]);
	data->strides_adj = *TYPE_ALLOC(long[D]);

	md_copy_dims(D, data->dims_in, dims);
	md_copy_dims(D, data->dims_adj, dims);

	md_calc_strides(D, data->strides_in, data->dims_in, CFL_SIZE);

	if (!data->do_circdiff)
		data->dims_adj[data->dim_diff] -= 1;

	md_calc_strides(D, data->strides_adj, data->dims_adj, CFL_SIZE);

	const long* dims_adj = data->dims_adj;
	const long* dims_in = data->dims_in;

	return linop_create(D, dims_adj, D, dims_in, CAST_UP(PTR_PASS(data)), zfinitediff_apply, zfinitediff_adjoint, zfinitediff_normal, NULL, zfinitediff_del);
}
/**
 * Proximal function for f(z) = 0.5 || y - A z ||_2^2.
 * Solution is (A^H A + (1/mu) I) z = A^H y + (1/mu)(x_plus_u)
 *
 * @param prox_data should be of type prox_normaleq_data
 * @param mu proximal penalty
 * @param z output
 * @param x_plus_u input
 */
static void prox_normaleq_fun(const operator_data_t* prox_data, float mu, float* z, const float* x_plus_u)
{
	struct prox_normaleq_data* pdata = CAST_DOWN(prox_normaleq_data, prox_data);

	if (0 == mu) {

		md_copy(1, MD_DIMS(pdata->size), z, x_plus_u, FL_SIZE);

	} else {

		float rho = 1. / mu;
		float* b = md_alloc_sameplace(1, MD_DIMS(pdata->size), FL_SIZE, x_plus_u);
		md_copy(1, MD_DIMS(pdata->size), b, pdata->adj, FL_SIZE);
		md_axpy(1, MD_DIMS(pdata->size), b, rho, x_plus_u);

		if (NULL == pdata->op->norm_inv) {

			struct iter_conjgrad_conf* cg_conf = pdata->cgconf;
			cg_conf->l2lambda = rho;

			iter_conjgrad(CAST_UP(cg_conf), pdata->op->normal, NULL, pdata->size, z, (float*)b, NULL);

		} else {

			linop_norm_inv_iter((struct linop_s*)pdata->op, rho, z, b);
		}

		md_free(b);
	}
}
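/*
 * Derivation (added for clarity): prox_{mu f}(v) with f(z) = 0.5 || y - A z ||_2^2
 * minimizes 0.5 || y - A z ||_2^2 + (1/(2 mu)) || z - v ||_2^2. Setting the
 * gradient to zero gives
 *
 *	A^H (A z - y) + (1/mu) (z - v) = 0
 *	=> (A^H A + (1/mu) I) z = A^H y + (1/mu) v,
 *
 * which is the normal equation quoted above with v = x_plus_u. The code builds
 * the right-hand side b = pdata->adj + rho * x_plus_u (rho = 1/mu, pdata->adj
 * holding A^H y) and solves with conjugate gradients, where l2lambda = rho
 * supplies the (1/mu) I term.
 */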
/**
 * Create undersampled/weighted fft operator
 */
const struct linop_s* linop_ufft_create(const long ksp_dims[DIMS], const long pat_dims[DIMS], const complex float* pat, unsigned int flags, bool use_gpu)
{
	struct ufft_data* data = ufft_create_data(ksp_dims, pat_dims, pat, flags, use_gpu);

	// Create operator interface
	return linop_create(DIMS, data->ksp_dims, DIMS, data->ksp_dims, CAST_UP(data), ufft_apply, ufft_apply_adjoint, ufft_apply_normal, ufft_apply_pinverse, ufft_free_data);
}
/*
 * Proximal function for real-value constraint
 */
const struct operator_p_s* prox_rvc_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct prox_rvc_data, pdata);
	SET_TYPEID(prox_rvc_data, pdata);

	pdata->size = md_calc_size(N, dims);

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_rvc_apply, prox_rvc_del);
}
/**
 * Operator interface for a true matrix:
 * out = mat * in
 * in:	[x x x x 1 x x K x x]
 * mat:	[x x x x T x x K x x]
 * out:	[x x x x T x x 1 x x]
 * where the x's are arbitrary dimensions and T and K may be transposed
 *
 * @param N number of dimensions
 * @param out_dims output dimensions after applying the matrix (codomain)
 * @param in_dims input dimensions to apply the matrix (domain)
 * @param matrix_dims dimensions of the matrix
 * @param matrix matrix data
 */
struct linop_s* linop_matrix_create(unsigned int N, const long out_dims[N], const long in_dims[N], const long matrix_dims[N], const complex float* matrix)
{
	struct operator_matrix_s* data = linop_matrix_priv(N, out_dims, in_dims, matrix_dims, matrix);

	return linop_create(N, out_dims, N, in_dims, CAST_UP(data), linop_matrix_apply, linop_matrix_apply_adjoint, linop_matrix_apply_normal, NULL, linop_matrix_del);
}
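/*
 * Worked example (added; values are illustrative): a 4x3 matrix applied along
 * dimensions 4 (T = 4) and 7 (K = 3) of an otherwise trivial 10-dimensional
 * space, matching the dimension diagram above. The matrix argument must hold
 * T * K = 12 complex entries.
 */
static struct linop_s* example_matrix_op(const complex float* matrix)
{
	enum { N = 10 };
	long in_dims[N]  = { 1, 1, 1, 1, 1, 1, 1, 3, 1, 1 };	// K = 3 at dim 7
	long mat_dims[N] = { 1, 1, 1, 1, 4, 1, 1, 3, 1, 1 };	// T = 4, K = 3
	long out_dims[N] = { 1, 1, 1, 1, 4, 1, 1, 1, 1, 1 };	// T = 4 at dim 4

	// out[t] = sum_k mat[t][k] * in[k]
	return linop_matrix_create(N, out_dims, in_dims, mat_dims, matrix);
}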
static struct linop_s* linop_fft_create_priv(int N, const long dims[N], unsigned int flags, bool forward, bool center)
{
	const struct operator_s* plan = fft_measure_create(N, dims, flags, true, false);
	const struct operator_s* iplan = fft_measure_create(N, dims, flags, true, true);

	PTR_ALLOC(struct fft_linop_s, data);
	SET_TYPEID(fft_linop_s, data);

	data->frw = plan;
	data->adj = iplan;
	data->N = N;
	data->center = center;

	data->dims = *TYPE_ALLOC(long[N]);
	md_copy_dims(N, data->dims, dims);

	data->strs = *TYPE_ALLOC(long[N]);
	md_calc_strides(N, data->strs, data->dims, CFL_SIZE);

	long fft_dims[N];
	md_select_dims(N, flags, fft_dims, dims);
	data->nscale = (float)md_calc_size(N, fft_dims);

	lop_fun_t apply = forward ? fft_linop_apply : fft_linop_adjoint;
	lop_fun_t adjoint = forward ? fft_linop_adjoint : fft_linop_apply;

	struct linop_s* lop = linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), apply, adjoint, fft_linop_normal, NULL, fft_linop_free);

	if (center) {

		// FIXME: should only allocate flagged dims
		complex float* fftmod_mat = md_alloc(N, dims, CFL_SIZE);
		complex float* fftmodk_mat = md_alloc(N, dims, CFL_SIZE);

		// we need fftmodk only because we want to apply scaling only once
		complex float one[1] = { 1. };
		md_fill(N, dims, fftmod_mat, one, CFL_SIZE);
		fftmod(N, dims, flags, fftmodk_mat, fftmod_mat);
		fftscale(N, dims, flags, fftmod_mat, fftmodk_mat);

		struct linop_s* mod = linop_cdiag_create(N, dims, ~0u, fftmod_mat);
		struct linop_s* modk = linop_cdiag_create(N, dims, ~0u, fftmodk_mat);

		struct linop_s* tmp = linop_chain(mod, lop);
		tmp = linop_chain(tmp, modk);

		linop_free(lop);
		linop_free(mod);
		linop_free(modk);

		lop = tmp;
	}

	return lop;
}
/**
 * Create an Identity linear operator: I x
 *
 * @param N number of dimensions
 * @param dims dimensions of input (domain)
 */
struct linop_s* linop_identity_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct identity_data_s, data);
	SET_TYPEID(identity_data_s, data);

	data->domain = iovec_create(N, dims, CFL_SIZE);

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), identity_apply, identity_apply, identity_apply, NULL, identity_free);
}
/**
 * Convolution operator
 *
 * @param N number of dimensions
 * @param flags bitmask of the dimensions to apply convolution
 * @param ctype type of convolution (cyclic, truncated, valid, extended)
 * @param cmode convolution mode (symmetric, causal, anticausal)
 * @param odims output dimensions
 * @param idims input dimensions
 * @param kdims kernel dimensions
 * @param krn convolution kernel
 */
struct linop_s* linop_conv_create(int N, unsigned int flags, enum conv_type ctype, enum conv_mode cmode, const long odims[N], const long idims[N], const long kdims[N], const complex float* krn)
{
	PTR_ALLOC(struct conv_data_s, data);
	SET_TYPEID(conv_data_s, data);

	data->plan = conv_plan(N, flags, ctype, cmode, odims, idims, kdims, krn);

	return linop_create(N, odims, N, idims, CAST_UP(PTR_PASS(data)), linop_conv_forward, linop_conv_adjoint, NULL, NULL, linop_conv_free);
}
const struct operator_p_s* prox_l2norm_create(unsigned int N, const long dims[N], float lambda)
{
	PTR_ALLOC(struct prox_l2norm_data, pdata);
	SET_TYPEID(prox_l2norm_data, pdata);

	pdata->lambda = lambda;
	pdata->size = md_calc_size(N, dims) * 2;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_l2norm_apply, prox_l2norm_del);
}
const struct operator_p_s* prox_l2ball_create(unsigned int N, const long dims[N], float eps, const complex float* center)
{
	PTR_ALLOC(struct prox_l2ball_data, pdata);
	SET_TYPEID(prox_l2ball_data, pdata);

	pdata->center = (const float*)center;
	pdata->eps = eps;
	pdata->size = md_calc_size(N, dims) * 2;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_l2ball_apply, prox_l2ball_del);
}
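/*
 * Note (added): the corresponding proximal map is the projection onto the
 * l2-ball { z : || z - center ||_2 <= eps },
 *
 *	z = center + (x - center) * min(1, eps / || x - center ||_2),
 *
 * i.e. points inside the ball are left untouched and points outside are
 * rescaled onto its surface.
 */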
static const struct operator_p_s* prox_ineq_create(unsigned int N, const long dims[N], const complex float* b, bool positive)
{
	PTR_ALLOC(struct prox_ineq_data, pdata);
	SET_TYPEID(prox_ineq_data, pdata);

	pdata->size = md_calc_size(N, dims) * 2;
	pdata->b = (const float*)b;
	pdata->positive = positive;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_ineq_apply, prox_ineq_del);
}
const struct operator_p_s* prox_leastsquares_create(unsigned int N, const long dims[N], float lambda, const complex float* y)
{
	PTR_ALLOC(struct prox_leastsquares_data, pdata);
	SET_TYPEID(prox_leastsquares_data, pdata);

	pdata->y = (const float*)y;
	pdata->lambda = lambda;
	pdata->size = md_calc_size(N, dims) * 2;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_leastsquares_apply, prox_leastsquares_del);
}
/**
 * Proximal operator for l1-norm with Wavelet transform: f(x) = lambda || W x ||_1
 *
 * @param numdims number of dimensions
 * @param imSize dimensions of x
 * @param wave_flags bitmask for Wavelet transform
 * @param minSize minimum size of coarse Wavelet scale
 * @param lambda threshold parameter
 * @param randshift apply random shift before Wavelet transforming
 * @param use_gpu true if using gpu
 */
const struct operator_p_s* prox_wavethresh_create(int numdims, const long imSize[numdims], unsigned int wave_flags, const long minSize[numdims], float lambda, bool randshift, bool use_gpu)
{
	PTR_ALLOC(struct wave_prox_s, data);
	SET_TYPEID(wave_prox_s, data);

	data->plan = prepare_wavelet_plan(numdims, imSize, wave_flags, minSize, use_gpu);

	data->plan->randshift = randshift;
	data->plan->lambda = lambda;

	return operator_p_create(numdims, imSize, numdims, imSize, CAST_UP(PTR_PASS(data)), wavelet_thresh, wavelet_prox_del);
}
struct linop_s* linop_realval_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct rvc_s, data);
	SET_TYPEID(rvc_s, data);

	PTR_ALLOC(long[N], dims2);
	md_copy_dims(N, *dims2, dims);

	data->N = N;
	data->dims = *PTR_PASS(dims2);

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), rvc_apply, rvc_apply, rvc_apply, NULL, rvc_free);
}
/**
 * Create a resize linear operator: y = M x,
 * where M either crops or expands the input dimensions to match the output dimensions.
 * Uses centered zero-padding and centered cropping.
 *
 * @param N number of dimensions
 * @param out_dims output dimensions
 * @param in_dims input dimensions
 */
struct linop_s* linop_resize_create(unsigned int N, const long out_dims[N], const long in_dims[N])
{
	PTR_ALLOC(struct resize_op_s, data);
	SET_TYPEID(resize_op_s, data);

	data->N = N;
	data->out_dims = *TYPE_ALLOC(long[N]);
	data->in_dims = *TYPE_ALLOC(long[N]);

	md_copy_dims(N, (long*)data->out_dims, out_dims);
	md_copy_dims(N, (long*)data->in_dims, in_dims);

	return linop_create(N, out_dims, N, in_dims, CAST_UP(PTR_PASS(data)), resize_forward, resize_adjoint, resize_normal, NULL, resize_free);
}
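/*
 * Usage sketch (added; example_* is made up): centered zero-padding of a
 * 128x128 image to 256x256. The adjoint of this operator is the matching
 * centered crop.
 */
static struct linop_s* example_zeropad(void)
{
	enum { N = 2 };
	long in_dims[N] = { 128, 128 };
	long out_dims[N] = { 256, 256 };

	return linop_resize_create(N, out_dims, in_dims);
}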
/**
 * Wavelet CDF 9/7 transform operator
 *
 * @param N number of dimensions
 * @param dims dimensions of input
 * @param flags bitmask of the dimensions to apply the wavelet transform
 */
struct linop_s* linop_cdf97_create(int N, const long dims[N], unsigned int flags)
{
	PTR_ALLOC(struct linop_cdf97_s, data);
	SET_TYPEID(linop_cdf97_s, data);

	PTR_ALLOC(long[N], ndims);
	md_copy_dims(N, *ndims, dims);

	data->N = N;
	data->dims = *PTR_PASS(ndims);
	data->flags = flags;

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), linop_cdf97_apply, linop_cdf97_adjoint, linop_cdf97_normal, NULL, linop_cdf97_free);
}
const struct operator_p_s* prox_lineq_create(const struct linop_s* op, const complex float* y)
{
	PTR_ALLOC(struct prox_lineq_data, pdata);
	SET_TYPEID(prox_lineq_data, pdata);

	unsigned int N = linop_domain(op)->N;
	const long* dims = linop_domain(op)->dims;

	pdata->op = op;

	pdata->adj = md_alloc_sameplace(N, dims, CFL_SIZE, y);
	linop_adjoint(op, N, dims, pdata->adj, N, linop_codomain(op)->dims, y);

	pdata->tmp = md_alloc_sameplace(N, dims, CFL_SIZE, y);

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_lineq_apply, prox_lineq_del);
}
/**
 * Wavelet linear operator
 *
 * @param numdims number of dimensions
 * @param imSize dimensions of x
 * @param wave_flags bitmask for Wavelet transform
 * @param minSize minimum size of coarse Wavelet scale
 * @param randshift apply random shift before Wavelet transforming
 * @param use_gpu true if using gpu
 */
const struct linop_s* wavelet_create(int numdims, const long imSize[numdims], unsigned int wave_flags, const long minSize[numdims], bool randshift, bool use_gpu)
{
	PTR_ALLOC(struct wavelet_data_s, data);
	SET_TYPEID(wavelet_data_s, data);

	data->plan = prepare_wavelet_plan(numdims, imSize, wave_flags, minSize, use_gpu);

	data->plan->randshift = randshift;

	long coeff_dims[numdims];
	md_select_dims(numdims, ~wave_flags, coeff_dims, imSize);
	coeff_dims[0] = data->plan->numCoeff_tr;
	coeff_dims[1] = 1;
	coeff_dims[2] = 1;

	return linop_create(numdims, coeff_dims, numdims, imSize, CAST_UP(PTR_PASS(data)), wavelet_forward, wavelet_inverse, wavelet_normal, NULL, wavelet_del);
}
struct linop_s* linop_grad_create(long N, const long dims[N], unsigned int flags)
{
	PTR_ALLOC(struct grad_s, data);
	SET_TYPEID(grad_s, data);

	long dims2[N + 1];
	grad_dims(N, dims2, flags, dims);

	data->N = N + 1;
	data->flags = flags;

	data->dims = *TYPE_ALLOC(long[N + 1]);
	md_copy_dims(N + 1, data->dims, dims2);

	return linop_create(N + 1, dims2, N, dims, CAST_UP(PTR_PASS(data)), grad_op_apply, grad_op_adjoint, grad_op_normal, NULL, grad_op_free);
}
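/*
 * Usage sketch (added; example_* is made up): gradient of a 2D image along
 * both axes. Assuming grad_dims appends one extra dimension holding one
 * partial derivative per flagged axis, a 64x64 input maps to a 64x64x2
 * output here.
 */
static struct linop_s* example_grad(void)
{
	enum { N = 2 };
	long dims[N] = { 64, 64 };

	return linop_grad_create(N, dims, MD_BIT(0) | MD_BIT(1));
}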
struct linop_s* linop_sampling_create(const long dims[DIMS], const long pat_dims[DIMS], const complex float* pattern)
{
	PTR_ALLOC(struct sampling_data_s, data);
	SET_TYPEID(sampling_data_s, data);

	md_copy_dims(DIMS, data->pat_dims, pat_dims);
	md_select_dims(DIMS, ~MAPS_FLAG, data->dims, dims);	// dimensions of kspace

	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);
	md_calc_strides(DIMS, data->pat_strs, data->pat_dims, CFL_SIZE);

	data->pattern = (complex float*)pattern;

#ifdef USE_CUDA
	data->gpu_pattern = NULL;
#endif

	const long* dims2 = data->dims;

	return linop_create(DIMS, dims2, DIMS, dims2, CAST_UP(PTR_PASS(data)), sampling_apply, sampling_apply, sampling_apply, NULL, sampling_free);
}
/**
 * Initialize finite difference operator
 *
 * @param D number of dimensions
 * @param dim input dimensions
 * @param flags bitmask for applying operator
 * @param snip true: clear the initial entry (initial condition); false: keep it
 *
 * Returns a pointer to the finite difference operator
 */
extern const struct linop_s* linop_finitediff_create(unsigned int D, const long dim[D], const unsigned long flags, bool snip)
{
	PTR_ALLOC(struct fdiff_s, data);
	SET_TYPEID(fdiff_s, data);

	data->D = D;
	data->flags = flags;
	data->order = 1;
	data->snip = snip;

	data->dims = *TYPE_ALLOC(long[D]);
	md_copy_dims(D, data->dims, dim);

	data->str = *TYPE_ALLOC(long[D]);
	md_calc_strides(D, data->str, data->dims, CFL_SIZE);

	return linop_create(D, dim, D, dim, CAST_UP(PTR_PASS(data)), fdiff_apply, fdiff_apply_adjoint, NULL, cumsum_apply, finite_diff_del);
}
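/*
 * Note (added; a sketch of the semantics, inferred from order = 1 and the
 * snip flag): along each flagged dimension the forward pass computes
 * first-order differences, y[i] = x[i] - x[i - 1], with the first entry
 * carrying the initial value; snip selects whether that initial entry is
 * cleared or kept. The cumulative sum (cumsum_apply) supplied in the inverse
 * slot undoes the differencing.
 */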
const struct linop_s* linop_fmac_create(unsigned int N, const long dims[N], unsigned int oflags, unsigned int iflags, unsigned int tflags, const complex float* tensor)
{
	PTR_ALLOC(struct fmac_data, data);
	SET_TYPEID(fmac_data, data);

	data->N = N;

	data->dims = *TYPE_ALLOC(long[N]);
	md_copy_dims(N, data->dims, dims);

	data->idims = *TYPE_ALLOC(long[N]);
	data->istrs = *TYPE_ALLOC(long[N]);
	md_select_dims(N, ~iflags, data->idims, dims);
	md_calc_strides(N, data->istrs, data->idims, CFL_SIZE);

	data->odims = *TYPE_ALLOC(long[N]);
	data->ostrs = *TYPE_ALLOC(long[N]);
	md_select_dims(N, ~oflags, data->odims, dims);
	md_calc_strides(N, data->ostrs, data->odims, CFL_SIZE);

	data->tstrs = *TYPE_ALLOC(long[N]);
	data->tdims = *TYPE_ALLOC(long[N]);
	md_select_dims(N, ~tflags, data->tdims, dims);
	md_calc_strides(N, data->tstrs, data->tdims, CFL_SIZE);

	data->tensor = tensor;

#ifdef USE_CUDA
	data->gpu_tensor = NULL;
#endif

	long odims[N];
	md_copy_dims(N, odims, data->odims);

	long idims[N];
	md_copy_dims(N, idims, data->idims);

	return linop_create(N, odims, N, idims, CAST_UP(PTR_PASS(data)), fmac_apply, fmac_adjoint, NULL, NULL, fmac_free_data);
}
const struct operator_s* fft_measure_create(unsigned int D, const long dimensions[D], unsigned long flags, bool inplace, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	complex float* src = md_alloc(D, dimensions, CFL_SIZE);
	complex float* dst = inplace ? src : md_alloc(D, dimensions, CFL_SIZE);

	long strides[D];
	md_calc_strides(D, strides, dimensions, CFL_SIZE);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, strides, dst, strides, src, backwards, true);

	md_free(src);

	if (!inplace)
		md_free(dst);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, strides, strides, backwards);
#else
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, strides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, strides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif

	return operator_create2(D, dimensions, strides, D, dimensions, strides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
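/*
 * Usage sketch (added; example_* is made up): plan a measured in-place FFT
 * over the first two dimensions and apply it with operator_apply. As the code
 * above shows, planning runs FFTW's measurement on scratch buffers, so the
 * resulting operator can be reused for any conforming data.
 */
static void example_fft_measure(complex float* x)
{
	enum { D = 2 };
	long dims[D] = { 128, 128 };

	const struct operator_s* fft = fft_measure_create(D, dims, MD_BIT(0) | MD_BIT(1), true, false);

	operator_apply(fft, D, dims, x, D, dims, x);	// forward FFT, in-place

	operator_free(fft);
}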
const struct operator_p_s* prox_normaleq_create(const struct linop_s* op, const complex float* y)
{
	PTR_ALLOC(struct prox_normaleq_data, pdata);
	SET_TYPEID(prox_normaleq_data, pdata);

	PTR_ALLOC(struct iter_conjgrad_conf, cgconf);
	*cgconf = iter_conjgrad_defaults;
	cgconf->maxiter = 10;
	cgconf->l2lambda = 0;

	pdata->cgconf = PTR_PASS(cgconf);
	pdata->op = op;

	pdata->size = 2 * md_calc_size(linop_domain(op)->N, linop_domain(op)->dims);
	pdata->adj = md_alloc_sameplace(1, &(pdata->size), FL_SIZE, y);
	linop_adjoint_iter((struct linop_s*)op, pdata->adj, (const float*)y);

	return operator_p_create(linop_domain(op)->N, linop_domain(op)->dims, linop_domain(op)->N, linop_domain(op)->dims, CAST_UP(PTR_PASS(pdata)), prox_normaleq_apply, prox_normaleq_del);
}
/**
 * Proximal operator for l1-norm with Wavelet transform: f(x) = lambda || W x ||_1
 *
 * @param N number of dimensions
 * @param dims dimensions of x
 * @param flags bitmask for Wavelet transform
 * @param minsize minimum size of coarse Wavelet scale
 * @param lambda threshold parameter
 * @param randshift random shifting
 */
const struct operator_p_s* prox_wavelet3_thresh_create(unsigned int N, const long dims[N], unsigned int flags, const long minsize[N], float lambda, bool randshift)
{
	PTR_ALLOC(struct wavelet3_thresh_s, data);
	SET_TYPEID(wavelet3_thresh_s, data);

	data->N = N;

	long (*ndims)[N] = TYPE_ALLOC(long[N]);
	md_copy_dims(N, *ndims, dims);
	data->dims = *ndims;

	long (*nminsize)[N] = TYPE_ALLOC(long[N]);
	md_copy_dims(N, *nminsize, minsize);
	data->minsize = *nminsize;

	data->flags = flags;
	data->lambda = lambda;
	data->randshift = randshift;
	data->rand_state = 1;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), wavelet3_thresh_apply, wavelet3_thresh_del);
}
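/*
 * Usage sketch (added; example_* and all values are illustrative): wavelet
 * soft-thresholding of a 3D volume over its first three dimensions, with a
 * coarsest-scale size of 16 along each flagged dimension; lambda and the
 * random-shift cycle spinning are as documented above.
 */
static const struct operator_p_s* example_wavelet_thresh(void)
{
	enum { N = 3 };
	long dims[N] = { 128, 128, 64 };
	long minsize[N] = { 16, 16, 16 };

	return prox_wavelet3_thresh_create(N, dims, MD_BIT(0) | MD_BIT(1) | MD_BIT(2), minsize, 0.01f, true);
}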