/**
 * Initialize lrthresh data
 *
 * @param dims_decom - dimensions with levels at LEVEL_DIMS
 * @param randshift - randshift boolean
 * @param mflags - selects which dimensions get reshaped as the first dimension of the matrix
 * @param blkdims - contains block dimensions for all levels
 */
static struct lrthresh_data_s* lrthresh_create_data(const long dims_decom[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean)
{
	PTR_ALLOC(struct lrthresh_data_s, data);
	SET_TYPEID(lrthresh_data_s, data);

	data->randshift = randshift;
	data->mflags = mflags;
	data->lambda = lambda;
	data->noise = noise;
	data->remove_mean = remove_mean;

	// level dimensions
	md_copy_dims(DIMS, data->dims_decom, dims_decom);
	md_calc_strides(DIMS, data->strs_lev, dims_decom, CFL_SIZE);

	// image dimensions
	data->levels = dims_decom[LEVEL_DIM];
	md_select_dims(DIMS, ~LEVEL_FLAG, data->dims, dims_decom);
	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);

	// blkdims
	for (long l = 0; l < data->levels; l++)
		for (long i = 0; i < DIMS; i++)
			data->blkdims[l][i] = blkdims[l][i];

	return PTR_PASS(data);
}
/**
 * Proximal operator for l1-norm with unitary transform: f(x) = lambda || T x ||_1
 *
 * @param D number of dimensions
 * @param unitary_op unitary linear operator
 * @param lambda threshold parameter
 * @param flags bitmask for joint soft-thresholding
 */
extern const struct operator_p_s* prox_unithresh_create(unsigned int D, const struct linop_s* unitary_op, const float lambda, const unsigned long flags)
{
	PTR_ALLOC(struct thresh_s, data);
	SET_TYPEID(thresh_s, data);

	data->lambda = lambda;
	data->D = D;
	data->flags = flags;
	data->unitary_op = unitary_op;

	const long* dims = linop_domain(unitary_op)->dims;

	PTR_ALLOC(long[D], ndim);
	md_copy_dims(D, *ndim, dims);
	data->dim = *PTR_PASS(ndim);

	PTR_ALLOC(long[D], nstr);
	md_calc_strides(D, *nstr, data->dim, CFL_SIZE);
	data->str = *PTR_PASS(nstr);

	// norm dimensions are the flagged transform dimensions
	// FIXME should use linop_codomain(unitary_op)->N
	PTR_ALLOC(long[D], norm_dim);
	md_select_dims(D, ~flags, *norm_dim, linop_codomain(unitary_op)->dims);
	data->norm_dim = *PTR_PASS(norm_dim);

	return operator_p_create(D, dims, D, dims, CAST_UP(PTR_PASS(data)), unisoftthresh_apply, thresh_del);
}
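/* A minimal usage sketch (not from the source): with the trivially unitary
 * identity operator from linop_identity_create() below, the prox reduces to
 * plain elementwise soft-thresholding. The header paths, the helper name
 * example_soft_thresh(), and the operator_p_apply()/operator_p_free() calls
 * are assumptions in the style of this codebase, not taken from it. */
#include <complex.h>

#include "num/multind.h"
#include "num/ops_p.h"
#include "linops/someops.h"
#include "iter/thresh.h"

static void example_soft_thresh(unsigned int N, const long dims[N], complex float* dst, const complex float* src)
{
	struct linop_s* id = linop_identity_create(N, dims);

	// f(x) = 0.1 || x ||_1, no joint thresholding (flags = 0)
	const struct operator_p_s* p = prox_unithresh_create(N, id, 0.1f, 0UL);

	operator_p_apply(p, 1.f, N, dims, dst, N, dims, src);	// mu = 1

	operator_p_free(p);
	linop_free(id);
}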
static struct ufft_data* ufft_create_data(const long ksp_dims[DIMS], const long pat_dims[DIMS], const complex float* pat, unsigned int flags, bool use_gpu)
{
	PTR_ALLOC(struct ufft_data, data);
	SET_TYPEID(ufft_data, data);

	data->flags = flags;
	data->use_gpu = use_gpu;

	md_copy_dims(DIMS, data->pat_dims, pat_dims);
	md_copy_dims(DIMS, data->ksp_dims, ksp_dims);

	md_calc_strides(DIMS, data->pat_strs, pat_dims, CFL_SIZE);
	md_calc_strides(DIMS, data->ksp_strs, ksp_dims, CFL_SIZE);

#ifdef USE_CUDA
	data->pat = (use_gpu ? md_alloc_gpu : md_alloc)(DIMS, data->pat_dims, CFL_SIZE);
#else
	data->pat = md_alloc(DIMS, data->pat_dims, CFL_SIZE);
#endif
	md_copy(DIMS, data->pat_dims, data->pat, pat, CFL_SIZE);

	data->fft_op = linop_fftc_create(DIMS, ksp_dims, flags);

	return PTR_PASS(data);
}
const struct operator_s* fft_create2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, ostrides, dst, istrides, src, backwards, false);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, ostrides, istrides, backwards);
#else
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, istrides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, ostrides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif

	return operator_create2(D, dimensions, ostrides, D, dimensions, istrides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
const struct operator_s* nufft_precond_create(const struct linop_s* nufft_op)
{
	const auto data = CAST_DOWN(nufft_data, linop_get_data(nufft_op));

	PTR_ALLOC(struct nufft_precond_data, pdata);
	SET_TYPEID(nufft_precond_data, pdata);

	assert(data->conf.toeplitz);

	int N = data->N;
	int ND = N + 1;

	pdata->N = N;
	pdata->cim_dims = *TYPE_ALLOC(long[ND]);
	pdata->pre_dims = *TYPE_ALLOC(long[ND]);
	pdata->cim_strs = *TYPE_ALLOC(long[ND]);
	pdata->pre_strs = *TYPE_ALLOC(long[ND]);

	md_copy_dims(ND, pdata->cim_dims, data->cim_dims);
	md_select_dims(ND, data->flags, pdata->pre_dims, pdata->cim_dims);

	md_calc_strides(ND, pdata->cim_strs, pdata->cim_dims, CFL_SIZE);
	md_calc_strides(ND, pdata->pre_strs, pdata->pre_dims, CFL_SIZE);

	pdata->pre = compute_precond(pdata->N, pdata->pre_dims, pdata->pre_strs, data->psf_dims, data->psf_strs, data->psf, data->linphase);

	pdata->fft_op = linop_fft_create(pdata->N, pdata->cim_dims, data->flags);

	const long* cim_dims = pdata->cim_dims;	// need to dereference pdata before PTR_PASS

	return operator_create(N, cim_dims, N, cim_dims, CAST_UP(PTR_PASS(pdata)), nufft_precond_apply, nufft_precond_del);
}
const struct linop_s* linop_zfinitediff_create(unsigned int D, const long dims[D], long diffdim, bool circular)
{
	PTR_ALLOC(struct zfinitediff_data, data);
	SET_TYPEID(zfinitediff_data, data);

	data->D = D;
	data->dim_diff = diffdim;
	data->do_circdiff = circular;

	data->dims_in = *TYPE_ALLOC(long[D]);
	data->dims_adj = *TYPE_ALLOC(long[D]);
	data->strides_in = *TYPE_ALLOC(long[D]);
	data->strides_adj = *TYPE_ALLOC(long[D]);

	md_copy_dims(D, data->dims_in, dims);
	md_copy_dims(D, data->dims_adj, dims);
	md_calc_strides(D, data->strides_in, data->dims_in, CFL_SIZE);

	if (!data->do_circdiff)
		data->dims_adj[data->dim_diff] -= 1;

	md_calc_strides(D, data->strides_adj, data->dims_adj, CFL_SIZE);

	const long* dims_adj = data->dims_adj;
	const long* dims_in = data->dims_in;

	return linop_create(D, dims_adj, D, dims_in, CAST_UP(PTR_PASS(data)), zfinitediff_apply, zfinitediff_adjoint, zfinitediff_normal, NULL, zfinitediff_del);
}
/**
 * Thresholding operator for l0-norm: f(x) = || x ||_0 <= k, as used in the NIHT algorithm.
 * y = HT(x, k) (hard thresholding, i.e. keeping the k largest elements).
 *
 * @param D number of dimensions
 * @param dim dimensions of x
 * @param k threshold parameter (non-zero elements to keep)
 * @param flags bitmask for joint thresholding
 */
const struct operator_p_s* prox_niht_thresh_create(unsigned int D, const long dim[D], const unsigned int k, const unsigned long flags)
{
	PTR_ALLOC(struct thresh_s, data);
	SET_TYPEID(thresh_s, data);

	data->lambda = 0.;
	data->k = k;
	data->D = D;
	data->flags = flags;
	data->unitary_op = NULL;

	PTR_ALLOC(long[D], ndim);
	md_copy_dims(D, *ndim, dim);
	data->dim = *PTR_PASS(ndim);

	// norm dimensions are the flagged input dimensions
	PTR_ALLOC(long[D], norm_dim);
	md_select_dims(D, ~flags, *norm_dim, data->dim);
	data->norm_dim = *PTR_PASS(norm_dim);

	PTR_ALLOC(long[D], nstr);
	md_calc_strides(D, *nstr, data->dim, CFL_SIZE);
	data->str = *PTR_PASS(nstr);

	return operator_p_create(D, dim, D, dim, CAST_UP(PTR_PASS(data)), hardthresh_apply, thresh_del);
}
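/* Worked sketch (my example, not from the source): keep the k = 2
 * largest-magnitude entries of a length-4 vector; all others are zeroed.
 * operator_p_apply() is assumed as in the earlier sketch; the step size
 * mu plays no role in hard thresholding. */
static void example_hard_thresh(void)
{
	long dims[1] = { 4 };
	complex float x[4] = { 3.f, -1.f, 4.f, 0.5f };
	complex float y[4];

	const struct operator_p_s* ht = prox_niht_thresh_create(1, dims, 2, 0UL);

	operator_p_apply(ht, 1.f, 1, dims, y, 1, dims, x);	// y = (3, 0, 4, 0)

	operator_p_free(ht);
}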
static struct linop_s* linop_gdiag_create(unsigned int N, const long dims[N], unsigned int flags, const complex float* diag, bool rdiag)
{
	PTR_ALLOC(struct cdiag_s, data);
	SET_TYPEID(cdiag_s, data);

	data->rmul = rdiag;
	data->N = N;

	PTR_ALLOC(long[N], dims2);
	PTR_ALLOC(long[N], dstrs);
	PTR_ALLOC(long[N], strs);

	long ddims[N];
	md_select_dims(N, flags, ddims, dims);
	md_copy_dims(N, *dims2, dims);
	md_calc_strides(N, *strs, dims, CFL_SIZE);
	md_calc_strides(N, *dstrs, ddims, CFL_SIZE);

	data->dims = *PTR_PASS(dims2);
	data->strs = *PTR_PASS(strs);
	data->dstrs = *PTR_PASS(dstrs);
	data->diag = diag;	// make a copy?

#ifdef USE_CUDA
	data->gpu_diag = NULL;
#endif

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), cdiag_apply, cdiag_adjoint, cdiag_normal, NULL, cdiag_free);
}
/*
 * Proximal function for real-value constraint
 */
const struct operator_p_s* prox_rvc_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct prox_rvc_data, pdata);
	SET_TYPEID(prox_rvc_data, pdata);

	pdata->size = md_calc_size(N, dims);

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_rvc_apply, prox_rvc_del);
}
static struct linop_s* linop_fft_create_priv(int N, const long dims[N], unsigned int flags, bool forward, bool center)
{
	const struct operator_s* plan = fft_measure_create(N, dims, flags, true, false);
	const struct operator_s* iplan = fft_measure_create(N, dims, flags, true, true);

	PTR_ALLOC(struct fft_linop_s, data);
	SET_TYPEID(fft_linop_s, data);

	data->frw = plan;
	data->adj = iplan;
	data->N = N;
	data->center = center;

	data->dims = *TYPE_ALLOC(long[N]);
	md_copy_dims(N, data->dims, dims);

	data->strs = *TYPE_ALLOC(long[N]);
	md_calc_strides(N, data->strs, data->dims, CFL_SIZE);

	long fft_dims[N];
	md_select_dims(N, flags, fft_dims, dims);
	data->nscale = (float)md_calc_size(N, fft_dims);

	lop_fun_t apply = forward ? fft_linop_apply : fft_linop_adjoint;
	lop_fun_t adjoint = forward ? fft_linop_adjoint : fft_linop_apply;

	struct linop_s* lop = linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), apply, adjoint, fft_linop_normal, NULL, fft_linop_free);

	if (center) {

		// FIXME: should only allocate flagged dims
		complex float* fftmod_mat = md_alloc(N, dims, CFL_SIZE);
		complex float* fftmodk_mat = md_alloc(N, dims, CFL_SIZE);

		// we need fftmodk only because we want to apply scaling only once
		complex float one[1] = { 1. };
		md_fill(N, dims, fftmod_mat, one, CFL_SIZE);
		fftmod(N, dims, flags, fftmodk_mat, fftmod_mat);
		fftscale(N, dims, flags, fftmod_mat, fftmodk_mat);

		struct linop_s* mod = linop_cdiag_create(N, dims, ~0u, fftmod_mat);
		struct linop_s* modk = linop_cdiag_create(N, dims, ~0u, fftmodk_mat);

		struct linop_s* tmp = linop_chain(mod, lop);
		tmp = linop_chain(tmp, modk);

		linop_free(lop);
		linop_free(mod);
		linop_free(modk);

		lop = tmp;
	}

	return lop;
}
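/* Sketch of the algebra behind the centered branch (my notation, not from
 * the source): with F the plain FFT and L, L_k the diagonal fftmod phase
 * matrices on the input and output side, the chain mod -> FFT -> modk
 * realizes the centered transform
 *
 *	F_c x = L_k F (L x),
 *
 * where the 1/sqrt(n) fftscale factor has been folded into the input-side
 * L once, so the scaling is applied a single time (the reason a separate
 * fftmodk matrix exists at all). */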
/**
 * Create an identity linear operator: I x
 *
 * @param N number of dimensions
 * @param dims dimensions of input (domain)
 */
struct linop_s* linop_identity_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct identity_data_s, data);
	SET_TYPEID(identity_data_s, data);

	data->domain = iovec_create(N, dims, CFL_SIZE);

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), identity_apply, identity_apply, identity_apply, NULL, identity_free);
}
const struct operator_p_s* prox_l2norm_create(unsigned int N, const long dims[N], float lambda)
{
	PTR_ALLOC(struct prox_l2norm_data, pdata);
	SET_TYPEID(prox_l2norm_data, pdata);

	pdata->lambda = lambda;
	pdata->size = md_calc_size(N, dims) * 2;	// number of floats: complex entries viewed as real pairs

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_l2norm_apply, prox_l2norm_del);
}
/**
 * Convolution operator
 *
 * @param N number of dimensions
 * @param flags bitmask of the dimensions to apply convolution
 * @param ctype type of convolution (enum conv_type)
 * @param cmode mode of convolution (enum conv_mode)
 * @param odims output dimensions
 * @param idims input dimensions
 * @param kdims kernel dimensions
 * @param krn convolution kernel
 */
struct linop_s* linop_conv_create(int N, unsigned int flags, enum conv_type ctype, enum conv_mode cmode, const long odims[N], const long idims[N], const long kdims[N], const complex float* krn)
{
	PTR_ALLOC(struct conv_data_s, data);
	SET_TYPEID(conv_data_s, data);

	data->plan = conv_plan(N, flags, ctype, cmode, odims, idims, kdims, krn);

	return linop_create(N, odims, N, idims, CAST_UP(PTR_PASS(data)), linop_conv_forward, linop_conv_adjoint, NULL, NULL, linop_conv_free);
}
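/* Usage sketch (my example): cyclic 2D convolution of a 128x128 image with
 * a 3x3 kernel. The CONV_CYCLIC/CONV_SYMMETRIC constants and the
 * linop_forward() signature are assumptions in the style of this codebase. */
static void example_conv(complex float* out, const complex float* in, const complex float* krn)
{
	enum { N = 2 };
	long dims[N] = { 128, 128 };
	long kdims[N] = { 3, 3 };

	// flags = 3u: convolve along dimensions 0 and 1
	struct linop_s* conv = linop_conv_create(N, 3u, CONV_CYCLIC, CONV_SYMMETRIC, dims, dims, kdims, krn);

	linop_forward(conv, N, dims, out, N, dims, in);	// out = krn (*) in

	linop_free(conv);
}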
static const struct operator_p_s* prox_ineq_create(unsigned int N, const long dims[N], const complex float* b, bool positive)
{
	PTR_ALLOC(struct prox_ineq_data, pdata);
	SET_TYPEID(prox_ineq_data, pdata);

	pdata->size = md_calc_size(N, dims) * 2;
	pdata->b = (const float*)b;
	pdata->positive = positive;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_ineq_apply, prox_ineq_del);
}
const struct operator_p_s* prox_l2ball_create(unsigned int N, const long dims[N], float eps, const complex float* center)
{
	PTR_ALLOC(struct prox_l2ball_data, pdata);
	SET_TYPEID(prox_l2ball_data, pdata);

	pdata->center = (const float*)center;
	pdata->eps = eps;
	pdata->size = md_calc_size(N, dims) * 2;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_l2ball_apply, prox_l2ball_del);
}
const struct operator_p_s* prox_leastsquares_create(unsigned int N, const long dims[N], float lambda, const complex float* y)
{
	PTR_ALLOC(struct prox_leastsquares_data, pdata);
	SET_TYPEID(prox_leastsquares_data, pdata);

	pdata->y = (const float*)y;
	pdata->lambda = lambda;
	pdata->size = md_calc_size(N, dims) * 2;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(pdata)), prox_leastsquares_apply, prox_leastsquares_del);
}
/**
 * Proximal operator for l1-norm with Wavelet transform: f(x) = lambda || W x ||_1
 *
 * @param numdims number of dimensions
 * @param imSize dimensions of x
 * @param wave_flags bitmask for Wavelet transform
 * @param minSize minimum size of coarse Wavelet scale
 * @param lambda threshold parameter
 * @param randshift apply random shift before Wavelet transforming
 * @param use_gpu true if using gpu
 */
const struct operator_p_s* prox_wavethresh_create(int numdims, const long imSize[numdims], unsigned int wave_flags, const long minSize[numdims], float lambda, bool randshift, bool use_gpu)
{
	PTR_ALLOC(struct wave_prox_s, data);
	SET_TYPEID(wave_prox_s, data);

	data->plan = prepare_wavelet_plan(numdims, imSize, wave_flags, minSize, use_gpu);

	data->plan->randshift = randshift;
	data->plan->lambda = lambda;

	return operator_p_create(numdims, imSize, numdims, imSize, CAST_UP(PTR_PASS(data)), wavelet_thresh, wavelet_prox_del);
}
struct linop_s* linop_realval_create(unsigned int N, const long dims[N])
{
	PTR_ALLOC(struct rvc_s, data);
	SET_TYPEID(rvc_s, data);

	PTR_ALLOC(long[N], dims2);
	md_copy_dims(N, *dims2, dims);

	data->N = N;
	data->dims = *PTR_PASS(dims2);

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), rvc_apply, rvc_apply, rvc_apply, NULL, rvc_free);
}
/**
 * Wavelet CDF 9/7 transform operator
 *
 * @param N number of dimensions
 * @param dims dimensions of input
 * @param flags bitmask of the dimensions to apply the wavelet transform
 */
struct linop_s* linop_cdf97_create(int N, const long dims[N], unsigned int flags)
{
	PTR_ALLOC(struct linop_cdf97_s, data);
	SET_TYPEID(linop_cdf97_s, data);

	PTR_ALLOC(long[N], ndims);
	md_copy_dims(N, *ndims, dims);

	data->N = N;
	data->dims = *PTR_PASS(ndims);
	data->flags = flags;

	return linop_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), linop_cdf97_apply, linop_cdf97_adjoint, linop_cdf97_normal, NULL, linop_cdf97_free);
}
/**
 * Create a resize linear operator: y = M x,
 * where M either crops or expands the input dimensions to match the output dimensions.
 * Uses centered zero-padding and centered cropping.
 *
 * @param N number of dimensions
 * @param out_dims output dimensions
 * @param in_dims input dimensions
 */
struct linop_s* linop_resize_create(unsigned int N, const long out_dims[N], const long in_dims[N])
{
	PTR_ALLOC(struct resize_op_s, data);
	SET_TYPEID(resize_op_s, data);

	data->N = N;
	data->out_dims = *TYPE_ALLOC(long[N]);
	data->in_dims = *TYPE_ALLOC(long[N]);

	md_copy_dims(N, (long*)data->out_dims, out_dims);
	md_copy_dims(N, (long*)data->in_dims, in_dims);

	return linop_create(N, out_dims, N, in_dims, CAST_UP(PTR_PASS(data)), resize_forward, resize_adjoint, resize_normal, NULL, resize_free);
}
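/* Usage sketch (my example): centered crop from 256x256 to 128x128.
 * The forward operation crops; the adjoint zero-pads back. The
 * linop_forward()/linop_adjoint() signatures are assumed. */
static void example_resize(complex float* small, complex float* big)
{
	long in_dims[2] = { 256, 256 };
	long out_dims[2] = { 128, 128 };

	struct linop_s* crop = linop_resize_create(2, out_dims, in_dims);

	linop_forward(crop, 2, out_dims, small, 2, in_dims, big);	// centered crop
	linop_adjoint(crop, 2, in_dims, big, 2, out_dims, small);	// centered zero-pad

	linop_free(crop);
}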
/**
 * Wavelet linear operator
 *
 * @param numdims number of dimensions
 * @param imSize dimensions of x
 * @param wave_flags bitmask for Wavelet transform
 * @param minSize minimum size of coarse Wavelet scale
 * @param randshift apply random shift before Wavelet transforming
 * @param use_gpu true if using gpu
 */
const struct linop_s* wavelet_create(int numdims, const long imSize[numdims], unsigned int wave_flags, const long minSize[numdims], bool randshift, bool use_gpu)
{
	PTR_ALLOC(struct wavelet_data_s, data);
	SET_TYPEID(wavelet_data_s, data);

	data->plan = prepare_wavelet_plan(numdims, imSize, wave_flags, minSize, use_gpu);

	data->plan->randshift = randshift;

	long coeff_dims[numdims];
	md_select_dims(numdims, ~wave_flags, coeff_dims, imSize);
	coeff_dims[0] = data->plan->numCoeff_tr;
	coeff_dims[1] = 1;
	coeff_dims[2] = 1;

	return linop_create(numdims, coeff_dims, numdims, imSize, CAST_UP(PTR_PASS(data)), wavelet_forward, wavelet_inverse, wavelet_normal, NULL, wavelet_del);
}
struct linop_s* linop_grad_create(long N, const long dims[N], unsigned int flags)
{
	PTR_ALLOC(struct grad_s, data);
	SET_TYPEID(grad_s, data);

	long dims2[N + 1];
	grad_dims(N, dims2, flags, dims);

	data->N = N + 1;
	data->flags = flags;

	data->dims = *TYPE_ALLOC(long[N + 1]);
	md_copy_dims(N + 1, data->dims, dims2);

	return linop_create(N + 1, dims2, N, dims, CAST_UP(PTR_PASS(data)), grad_op_apply, grad_op_adjoint, grad_op_normal, NULL, grad_op_free);
}
struct linop_s* linop_sampling_create(const long dims[DIMS], const long pat_dims[DIMS], const complex float* pattern)
{
	PTR_ALLOC(struct sampling_data_s, data);
	SET_TYPEID(sampling_data_s, data);

	md_copy_dims(DIMS, data->pat_dims, pat_dims);
	md_select_dims(DIMS, ~MAPS_FLAG, data->dims, dims);	// dimensions of kspace

	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);
	md_calc_strides(DIMS, data->pat_strs, data->pat_dims, CFL_SIZE);

	data->pattern = (complex float*)pattern;

#ifdef USE_CUDA
	data->gpu_pattern = NULL;
#endif

	const long* dims2 = data->dims;

	return linop_create(DIMS, dims2, DIMS, dims2, CAST_UP(PTR_PASS(data)), sampling_apply, sampling_apply, sampling_apply, NULL, sampling_free);
}
/**
 * Initialize finite difference operator
 *
 * @param D number of dimensions
 * @param dim input dimensions
 * @param flags bitmask for applying operator
 * @param snip true: clear the initial entry (initial condition); false: keep it
 *
 * Returns a pointer to the finite difference operator
 */
extern const struct linop_s* linop_finitediff_create(unsigned int D, const long dim[D], const unsigned long flags, bool snip)
{
	PTR_ALLOC(struct fdiff_s, data);
	SET_TYPEID(fdiff_s, data);

	data->D = D;
	data->flags = flags;
	data->order = 1;
	data->snip = snip;

	data->dims = *TYPE_ALLOC(long[D]);
	md_copy_dims(D, data->dims, dim);

	data->str = *TYPE_ALLOC(long[D]);
	md_calc_strides(D, data->str, data->dims, CFL_SIZE);

	return linop_create(D, dim, D, dim, CAST_UP(PTR_PASS(data)), fdiff_apply, fdiff_apply_adjoint, NULL, cumsum_apply, finite_diff_del);
}
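/* Sketch (my notation, zero boundary assumed): along each flagged dimension
 * the operator takes first-order forward differences
 *
 *	(D x)_i = x_i - x_{i-1},   with x_{-1} := 0.
 *
 * Example: x = (1, 3, 6) gives D x = (1, 2, 3); with snip = true the
 * initial entry is cleared, giving (0, 2, 3). */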
const struct linop_s* linop_fmac_create(unsigned int N, const long dims[N], unsigned int oflags, unsigned int iflags, unsigned int tflags, const complex float* tensor)
{
	PTR_ALLOC(struct fmac_data, data);
	SET_TYPEID(fmac_data, data);

	data->N = N;

	data->dims = *TYPE_ALLOC(long[N]);
	md_copy_dims(N, data->dims, dims);

	data->idims = *TYPE_ALLOC(long[N]);
	data->istrs = *TYPE_ALLOC(long[N]);
	md_select_dims(N, ~iflags, data->idims, dims);
	md_calc_strides(N, data->istrs, data->idims, CFL_SIZE);

	data->odims = *TYPE_ALLOC(long[N]);
	data->ostrs = *TYPE_ALLOC(long[N]);
	md_select_dims(N, ~oflags, data->odims, dims);
	md_calc_strides(N, data->ostrs, data->odims, CFL_SIZE);

	data->tdims = *TYPE_ALLOC(long[N]);
	data->tstrs = *TYPE_ALLOC(long[N]);
	md_select_dims(N, ~tflags, data->tdims, dims);
	md_calc_strides(N, data->tstrs, data->tdims, CFL_SIZE);

	data->tensor = tensor;

#ifdef USE_CUDA
	data->gpu_tensor = NULL;
#endif

	long odims[N];
	md_copy_dims(N, odims, data->odims);

	long idims[N];
	md_copy_dims(N, idims, data->idims);

	return linop_create(N, odims, N, idims, CAST_UP(PTR_PASS(data)), fmac_apply, fmac_adjoint, NULL, NULL, fmac_free_data);
}
const struct operator_s* fft_measure_create(unsigned int D, const long dimensions[D], unsigned long flags, bool inplace, bool backwards)
{
	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	complex float* src = md_alloc(D, dimensions, CFL_SIZE);
	complex float* dst = inplace ? src : md_alloc(D, dimensions, CFL_SIZE);

	long strides[D];
	md_calc_strides(D, strides, dimensions, CFL_SIZE);

	plan->fftw = fft_fftwf_plan(D, dimensions, flags, strides, dst, strides, src, backwards, true);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, strides, strides, backwards);
#else
	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, strides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, strides);
	plan->ostrs = *PTR_PASS(ostrs);
#endif
#endif

	// free the measurement buffers only after the last use of src/dst
	md_free(src);

	if (!inplace)
		md_free(dst);

	return operator_create2(D, dimensions, strides, D, dimensions, strides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
const struct operator_p_s* prox_normaleq_create(const struct linop_s* op, const complex float* y)
{
	PTR_ALLOC(struct prox_normaleq_data, pdata);
	SET_TYPEID(prox_normaleq_data, pdata);

	PTR_ALLOC(struct iter_conjgrad_conf, cgconf);
	*cgconf = iter_conjgrad_defaults;
	cgconf->maxiter = 10;
	cgconf->l2lambda = 0;

	pdata->cgconf = PTR_PASS(cgconf);
	pdata->op = op;

	pdata->size = 2 * md_calc_size(linop_domain(op)->N, linop_domain(op)->dims);
	pdata->adj = md_alloc_sameplace(1, &(pdata->size), FL_SIZE, y);
	linop_adjoint_iter((struct linop_s*)op, pdata->adj, (const float*)y);

	return operator_p_create(linop_domain(op)->N, linop_domain(op)->dims, linop_domain(op)->N, linop_domain(op)->dims, CAST_UP(PTR_PASS(pdata)), prox_normaleq_apply, prox_normaleq_del);
}
/**
 * Proximal operator for l1-norm with Wavelet transform: f(x) = lambda || W x ||_1
 *
 * @param N number of dimensions
 * @param dims dimensions of x
 * @param flags bitmask for Wavelet transform
 * @param minsize minimum size of coarse Wavelet scale
 * @param lambda threshold parameter
 * @param randshift random shifting
 */
const struct operator_p_s* prox_wavelet3_thresh_create(unsigned int N, const long dims[N], unsigned int flags, const long minsize[N], float lambda, bool randshift)
{
	PTR_ALLOC(struct wavelet3_thresh_s, data);
	SET_TYPEID(wavelet3_thresh_s, data);

	data->N = N;

	long (*ndims)[N] = TYPE_ALLOC(long[N]);
	md_copy_dims(N, (*ndims), dims);
	data->dims = *ndims;

	long (*nminsize)[N] = TYPE_ALLOC(long[N]);
	md_copy_dims(N, (*nminsize), minsize);
	data->minsize = *nminsize;

	data->flags = flags;
	data->lambda = lambda;
	data->randshift = randshift;
	data->rand_state = 1;

	return operator_p_create(N, dims, N, dims, CAST_UP(PTR_PASS(data)), wavelet3_thresh_apply, wavelet3_thresh_del);
}
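/* Usage sketch (my example): f(x) = 0.01 || W x ||_1 over a 3D volume,
 * with wavelets along dimensions 0-2 (flags = 7u) down to a coarse scale
 * of at least 16 samples per dimension. The caller frees the returned
 * operator with operator_p_free(). */
static const struct operator_p_s* example_wavelet_prox(void)
{
	long dims[4] = { 128, 128, 64, 1 };
	long minsize[4] = { 16, 16, 16, 1 };

	return prox_wavelet3_thresh_create(4, dims, 7u, minsize, 0.01f, true);
}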
/**
 * Operator interface for a true matrix:
 * out = mat * in
 * in:	[x x x x 1 x x K x x]
 * mat:	[x x x x T x x K x x]
 * out:	[x x x x T x x 1 x x]
 * where the x's are arbitrary dimensions and T and K may be transposed
 *
 * use this interface if K == 1 or T == 1
 *
 * @param N number of dimensions
 * @param out_dims output dimensions after applying the matrix (codomain)
 * @param in_dims input dimensions to apply the matrix (domain)
 * @param T_dim dimension corresponding to the rows of A
 * @param K_dim dimension corresponding to the columns of A
 * @param matrix matrix data
 */
struct linop_s* linop_matrix_altcreate(unsigned int N, const long out_dims[N], const long in_dims[N], const unsigned int T_dim, const unsigned int K_dim, const complex float* matrix)
{
	long matrix_dims[N];
	md_singleton_dims(N, matrix_dims);

	matrix_dims[K_dim] = in_dims[K_dim];
	matrix_dims[T_dim] = out_dims[T_dim];

	unsigned int T = out_dims[T_dim];
	unsigned int K = in_dims[K_dim];

	PTR_ALLOC(long[N], max_dims);

	for (unsigned int i = 0; i < N; i++) {

		if ((in_dims[i] > 1) && (out_dims[i] == 1)) {

			(*max_dims)[i] = in_dims[i];

		} else if ((in_dims[i] == 1) && (out_dims[i] > 1)) {

			(*max_dims)[i] = out_dims[i];

		} else {

			assert(in_dims[i] == out_dims[i]);
			(*max_dims)[i] = in_dims[i];
		}
	}

	complex float* mat = md_alloc_sameplace(N, matrix_dims, CFL_SIZE, matrix);
	complex float* matc = md_alloc_sameplace(N, matrix_dims, CFL_SIZE, matrix);

	md_copy(N, matrix_dims, mat, matrix, CFL_SIZE);
	md_zconj(N, matrix_dims, matc, mat);

	complex float* gram = NULL;
	const struct iovec_s* gram_iovec = compute_gram_matrix(N, T_dim, T, K_dim, K, &gram, matrix_dims, matrix);

	PTR_ALLOC(struct operator_matrix_s, data);
	SET_TYPEID(operator_matrix_s, data);

	data->mat_iovec = iovec_create(N, matrix_dims, CFL_SIZE);
	data->mat_gram_iovec = gram_iovec;
	data->max_dims = *PTR_PASS(max_dims);
	data->mat = mat;
	data->mat_conj = matc;
	data->mat_gram = gram;
	data->K_dim = K_dim;
	data->T_dim = T_dim;
	data->K = K;
	data->T = T;
	data->domain_iovec = iovec_create(N, in_dims, CFL_SIZE);
	data->codomain_iovec = iovec_create(N, out_dims, CFL_SIZE);

	return linop_create(N, out_dims, N, in_dims, CAST_UP(PTR_PASS(data)), linop_matrix_apply, linop_matrix_apply_adjoint, linop_matrix_apply_normal, NULL, linop_matrix_del);
}
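/* Usage sketch (my example): weighted coil combination, 8 coils -> 1 image,
 * i.e. T == 1 as the comment above recommends for this interface. The
 * linop_forward() signature is assumed. */
static void example_coil_combine(complex float* img, const complex float* coils, const complex float* weights)
{
	enum { N = 4 };
	long in_dims[N] = { 128, 128, 1, 8 };	// K_dim = 3: 8 coils
	long out_dims[N] = { 128, 128, 1, 1 };	// T_dim = 2: single combined image

	// weights has dimensions [1 1 1 8]
	struct linop_s* A = linop_matrix_altcreate(N, out_dims, in_dims, 2, 3, weights);

	linop_forward(A, N, out_dims, img, N, in_dims, coils);	// img = A * coils

	linop_free(A);
}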
/* O I M G
 * 1 1 1 1 - not used
 * 1 1 A ! - forbidden
 * 1 A 1 ! - forbidden
 * A 1 1 ! - forbidden
 * A A 1 1 - replicated
 * A 1 A 1 - output
 * 1 A A A/A - input
 * A A A A - batch
 */
static struct operator_matrix_s* linop_matrix_priv2(unsigned int N, const long out_dims[N], const long in_dims[N], const long matrix_dims[N], const complex float* matrix)
{
	// to get assertions and cost estimate
	long max_dims[N];
	md_tenmul_dims(N, max_dims, out_dims, in_dims, matrix_dims);

	PTR_ALLOC(struct operator_matrix_s, data);
	SET_TYPEID(operator_matrix_s, data);

	data->N = N;

	PTR_ALLOC(long[N], out_dims1);
	md_copy_dims(N, *out_dims1, out_dims);
	data->out_dims = *PTR_PASS(out_dims1);

	PTR_ALLOC(long[N], mat_dims1);
	md_copy_dims(N, *mat_dims1, matrix_dims);
	data->mat_dims = *PTR_PASS(mat_dims1);

	PTR_ALLOC(long[N], in_dims1);
	md_copy_dims(N, *in_dims1, in_dims);
	data->in_dims = *PTR_PASS(in_dims1);

	complex float* mat = md_alloc(N, matrix_dims, CFL_SIZE);
	md_copy(N, matrix_dims, mat, matrix, CFL_SIZE);

	data->mat = mat;
	data->mat_gram = NULL;
#ifdef USE_CUDA
	data->mat_gpu = NULL;
	data->mat_gram_gpu = NULL;
#endif

#if 1
	// pre-multiply gram matrix (if there is a cost reduction)
	unsigned long out_flags = md_nontriv_dims(N, out_dims);
	unsigned long in_flags = md_nontriv_dims(N, in_dims);

	unsigned long del_flags = in_flags & ~out_flags;
	unsigned long new_flags = out_flags & ~in_flags;

	/* we double (again) for the gram matrix */

	PTR_ALLOC(long[2 * N], mat_dims2);
	PTR_ALLOC(long[2 * N], in_dims2);
	PTR_ALLOC(long[2 * N], gmt_dims2);
	PTR_ALLOC(long[2 * N], gin_dims2);
	PTR_ALLOC(long[2 * N], grm_dims2);
	PTR_ALLOC(long[2 * N], gout_dims2);

	shadow_dims(N, *gmt_dims2, matrix_dims);
	shadow_dims(N, *mat_dims2, matrix_dims);
	shadow_dims(N, *in_dims2, in_dims);
	shadow_dims(N, *gout_dims2, in_dims);
	shadow_dims(N, *gin_dims2, in_dims);
	shadow_dims(N, *grm_dims2, matrix_dims);

	/* move removed input dims into shadow position
	 * so that the gram matrix can have an output there */
	for (unsigned int i = 0; i < N; i++) {

		if (MD_IS_SET(del_flags, i)) {

			assert((*mat_dims2)[2 * i + 0] == (*in_dims2)[2 * i + 0]);

			(*mat_dims2)[2 * i + 1] = (*mat_dims2)[2 * i + 0];
			(*mat_dims2)[2 * i + 0] = 1;

			(*in_dims2)[2 * i + 1] = (*gin_dims2)[2 * i + 0];
			(*in_dims2)[2 * i + 0] = 1;
		}
	}

	for (unsigned int i = 0; i < N; i++) {

		if (MD_IS_SET(new_flags, i)) {

			(*grm_dims2)[2 * i + 0] = 1;
			(*grm_dims2)[2 * i + 1] = 1;
		}

		if (MD_IS_SET(del_flags, i)) {

			(*gout_dims2)[2 * i + 1] = (*gin_dims2)[2 * i + 0];
			(*gout_dims2)[2 * i + 0] = 1;

			(*grm_dims2)[2 * i + 0] = in_dims[i];
			(*grm_dims2)[2 * i + 1] = in_dims[i];
		}
	}

	long gmx_dims[2 * N];
	md_tenmul_dims(2 * N, gmx_dims, *gout_dims2, *gin_dims2, *grm_dims2);

	long mult_mat = md_calc_size(N, max_dims);
	long mult_gram = md_calc_size(2 * N, gmx_dims);

	if (mult_gram < 2 * mult_mat) {	// FIXME: rethink

		debug_printf(DP_DEBUG2, "Gram matrix: 2x %ld vs %ld\n", mult_mat, mult_gram);

		complex float* mat_gram = md_alloc(2 * N, *grm_dims2, CFL_SIZE);
		md_ztenmulc(2 * N, *grm_dims2, mat_gram, *gmt_dims2, matrix, *mat_dims2, matrix);

		data->mat_gram = mat_gram;
	}

	PTR_FREE(gmt_dims2);
	PTR_FREE(mat_dims2);
	PTR_FREE(in_dims2);

	data->gin_dims = *PTR_PASS(gin_dims2);
	data->gout_dims = *PTR_PASS(gout_dims2);
	data->grm_dims = *PTR_PASS(grm_dims2);
#else
	data->gin_dims = NULL;
	data->gout_dims = NULL;
	data->grm_dims = NULL;
#endif

	return PTR_PASS(data);
}
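/* Sketch of the cost argument (my notation): the normal operator of A is
 * x -> A^H (A x), i.e. two tensor multiplications per application.
 * Precomputing the gram matrix G = A^H A reduces this to one, x -> G x.
 * This is why the code precomputes G only when mult_gram < 2 * mult_mat:
 * a single G-multiplication must be cheaper than the two A-multiplications
 * it replaces. */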