Example #1
// Forward: from image to kspace
static void nufft_apply(const void* _data, complex float* dst, const complex float* src)
{
	struct nufft_data* data = (struct nufft_data*)_data;

	assert(!data->conf.toeplitz); // in Toeplitz mode the linphase does not include the roll-off correction, so it would have to be applied here

	unsigned int ND = data->N + 3;

	md_zmul2(ND, data->cml_dims, data->cml_strs, data->grid, data->cim_strs, src, data->lph_strs, data->linphase);
	linop_forward(data->fft_op, ND, data->cml_dims, data->grid, ND, data->cml_dims, data->grid);
	md_zmul2(ND, data->cml_dims, data->cml_strs, data->grid, data->cml_strs, data->grid, data->img_strs, data->fftmod);

	md_clear(ND, data->ksp_dims, dst, CFL_SIZE);

	complex float* gridX = md_alloc(data->N, data->cm2_dims, CFL_SIZE);

	long factors[data->N];

	for (unsigned int i = 0; i < data->N; i++)
		factors[i] = ((data->img_dims[i] > 1) && (i < 3)) ? 2 : 1;

	md_recompose(data->N, factors, data->cm2_dims, gridX, data->cml_dims, data->grid, CFL_SIZE);

	grid2H(2., data->width, data->beta, ND, data->trj_dims, data->traj, data->ksp_dims, dst, data->cm2_dims, gridX);

	md_free(gridX);

	if (NULL != data->weights)
		md_zmul2(data->N, data->ksp_dims, data->ksp_strs, dst, data->ksp_strs, dst, data->wgh_strs, data->weights);
}
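In operator notation, the forward pass above computes y = W · G^H · R · M · F · (L ⊙ x): pointwise multiplication of the coil images x with the precomputed linear phases L, FFT, multiplication with the fftmod phases M, recomposition R of the 2^d shifted copies onto the two-fold oversampled grid, interpolation G^H from the grid onto the trajectory (grid2H), and finally the optional sample weights W.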
Example #2
static void toeplitz_mult(const struct nufft_data* data, complex float* dst, const complex float* src)
{
	unsigned int ND = data->N + 3;

	md_zmul2(ND, data->cml_dims, data->cml_strs, data->grid, data->cim_strs, src, data->lph_strs, data->linphase);

	linop_forward(data->fft_op, ND, data->cml_dims, data->grid, ND, data->cml_dims, data->grid);
	md_zmul2(ND, data->cml_dims, data->cml_strs, data->grid, data->cml_strs, data->grid, data->psf_strs, data->psf);
	linop_adjoint(data->fft_op, ND, data->cml_dims, data->grid, ND, data->cml_dims, data->grid);

	md_clear(ND, data->cim_dims, dst, CFL_SIZE);
	md_zfmacc2(ND, data->cml_dims, data->cim_strs, dst, data->cml_strs, data->grid, data->lph_strs, data->linphase);
}
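toeplitz_mult evaluates the normal operator A^H A without any regridding: the point-spread function, precomputed on the oversampled grid, acts as a diagonal between a forward and an adjoint FFT. The closing md_zfmacc2 call accumulates dst += grid ⊙ conj(linphase) over the linphase dimension, which works because BART's md_calc_strides assigns a zero stride to singleton dimensions. A minimal sketch of that reduction idiom (a sketch only, assuming the md_* API from num/multind.h and num/flpmath.h):

#include <complex.h>

#include "num/multind.h"
#include "num/flpmath.h"

static void reduce_demo(void)
{
	enum { D = 2 };
	long dims[D] = { 4, 8 };	// e.g. voxels x linphase copies
	long rdims[D] = { 4, 1 };	// reduced along dimension 1

	long strs[D];
	long rstrs[D];
	md_calc_strides(D, strs, dims, sizeof(complex float));
	md_calc_strides(D, rstrs, rdims, sizeof(complex float));	// rstrs[1] == 0

	complex float* a = md_alloc(D, dims, sizeof(complex float));
	complex float* b = md_alloc(D, dims, sizeof(complex float));
	complex float* c = md_alloc(D, rdims, sizeof(complex float));

	md_zfill(D, dims, a, 1.);
	md_zfill(D, dims, b, 2.);
	md_clear(D, rdims, c, sizeof(complex float));

	// c[i] += sum_j a[i][j] * conj(b[i][j])
	md_zfmacc2(D, dims, rstrs, c, strs, a, strs, b);

	md_free(a);
	md_free(b);
	md_free(c);
}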
Example #3
static void homodyne(struct wdata wdata, unsigned int flags, unsigned int N, const long dims[N],
		const long strs[N], complex float* data, const complex float* idata)
{
	long cdims[N];
	md_copy_dims(N, cdims, dims);
	// cdims[0] = cdims[1] = cdims[2] = 24;
	cdims[wdata.pfdim] = (wdata.frac - 0.5) * dims[wdata.pfdim];

	complex float* center = md_alloc(N, cdims, CFL_SIZE);
	complex float* phase = md_alloc(N, dims, CFL_SIZE);

	md_resize_center(N, cdims, center, dims, idata, CFL_SIZE);
	md_resize_center(N, dims, phase, cdims, center, CFL_SIZE);
	md_free(center);

	ifftuc(N, dims, flags, phase, phase);
	md_zphsr(N, dims, phase, phase);

	md_zmul2(N, dims, strs, data, strs, idata, wdata.wstrs, wdata.weights);

	ifftuc(N, dims, flags, data, data);

	md_zmulc(N, dims, data, data, phase);
	md_zreal(N, dims, data, data);

	md_free(phase);
}
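This is classic homodyne reconstruction for partial-Fourier data: a low-resolution phase map p is estimated from the symmetric center of k-space (cut out with md_resize_center and zero-padded back), the asymmetric data are compensated by the ramp in wdata.weights, and the result is phase-demodulated and projected onto the real axis:

	m ≈ Re{ conj(p) ⊙ F⁻¹{W ⊙ y} },	with p = exp(i · arg(F⁻¹{y_center}))

which is accurate when the image is close to real after phase correction.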
Example #4
/**
 * Undersampled FFT adjoint operator
 */
void ufft_apply_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct ufft_data* data = CONTAINER_OF(_data, const struct ufft_data, base);

	md_zmul2(DIMS, data->ksp_dims, data->ksp_strs, dst, data->ksp_strs, src, data->pat_strs, data->pat);

	linop_adjoint(data->fft_op, DIMS, data->ksp_dims, dst, DIMS, data->ksp_dims, dst);
}
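Since the sampling pattern is a real diagonal operator, the adjoint of the forward model A = P F is A^H = F^H P^H = F^H P: the pattern is applied first, then the adjoint (inverse) FFT, i.e. the forward steps in reverse order.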
Example #5
/**
 * Undersampled FFT adjoint operator
 */
void ufft_apply_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	struct ufft_data* data = CAST_DOWN(ufft_data, _data);

	md_zmul2(DIMS, data->ksp_dims, data->ksp_strs, dst, data->ksp_strs, src, data->pat_strs, data->pat);

	linop_adjoint(data->fft_op, DIMS, data->ksp_dims, dst, DIMS, data->ksp_dims, dst);
}
Example #6
static void homodyne(struct wdata wdata, unsigned int flags, unsigned int N, const long dims[N],
		const long strs[N], complex float* data, const complex float* idata,
		const long pstrs[N], const complex float* phase)
{
	md_zmul2(N, dims, strs, data, strs, idata, wdata.wstrs, wdata.weights);
	ifftuc(N, dims, flags, data, data);

	md_zmulc2(N, dims, strs, data, strs, data, pstrs, phase);
	md_zreal(N, dims, data, data);
}
Example #7
static void sampling_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const auto data = CAST_DOWN(sampling_data_s, _data);

#ifdef USE_CUDA
	const complex float* pattern = get_pat(data, cuda_ondevice(src));
#else
	const complex float* pattern = data->pattern;
#endif

	md_zmul2(DIMS, data->dims, data->strs, dst, data->strs, src, data->pat_strs, pattern);
}
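The pattern is stored without a coil dimension, and md_zmul2 broadcasts it across all coils because pat_strs carries a zero stride wherever the pattern is singleton. A minimal sketch of this broadcasting idiom (a sketch only; DIMS, COIL_FLAG and CFL_SIZE as in misc/mri.h):

#include <complex.h>

#include "num/multind.h"
#include "num/flpmath.h"
#include "misc/mri.h"

static void apply_pattern(const long ksp_dims[DIMS], complex float* dst,
		const complex float* src, const complex float* pattern)
{
	long pat_dims[DIMS];
	md_select_dims(DIMS, ~COIL_FLAG, pat_dims, ksp_dims);	// drop the coil dimension

	long ksp_strs[DIMS];
	long pat_strs[DIMS];
	md_calc_strides(DIMS, ksp_strs, ksp_dims, CFL_SIZE);
	md_calc_strides(DIMS, pat_strs, pat_dims, CFL_SIZE);	// stride 0 along coils

	// dst = src .* pattern, the pattern replicated over all coils
	md_zmul2(DIMS, ksp_dims, ksp_strs, dst, ksp_strs, src, pat_strs, pattern);
}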
Example #8
static void sense_forward(const void* _data, complex float* out, const complex float* imgs)
{
	const struct sense_data* data = _data;

	md_clear(DIMS, data->data_dims, out, CFL_SIZE);
	md_zfmac2(DIMS, data->sens_dims, data->data_strs, out, data->sens_strs, data->sens, data->imgs_strs, imgs); 

	fftc(DIMS, data->data_dims, FFT_FLAGS, out, out);
	fftscale(DIMS, data->data_dims, FFT_FLAGS, out, out);

	md_zmul2(DIMS, data->data_dims, data->data_strs, out, data->data_strs, out, data->mask_strs, data->pattern);
}
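This is the SENSE forward model y = P ⊙ F{S x}: md_zfmac2 expands the image into coil images by multiplication with the sensitivities S (accumulating over any dimension that is singleton in the output, e.g. maps), the centered and scaled FFT takes them to k-space, and the sampling pattern selects the acquired positions.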
Example #9
void noir_fun(struct noir_data* data, complex float* dst, const complex float* src)
{	
	long split = md_calc_size(DIMS, data->imgs_dims);

	md_copy(DIMS, data->imgs_dims, data->xn, src, CFL_SIZE);
	noir_forw_coils(data, data->sens, src + split);

	md_clear(DIMS, data->sign_dims, data->tmp, CFL_SIZE);
	md_zfmac2(DIMS, data->sign_dims, data->sign_strs, data->tmp, data->imgs_strs, src, data->coil_strs, data->sens);

	// could be moved to the beginning, but see comment below
	md_zmul2(DIMS, data->sign_dims, data->sign_strs, data->tmp, data->sign_strs, data->tmp, data->mask_strs, data->mask);

	fft(DIMS, data->sign_dims, FFT_FLAGS, data->tmp, data->tmp);

	md_clear(DIMS, data->data_dims, dst, CFL_SIZE);
	md_zfmac2(DIMS, data->sign_dims, data->data_strs, dst, data->sign_strs, data->tmp, data->ptrn_strs, data->pattern);
}
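noir_fun evaluates the nonlinear signal model of NLINV, F(m, ĉ) = P ⊙ F{mask ⊙ (m ⊙ c)}, where the sensitivities c are obtained from the k-space coefficients ĉ through noir_forw_coils (weighting followed by an inverse FFT); src stacks the image m and ĉ, split at md_calc_size(DIMS, imgs_dims).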
Example #10
void data_consistency(const long dims[DIMS], complex float* dst, const complex float* pattern, const complex float* kspace1, const complex float* kspace2)
{
	assert(1 == dims[MAPS_DIM]);

	long strs[DIMS];
	long dims1[DIMS];
	long strs1[DIMS];

	md_select_dims(DIMS, ~COIL_FLAG, dims1, dims);
	md_calc_strides(DIMS, strs1, dims1, CFL_SIZE);
	md_calc_strides(DIMS, strs, dims, CFL_SIZE);

	complex float* tmp = md_alloc_sameplace(DIMS, dims, CFL_SIZE, dst);
	md_zmul2(DIMS, dims, strs, tmp, strs, kspace2, strs1, pattern);
	md_zsub(DIMS, dims, tmp, kspace2, tmp);
	md_zfmac2(DIMS, dims, strs, tmp, strs, kspace1, strs1, pattern);
	md_copy(DIMS, dims, dst, tmp, CFL_SIZE);
	md_free(tmp);
}
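With a binary pattern P this computes dst = P ⊙ kspace1 + (1 − P) ⊙ kspace2: acquired positions are taken from kspace1, all others are filled from kspace2. The code realizes it as tmp = kspace2 − P ⊙ kspace2 followed by tmp += P ⊙ kspace1, so the pattern (which lacks a coil dimension) only enters through broadcast multiplications with strs1.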
Example #11
// Adjoint: from kspace to image
static void nufft_apply_adjoint(const void* _data, complex float* dst, const complex float* src)
{
	const struct nufft_data* data = _data;

	unsigned int ND = data->N + 3;

	complex float* gridX = md_alloc(data->N, data->cm2_dims, CFL_SIZE);
	md_clear(data->N, data->cm2_dims, gridX, CFL_SIZE);

	complex float* wdat = NULL;

	if (NULL != data->weights) {

		wdat = md_alloc(data->N, data->ksp_dims, CFL_SIZE);
		md_zmulc2(data->N, data->ksp_dims, data->ksp_strs, wdat, data->ksp_strs, src, data->wgh_strs, data->weights);
		src = wdat;
	}

	grid2(2., data->width, data->beta, ND, data->trj_dims, data->traj, data->cm2_dims, gridX, data->ksp_dims, src);

	md_free(wdat);

	long factors[data->N];

	for (unsigned int i = 0; i < data->N; i++)
		factors[i] = ((data->img_dims[i] > 1) && (i < 3)) ? 2 : 1;

	md_decompose(data->N, factors, data->cml_dims, data->grid, data->cm2_dims, gridX, CFL_SIZE);
	md_free(gridX);
	md_zmulc2(ND, data->cml_dims, data->cml_strs, data->grid, data->cml_strs, data->grid, data->img_strs, data->fftmod);
	linop_adjoint(data->fft_op, ND, data->cml_dims, data->grid, ND, data->cml_dims, data->grid);

	md_clear(ND, data->cim_dims, dst, CFL_SIZE);
	md_zfmacc2(ND, data->cml_dims, data->cim_strs, dst, data->cml_strs, data->grid, data->lph_strs, data->linphase);

	if (data->conf.toeplitz)
		md_zmul2(ND, data->cim_dims, data->cim_strs, dst, data->cim_strs, dst, data->img_strs, data->roll);
}
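The factor-2 decomposition together with the linear phases implements the two-fold oversampled FFT using transforms of the original image size: splitting a length-2N inverse DFT into even and odd spectral samples gives, up to normalization,

	x[n] = Σ_k X[2k] e^{2πi nk/N} + e^{πi n/N} Σ_k X[2k+1] e^{2πi nk/N}

so each of the 2^d shifted sub-grids needs only an image-sized FFT plus a pointwise linear phase, and the conjugate accumulation with md_zfmacc2 sums all copies back into a single coil image.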
Example #12
void overlapandsave2NE(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], complex float* src1, const long dims2[N], complex float* src2, const long mdims[N], complex float* msk)
{
	long dims1B[N];

	long tdims[2 * N];
	long nodims[2 * N];
	long ndims1[2 * N];
	long ndims2[2 * N];

	long shift[2 * N];

	unsigned int nflags = 0;

	for (int i = 0; i < N; i++) {

		if (MD_IS_SET(flags, i)) {

			nflags = MD_SET(nflags, 2 * i);

			assert(1 == dims2[i] % 2);
			assert(0 == blk[i] % 2);
			assert(0 == dims1[i] % 2);
			assert(0 == odims[i] % blk[i]);
			assert(0 == dims1[i] % blk[i]);
			assert(dims1[i] == odims[i]);
			assert(dims2[i] <= blk[i]);
			assert(dims1[i] >= dims2[i]);

			// blocked output

			nodims[i * 2 + 1] = odims[i] / blk[i];
			nodims[i * 2 + 0] = blk[i];

			// expanded temporary storage

			tdims[i * 2 + 1] = dims1[i] / blk[i];
			tdims[i * 2 + 0] = blk[i] + dims2[i] - 1;

			// blocked input

			// ---|---,---,---|---
			//   + +++ +
			//       + +++ +

			// resized input

			dims1B[i] = dims1[i] + 2 * blk[i];

			ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks?
			ndims1[i * 2 + 0] = blk[i];

			shift[i * 2 + 1] = 0;
			shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2;

			// kernel

			ndims2[i * 2 + 1] = 1;
			ndims2[i * 2 + 0] = dims2[i];

		} else {

			nodims[i * 2 + 1] = 1;
			nodims[i * 2 + 0] = odims[i];

			tdims[i * 2 + 1] = 1;
			tdims[i * 2 + 0] = dims1[i];

			ndims1[i * 2 + 1] = 1;
			ndims1[i * 2 + 0] = dims1[i];

			shift[i * 2 + 1] = 0;
			shift[i * 2 + 0] = 0;


			dims1B[i] = dims1[i];

			ndims2[i * 2 + 1] = 1;
			ndims2[i * 2 + 0] = dims2[i];
		}
	}

	complex float* src1B = md_alloc(N, dims1B, CFL_SIZE);
	complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE);
	complex float* tmpX = md_alloc(N, odims, CFL_SIZE);

	long str1[2 * N];
	long str2[2 * N];

	md_calc_strides(2 * N, str1, ndims1, sizeof(complex float));
	md_calc_strides(2 * N, str2, tdims, sizeof(complex float));

	long off = md_calc_offset(2 * N, str1, shift);

	md_resize_center(N, dims1B, src1B, dims1, src1, sizeof(complex float));

	// we can loop here

	md_copy2(2 * N, tdims, str2, tmp, str1, ((void*)src1B) + off, sizeof(complex float));

	conv(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, nodims, tmpX, tdims, tmp, ndims2, src2);

	long ostr[N];
	long mstr[N];

	md_calc_strides(N, ostr, odims, sizeof(complex float));
	md_calc_strides(N, mstr, mdims, sizeof(complex float));

	md_zmul2(N, odims, ostr, tmpX, ostr, tmpX, mstr, msk);

	convH(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2);

	md_clear(N, dims1B, src1B, sizeof(complex float));
	md_zadd2(2 * N, tdims, str1, ((void*)src1B) + off, str1, ((void*)src1B) + off, str2, tmp);

	//

	md_resize_center(N, dims1, dst, dims1B, src1B, sizeof(complex float));

	md_free(src1B);
	md_free(tmpX);
	md_free(tmp);
}
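Overlap-and-save evaluates a long valid convolution blockwise: along each selected dimension the input is cut into blocks of blk[i] samples, each extended to blk[i] + dims2[i] − 1 samples so that a valid convolution of one extended block yields exactly blk[i] output samples. This NE ("normal equations") variant additionally applies the mask between conv and convH, so the function as a whole evaluates the normal operator of the masked convolution one block at a time.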
Example #13
/**
 *
 * NUFFT operator initialization
 *
 * @param N		-	number of dimensions
 * @param ksp_dims      -	kspace dimension
 * @param cim_dims	-	coil images dimension
 * @param traj		-	trajectory
 * @param conf          -	configuration options
 * @param use_gpu       -	use gpu boolean
 *
 */
struct linop_s* nufft_create(unsigned int N, const long ksp_dims[N], const long cim_dims[N], const long traj_dims[N], const complex float* traj, const complex float* weights, struct nufft_conf_s conf, bool use_gpu)

{
	struct nufft_data* data = (struct nufft_data*)xmalloc(sizeof(struct nufft_data));

	data->N = N;
	data->use_gpu = use_gpu;
	data->traj = traj;
	data->conf = conf;

	data->width = 3.;
	data->beta = calc_beta(2., data->width);

	// get dims

	assert(md_check_compat(N - 3, 0, ksp_dims + 3, cim_dims + 3));

	unsigned int ND = N + 3;

	data->ksp_dims = xmalloc(ND * sizeof(long));
	data->cim_dims = xmalloc(ND * sizeof(long));
	data->cml_dims = xmalloc(ND * sizeof(long));
	data->img_dims = xmalloc(ND * sizeof(long));
	data->trj_dims = xmalloc(ND * sizeof(long));
	data->lph_dims = xmalloc(ND * sizeof(long));
	data->psf_dims = xmalloc(ND * sizeof(long));
	data->wgh_dims = xmalloc(ND * sizeof(long));

	data->ksp_strs = xmalloc(ND * sizeof(long));
	data->cim_strs = xmalloc(ND * sizeof(long));
	data->cml_strs = xmalloc(ND * sizeof(long));
	data->img_strs = xmalloc(ND * sizeof(long));
	data->trj_strs = xmalloc(ND * sizeof(long));
	data->lph_strs = xmalloc(ND * sizeof(long));
	data->psf_strs = xmalloc(ND * sizeof(long));
	data->wgh_strs = xmalloc(ND * sizeof(long));

	md_singleton_dims(ND, data->cim_dims);
	md_singleton_dims(ND, data->ksp_dims);

	md_copy_dims(N, data->cim_dims, cim_dims);
	md_copy_dims(N, data->ksp_dims, ksp_dims);


	md_select_dims(ND, FFT_FLAGS, data->img_dims, data->cim_dims);

	assert(3 == traj_dims[0]);
	assert(traj_dims[1] == ksp_dims[1]);
	assert(traj_dims[2] == ksp_dims[2]);
	assert(md_check_compat(N - 3, ~0, traj_dims + 3, ksp_dims + 3));
	assert(md_check_bounds(N - 3, ~0, traj_dims + 3, ksp_dims + 3));

	md_singleton_dims(ND, data->trj_dims);
	md_copy_dims(N, data->trj_dims, traj_dims);


	// get strides

	md_calc_strides(ND, data->cim_strs, data->cim_dims, CFL_SIZE);
	md_calc_strides(ND, data->img_strs, data->img_dims, CFL_SIZE);
	md_calc_strides(ND, data->trj_strs, data->trj_dims, CFL_SIZE);
	md_calc_strides(ND, data->ksp_strs, data->ksp_dims, CFL_SIZE);


	data->weights = NULL;

	if (NULL != weights) {

		md_singleton_dims(ND, data->wgh_dims);
		md_select_dims(N, ~MD_BIT(0), data->wgh_dims, data->trj_dims);
		md_calc_strides(ND, data->wgh_strs, data->wgh_dims, CFL_SIZE);

		complex float* tmp = md_alloc(ND, data->wgh_dims, CFL_SIZE);
		md_copy(ND, data->wgh_dims, tmp, weights, CFL_SIZE);
		data->weights = tmp;
	}


	complex float* roll = md_alloc(ND, data->img_dims, CFL_SIZE);
	rolloff_correction(2., data->width, data->beta, data->img_dims, roll);
	data->roll = roll;


	complex float* linphase = compute_linphases(N, data->lph_dims, data->img_dims);

	md_calc_strides(ND, data->lph_strs, data->lph_dims, CFL_SIZE);

	if (!conf.toeplitz)
		md_zmul2(ND, data->lph_dims, data->lph_strs, linphase, data->lph_strs, linphase, data->img_strs, data->roll);


	fftmod(ND, data->lph_dims, FFT_FLAGS, linphase, linphase);
	fftscale(ND, data->lph_dims, FFT_FLAGS, linphase, linphase);
//	md_zsmul(ND, data->lph_dims, linphase, linphase, 1. / (float)(data->trj_dims[1] * data->trj_dims[2]));

	complex float* fftm = md_alloc(ND, data->img_dims, CFL_SIZE);
	md_zfill(ND, data->img_dims, fftm, 1.);
	fftmod(ND, data->img_dims, FFT_FLAGS, fftm, fftm);
	data->fftmod = fftm;



	data->linphase = linphase;
	data->psf = NULL;

	if (conf.toeplitz) {

#if 0
		md_copy_dims(ND, data->psf_dims, data->lph_dims);
#else
		md_copy_dims(3, data->psf_dims, data->lph_dims);
		md_copy_dims(ND - 3, data->psf_dims + 3, data->trj_dims + 3);
		data->psf_dims[N] = data->lph_dims[N];
#endif
		md_calc_strides(ND, data->psf_strs, data->psf_dims, CFL_SIZE);
		data->psf = compute_psf2(N, data->psf_dims, data->trj_dims, data->traj, data->weights);
	}


	md_copy_dims(ND, data->cml_dims, data->cim_dims);
	data->cml_dims[N + 0] = data->lph_dims[N + 0];

	md_calc_strides(ND, data->cml_strs, data->cml_dims, CFL_SIZE);


	data->cm2_dims = xmalloc(ND * sizeof(long));
	// !
	md_copy_dims(ND, data->cm2_dims, data->cim_dims);
	for (int i = 0; i < 3; i++)
		data->cm2_dims[i] = (1 == cim_dims[i]) ? 1 : (2 * cim_dims[i]);



	data->grid = md_alloc(ND, data->cml_dims, CFL_SIZE);

	data->fft_op = linop_fft_create(ND, data->cml_dims, FFT_FLAGS, use_gpu);



	return linop_create(N, ksp_dims, N, cim_dims,
		data, nufft_apply, nufft_apply_adjoint, nufft_apply_normal, NULL, nufft_free_data);
}
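A minimal usage sketch for the operator returned above (a sketch only: the dimension arrays, buffers and the conf struct are assumed to be prepared by the caller):

// coil images -> k-space samples along the trajectory, and back
struct linop_s* nufft_op = nufft_create(DIMS, ksp_dims, cim_dims,
		traj_dims, traj, NULL, conf, false);

linop_forward(nufft_op, DIMS, ksp_dims, ksp, DIMS, cim_dims, cim);

// adjoint (not the inverse): k-space samples -> coil images
linop_adjoint(nufft_op, DIMS, cim_dims, cim, DIMS, ksp_dims, ksp);

linop_free(nufft_op);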
Example #14
/**
 * Default transfer function. dst = src .* pattern
 *
 * @param _data transfer function data
 * @param pattern sampling pattern
 * @param dst destination pointer
 * @param src source pointer
 */
void transfer_function(void* _data, const complex float* pattern, complex float* dst, const complex float* src)
{
	struct transfer_data_s* data = _data;
	md_zmul2(DIMS, data->dims, data->strs, dst, data->strs, src, data->strs_tf, pattern);
}
Example #15
static void sampling_apply(const void* _data, complex float* dst, const complex float* src)
{
	const struct sampling_data_s* data = _data;
	md_zmul2(DIMS, data->dims, data->strs, dst, data->strs, src, data->pat_strs, data->pattern);
}
Example #16
void overlapandsave2HB(const struct vec_ops* ops, int N, unsigned int flags, const long blk[N], const long dims1[N], complex float* dst, const long odims[N], const complex float* src1, const long dims2[N], const complex float* src2, const long mdims[N], const complex float* msk)
{
	long dims1B[N];

	long tdims[2 * N];
	long nodims[2 * N];
	long ndims2[2 * N];
	long nmdims[2 * N];


	int e = N;

	for (int i = 0; i < N; i++) {

		if (MD_IS_SET(flags, i)) {

			assert(1 == dims2[i] % 2);
			assert(0 == blk[i] % 2);
			assert(0 == dims1[i] % 2);
			assert(0 == odims[i] % blk[i]);
			assert(0 == dims1[i] % blk[i]);
			assert(dims1[i] == odims[i]);
			assert(dims2[i] <= blk[i]);
			assert(dims1[i] >= dims2[i]);
			assert((1 == mdims[i]) || (mdims[i] == dims1[i]));

			// blocked output

			nodims[e] = odims[i] / blk[i];
			nodims[i] = blk[i];

			// expanded temporary storage

			tdims[e] = dims1[i] / blk[i];
			tdims[i] = blk[i] + dims2[i] - 1;

			// blocked input

			// ---|---,---,---|---
			//   + +++ +
			//       + +++ +

			if (1 == mdims[i]) {

				nmdims[2 * i + 1] = 1;
				nmdims[2 * i + 0] = 1;

			} else {

				nmdims[2 * i + 1] = mdims[i] / blk[i];
				nmdims[2 * i + 0] = blk[i];
			}

			// resized input
			// minimal padding
			dims1B[i] = dims1[i] + (dims2[i] - 1);

			// kernel

			ndims2[e] = 1;
			ndims2[i] = dims2[i];

			e++;

		} else {

			nodims[i] = odims[i];
			tdims[i] = dims1[i];
			nmdims[2 * i + 1] = 1;
			nmdims[2 * i + 0] = mdims[i];

			dims1B[i] = dims1[i];
			ndims2[i] = dims2[i];
		}
	}

	int NE = e;

	// long S = md_calc_size(N, dims1B, 1);

	long str1[NE];

	long str1B[N];
	md_calc_strides(N, str1B, dims1B, sizeof(complex float));

	e = N;
	for (int i = 0; i < N; i++) {

		str1[i] = str1B[i];

		if (MD_IS_SET(flags, i))
			str1[e++] = str1B[i] * blk[i];
	}
	assert(NE == e);



	long str2[NE];
	md_calc_strides(NE, str2, tdims, sizeof(complex float));


	long ostr[NE];
	long mstr[NE];
	long mstrB[2 * N];

	md_calc_strides(NE, ostr, nodims, sizeof(complex float));
	md_calc_strides(2 * N, mstrB, nmdims, sizeof(complex float));

	e = N;
	for (int i = 0; i < N; i++) {

		mstr[i] = mstrB[2 * i + 0];

		if (MD_IS_SET(flags, i))
			mstr[e++] = mstrB[2 * i + 1];
	}
	assert(NE == e);
	
	// we can loop here
	assert(NE == N + 3);
	assert(1 == ndims2[N + 0]);
	assert(1 == ndims2[N + 1]);
	assert(1 == ndims2[N + 2]);
	assert(tdims[N + 0] == nodims[N + 0]);
	assert(tdims[N + 1] == nodims[N + 1]);
	assert(tdims[N + 2] == nodims[N + 2]);

	long R = md_calc_size(N, nodims);
	long T = md_calc_size(N, tdims);


	//complex float* src1C = xmalloc(S * sizeof(complex float));
	complex float* src1C = dst;

	md_clear(N, dims1B, src1C, CFL_SIZE);	// must be done here

	#pragma omp parallel for collapse(3)
	for (int k = 0; k < nodims[N + 2]; k++) {
	for (int j = 0; j < nodims[N + 1]; j++) {
	for (int i = 0; i < nodims[N + 0]; i++) {

		    complex float* tmp = (complex float*)ops->allocate(2 * T);
		    complex float* tmpX = (complex float*)ops->allocate(2 * R);

		    long off1 = str1[N + 0] * i + str1[N + 1] * j + str1[N + 2] * k;
		    long off2 = mstr[N + 0] * i + mstr[N + 1] * j + mstr[N + 2] * k;
		    long off3 = ostr[N + 0] * i + ostr[N + 1] * j + ostr[N + 2] * k;

		    md_zmul2(N, nodims, ostr, tmpX, ostr, ((const void*)src1) + off3, mstr, ((const void*)msk) + off2);
		    convH(N, flags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2);

		    #pragma omp critical
		    md_zadd2(N, tdims, str1, ((void*)src1C) + off1, str1, ((void*)src1C) + off1, str2,  tmp);

		    ops->del((void*)tmpX);
		    ops->del((void*)tmp);
	}}}
}
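Each iteration of the parallel triple loop handles one output block, but neighboring blocks write to overlapping regions of the result (the extended blocks overlap by dims2[i] − 1 samples along each blocked dimension), so the accumulation with md_zadd2 must be serialized in the omp critical section, and the buffer has to be cleared once before the loop rather than inside it.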
Example #17
int main_nlinv(int argc, char* argv[])
{
    int iter = 8;
    float l1 = -1.;
    bool waterfat = false;
    bool rvc = false;
    bool normalize = true;
    float restrict_fov = -1.;
    float csh[3] = { 0., 0., 0. };
    bool usegpu = false;
    const char* psf = NULL;

    const struct opt_s opts[] = {

        { 'l', true, opt_float, &l1, NULL },
        { 'i', true, opt_int, &iter, NULL },
        { 'c', false, opt_set, &rvc, NULL },
        { 'N', false, opt_clear, &normalize, NULL },
        { 'f', true, opt_float, &restrict_fov, NULL },
        { 'p', true, opt_string, &psf, NULL },
        { 'g', false, opt_set, &usegpu, NULL },
    };

    cmdline(&argc, argv, 2, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);

    num_init();

    assert(iter > 0);


    long ksp_dims[DIMS];
    complex float* kspace_data = load_cfl(argv[1], DIMS, ksp_dims);

    long dims[DIMS];
    md_copy_dims(DIMS, dims, ksp_dims);

    if (waterfat)
        dims[CSHIFT_DIM] = 2;

    long img_dims[DIMS];
    md_select_dims(DIMS, FFT_FLAGS|CSHIFT_FLAG, img_dims, dims);

    long img_strs[DIMS];
    md_calc_strides(DIMS, img_strs, img_dims, CFL_SIZE);


    complex float* image = create_cfl(argv[2], DIMS, img_dims);

    long msk_dims[DIMS];
    md_select_dims(DIMS, FFT_FLAGS, msk_dims, dims);

    long msk_strs[DIMS];
    md_calc_strides(DIMS, msk_strs, msk_dims, CFL_SIZE);

    complex float* mask;
    complex float* norm = md_alloc(DIMS, msk_dims, CFL_SIZE);
    complex float* sens;

    if (4 == argc) {

        sens = create_cfl(argv[3], DIMS, ksp_dims);

    } else {

        sens = md_alloc(DIMS, ksp_dims, CFL_SIZE);
    }


    complex float* pattern = NULL;
    long pat_dims[DIMS];

    if (NULL != psf) {

        pattern = load_cfl(psf, DIMS, pat_dims);

        // FIXME: check compatibility
    } else {

        pattern = md_alloc(DIMS, img_dims, CFL_SIZE);
        estimate_pattern(DIMS, ksp_dims, COIL_DIM, pattern, kspace_data);
    }


    if (waterfat) {

        size_t size = md_calc_size(DIMS, msk_dims);
        md_copy(DIMS, msk_dims, pattern + size, pattern, CFL_SIZE);

        long shift_dims[DIMS];
        md_select_dims(DIMS, FFT_FLAGS, shift_dims, msk_dims);

        long shift_strs[DIMS];
        md_calc_strides(DIMS, shift_strs, shift_dims, CFL_SIZE);

        complex float* shift = md_alloc(DIMS, shift_dims, CFL_SIZE);

        unsigned int X = shift_dims[READ_DIM];
        unsigned int Y = shift_dims[PHS1_DIM];
        unsigned int Z = shift_dims[PHS2_DIM];

        for (unsigned int x = 0; x < X; x++)
            for (unsigned int y = 0; y < Y; y++)
                for (unsigned int z = 0; z < Z; z++)
                    shift[(z * Y + y) * X + x] = cexp(2.i * M_PI * ((csh[0] * x) / X + (csh[1] * y) / Y + (csh[2] * z) / Z));

        md_zmul2(DIMS, msk_dims, msk_strs, pattern + size, msk_strs, pattern + size, shift_strs, shift);
        md_free(shift);
    }

#if 0
    float scaling = 1. / estimate_scaling(ksp_dims, NULL, kspace_data);
#else
    float scaling = 100. / md_znorm(DIMS, ksp_dims, kspace_data);
#endif
    debug_printf(DP_INFO, "Scaling: %f\n", scaling);
    md_zsmul(DIMS, ksp_dims, kspace_data, kspace_data, scaling);

    if (-1. == restrict_fov) {

        mask = md_alloc(DIMS, msk_dims, CFL_SIZE);
        md_zfill(DIMS, msk_dims, mask, 1.);

    } else {

        float restrict_dims[DIMS] = { [0 ... DIMS - 1] = 1. };
        restrict_dims[0] = restrict_fov;
        restrict_dims[1] = restrict_fov;
        restrict_dims[2] = restrict_fov;
        mask = compute_mask(DIMS, msk_dims, restrict_dims);
    }

#ifdef  USE_CUDA
    if (usegpu) {

        complex float* kspace_gpu = md_alloc_gpu(DIMS, ksp_dims, CFL_SIZE);
        md_copy(DIMS, ksp_dims, kspace_gpu, kspace_data, CFL_SIZE);
        noir_recon(dims, iter, l1, image, NULL, pattern, mask, kspace_gpu, rvc, usegpu);
        md_free(kspace_gpu);

        md_zfill(DIMS, ksp_dims, sens, 1.);

    } else
#endif
        noir_recon(dims, iter, l1, image, sens, pattern, mask, kspace_data, rvc, usegpu);

    if (normalize) {

        md_zrss(DIMS, ksp_dims, COIL_FLAG, norm, sens);
        md_zmul2(DIMS, img_dims, img_strs, image, img_strs, image, msk_strs, norm);
    }

    if (4 == argc) {

        long strs[DIMS];

        md_calc_strides(DIMS, strs, ksp_dims, CFL_SIZE);

        if (normalize)
            md_zdiv2(DIMS, ksp_dims, strs, sens, strs, sens, img_strs, norm);

        fftmod(DIMS, ksp_dims, FFT_FLAGS, sens, sens);

        unmap_cfl(DIMS, ksp_dims, sens);

    } else {

        md_free(sens);
    }

    md_free(norm);
    md_free(mask);

    if (NULL != psf)
        unmap_cfl(DIMS, pat_dims, pattern);
    else
        md_free(pattern);


    unmap_cfl(DIMS, img_dims, image);
    unmap_cfl(DIMS, ksp_dims, kspace_data);
    exit(0);
}
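A plausible invocation of this tool (assuming the standalone bart command wrapper; file names are hypothetical):

	bart nlinv -i 10 -N kspace image sens

where -i sets the number of iterations (default 8), -N disables the normalization of the image by the root-sum-of-squares of the sensitivities, -g would run the reconstruction on the GPU, and the optional third argument stores the sensitivities.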
Example #18
void noir_forw_coils(struct noir_data* data, complex float* dst, const complex float* src)
{
	md_zmul2(DIMS, data->coil_dims, data->coil_strs, dst, data->coil_strs, src, data->wght_strs, data->weights);
	ifft(DIMS, data->coil_dims, FFT_FLAGS, dst, dst);
//	fftmod(DIMS, data->coil_dims, 7, dst);
}
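In NLINV the coil sensitivities are parameterized by k-space coefficients: noir_forw_coils multiplies them with a smoothing weight (typically of the form (1 + a‖k‖²)^(−b/2)) and applies an inverse FFT, so that the optimization implicitly penalizes non-smooth sensitivity maps.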