Example #1
0
/*
 * Apply the plan's random circular shift to 'data' in place.
 * Does nothing when every entry of randShift_tr is zero.
 */
void circshift(struct wavelet_plan_s* plan, data_t* data) 
{
	int ndims = plan->numdims_tr;
	bool any_shift = false;

	for (int d = 0; d < ndims; d++)
		if (0 != plan->randShift_tr[d])
			any_shift = true;

	if (!any_shift)
		return;

	// md_circ_shift cannot work in place, so shift from a scratch copy
	void* scratch = md_alloc_sameplace(ndims, plan->imSize_tr, sizeof(data_t), data);

	md_copy(ndims, plan->imSize_tr, scratch, data, sizeof(data_t));
	md_circ_shift(ndims, plan->imSize_tr, plan->randShift_tr, data, scratch, sizeof(data_t));

	md_free(scratch);
}
Example #2
0
File: lrthresh.c Project: hcmh/bart
/*
 * Low rank thresholding for arbitrary block sizes.
 *
 * Per level: zero-pad so the block size divides the image evenly,
 * optionally random-shift the block grid (cycle spinning), reshape each
 * block into a column of a matrix, soft-threshold the singular values of
 * every M x N block, then transform back and undo shift and padding.
 */
static void lrthresh_apply(const operator_data_t* _data, float mu, complex float* dst, const complex float* src)
{
	struct lrthresh_data_s* data = CAST_DOWN(lrthresh_data_s, _data);

	// effective threshold: operator scaling times configured lambda
	float lambda = mu * data->lambda;

	// unit strides of the decomposition; strs1[LEVEL_DIM] is the element
	// offset between consecutive levels in src/dst
	long strs1[DIMS];
	md_calc_strides(DIMS, strs1, data->dims_decom, 1);

//#pragma omp parallel for
	for (int l = 0; l < data->levels; l++) {

		complex float* dstl = dst + l * strs1[LEVEL_DIM];
		const complex float* srcl = src + l * strs1[LEVEL_DIM];

		long blkdims[DIMS];
		long shifts[DIMS];
		long unshifts[DIMS];
		long zpad_dims[DIMS];
		long M = 1;	// rows of each block matrix (product of blkdims over mflags)

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[i] = data->blkdims[l][i];
			// round image dims up to the next multiple of the block size
			zpad_dims[i] = (data->dims[i] + blkdims[i] - 1) / blkdims[i];
			zpad_dims[i] *= blkdims[i];

			if (MD_IS_SET(data->mflags, i))
				M *= blkdims[i];

			// random block-grid shift; zero when randshift is disabled
			if (data->randshift)
				shifts[i] = rand_lim(MIN(blkdims[i] - 1, zpad_dims[i] - blkdims[i]));
			else
				shifts[i] = 0;

			unshifts[i] = -shifts[i];
		}

		long zpad_strs[DIMS];
		md_calc_strides(DIMS, zpad_strs, zpad_dims, CFL_SIZE);

		long blk_size = md_calc_size(DIMS, blkdims);
		long img_size = md_calc_size(DIMS, zpad_dims);
		long N = blk_size / M;		// columns of each block matrix
		long B = img_size / blk_size;	// number of blocks

		// last level may model noise: treat the whole image as one M x 1 block
		if (data->noise && (l == data->levels - 1)) {

			M = img_size;
			N = 1;
			B = 1;
		}


		complex float* tmp = md_alloc_sameplace(DIMS, zpad_dims, CFL_SIZE, dst);

		// periodic extension of the level's image into the padded grid
		md_circ_ext(DIMS, zpad_dims, tmp, data->dims, srcl, CFL_SIZE);

		// NOTE(review): in-place circular shift (src == dst); assumes
		// md_circ_shift supports aliased buffers — confirm.
		md_circ_shift(DIMS, zpad_dims, shifts, tmp, tmp, CFL_SIZE);


		long mat_dims[2];
		basorati_dims(DIMS, mat_dims, blkdims, zpad_dims);

		complex float* tmp_mat = md_alloc_sameplace(2, mat_dims, CFL_SIZE, dst);

		// Reshape image into a blk_size x number of blocks matrix

		basorati_matrix(DIMS, blkdims, mat_dims, tmp_mat, zpad_dims, zpad_strs, tmp);

		// threshold singular values of every M x N block;
		// GWIDTH scales lambda by the block geometry (M, N, B)
		batch_svthresh(M, N, mat_dims[1], lambda * GWIDTH(M, N, B), *(complex float (*)[mat_dims[1]][M][N])tmp_mat);

		//	for ( int b = 0; b < mat_dims[1]; b++ )
		//	svthresh(M, N, lambda * GWIDTH(M, N, B), tmp_mat, tmp_mat);

		// back from matrix form into the padded image
		basorati_matrixH(DIMS, blkdims, zpad_dims, zpad_strs, tmp, mat_dims, tmp_mat);

		// undo the random shift, then crop the padding into the output
		md_circ_shift(DIMS, zpad_dims, unshifts, tmp, tmp, CFL_SIZE);

		md_resize(DIMS, data->dims, dstl, zpad_dims, tmp, CFL_SIZE);

		md_free(tmp);
		md_free(tmp_mat);
	}
}
Example #3
0
/*
 * Compute per-voxel coil-covariance information from calibration data,
 * smoothed over a sliding window of size kdims (Walsh-style estimation).
 *
 * NOTE(review): the output packs cosize = channels * (channels + 1) / 2
 * values per voxel along COIL_DIM — presumably the Hermitian covariance
 * matrix in the triangular packing produced by gram_matrix2; confirm
 * against its definition.
 *
 * bsize:   requested smoothing window size (clamped to the image dims)
 * dims:    output dimensions; dims[COIL_DIM] must equal cosize
 * sens:    output buffer (also used as scratch for the zero-padded ifft)
 * caldims: dimensions of the calibration region
 * data:    calibration data
 */
void walsh(const long bsize[3], const long dims[DIMS], complex float* sens, const long caldims[DIMS], const complex float* data)
{
	assert(1 == caldims[MAPS_DIM]);
	assert(1 == dims[MAPS_DIM]);

	int channels = caldims[COIL_DIM];
	int cosize = channels * (channels + 1) / 2;	// entries of a packed Hermitian matrix
	assert(dims[COIL_DIM] == cosize);

	// image-space dims with one entry per coil (not per covariance entry)
	long dims1[DIMS];
	md_copy_dims(DIMS, dims1, dims);
	dims1[COIL_DIM] = channels;
	
	// window size clamped so it never exceeds the image
	// NOTE(review): kdims[3] is left uninitialized; assumes
	// calibration_matrix reads only the first three entries — confirm.
	long kdims[4];
	kdims[0] = MIN(bsize[0], dims[0]);
	kdims[1] = MIN(bsize[1], dims[1]);
	kdims[2] = MIN(bsize[2], dims[2]);

	// zero-pad calibration data to image size and go to image space
	md_resize_center(DIMS, dims1, sens, caldims, data, CFL_SIZE);
	ifftc(DIMS, dims1, FFT_FLAGS, sens, sens);

	// enlarge by kdims - 1 so every voxel has a full window of neighbors
	long odims[DIMS];
	md_copy_dims(DIMS, odims, dims1);

	for (int i = 0; i < 3; i++)
		odims[i] = dims[i] + kdims[i] - 1;

	complex float* tmp = md_alloc(DIMS, odims, CFL_SIZE);
#if 0
	md_resizec(DIMS, odims, tmp, dims1, sens, CFL_SIZE);
#else
	// periodic extension followed by a centering circular shift
	// (replaces the centered resize in the disabled branch above)
	long cen[DIMS] = { 0 };

	for (int i = 0; i < 3; i++)
		cen[i] = (odims[i] - dims[i] + 1) / 2;

	complex float* tmp1 = md_alloc(DIMS, odims, CFL_SIZE);
	md_circ_ext(DIMS, odims, tmp1, dims1, sens, CFL_SIZE);
//	md_resize(DIMS, odims, tmp1, dims1, sens, CFL_SIZE);
	md_circ_shift(DIMS, odims, cen, tmp, tmp1, CFL_SIZE);
	md_free(tmp1);
#endif

	// NOTE(review): cm is never freed here — if calibration_matrix
	// transfers ownership of the returned buffer, this leaks; confirm.
	long calmat_dims[2];
	complex float* cm = calibration_matrix(calmat_dims, kdims, odims, tmp);
	md_free(tmp);

	int xh = dims[0];
	int yh = dims[1];
	int zh = dims[2];

	int pixels = calmat_dims[1] / channels;	// window samples per coil

#pragma omp parallel for
	for (int k = 0; k < zh; k++) {

		// VLAs: per-thread scratch for one voxel's window and covariance
		complex float in[channels][pixels];
		complex float cov[cosize];

		for (int j = 0; j < yh; j++) {
			for (int i = 0; i < xh; i++) {

				// gather every coil's window samples for voxel (i,j,k);
				// indexing assumes x-fastest layout of cm
				for (int c = 0; c < channels; c++)
					for (int p = 0; p < pixels; p++)
						in[c][p] = cm[((((c * pixels + p) * zh) + k) * yh + j) * xh + i];

				// channels x channels covariance, packed into cov[]
				gram_matrix2(channels, cov, pixels, in);

				for (int l = 0; l < cosize; l++)
					sens[(((l * zh) + k) * yh + j) * xh + i] = cov[l];
			}
		}
	}
}
Example #4
0
/*
 * Low rank thresholding for arbitrary block sizes.
 *
 * Per level: zero-pad so the block size divides the image evenly,
 * optionally random-shift the block grid (cycle spinning), reshape each
 * block into a column of a matrix, soft-threshold the singular values of
 * every M x N block, then transform back and undo shift and padding.
 *
 * Fix: when data->randshift was false, tmp was never initialized from
 * tmp_ext (basorati_matrix read garbage) and the thresholded result was
 * never copied back into tmp_ext (dst received the unthresholded input).
 * Both paths now fall back to a plain md_copy.
 */
static void lrthresh_apply(const void* _data, float mu, complex float* dst, const complex float* src)
{
	struct lrthresh_data_s* data = (struct lrthresh_data_s*)_data;

	// effective threshold: operator scaling times configured lambda
	float lambda = mu * data->lambda;

	// unit strides of the decomposition; strs1[LEVEL_DIM] is the element
	// offset between consecutive levels in src/dst
	long strs1[DIMS];
	md_calc_strides(DIMS, strs1, data->dims_decom, 1);

//#pragma omp parallel for
	for (int l = 0; l < data->levels; l++) {

		complex float* dstl = dst + l * strs1[LEVEL_DIM];
		const complex float* srcl = src + l * strs1[LEVEL_DIM];

		// Initialize
		long blkdims[DIMS];
		long shifts[DIMS];
		long unshifts[DIMS];
		long zpad_dims[DIMS];
		long M = 1;	// rows of each block matrix (product of blkdims over mflags)

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[i] = data->blkdims[l][i];
			// round image dims up to the next multiple of the block size
			zpad_dims[i] = (data->dims[i] + blkdims[i] - 1) / blkdims[i];
			zpad_dims[i] *= blkdims[i];

			if (MD_IS_SET(data->mflags, i))
				M *= blkdims[i];

			// random block-grid shift; zero when randshift is disabled
			if (data->randshift)
				shifts[i] = rand_lim(MIN(blkdims[i] - 1, zpad_dims[i] - blkdims[i]));
			else
				shifts[i] = 0;

			unshifts[i] = -shifts[i];
		}

		long zpad_strs[DIMS];
		md_calc_strides(DIMS, zpad_strs, zpad_dims, CFL_SIZE);

		long blk_size = md_calc_size(DIMS, blkdims);
		long img_size = md_calc_size(DIMS, zpad_dims);
		long N = blk_size / M;		// columns of each block matrix
		long B = img_size / blk_size;	// number of blocks

		// last level may model noise: treat the whole image as one M x 1 block
		if (data->noise && (l == data->levels - 1)) {

			M = img_size;
			N = 1;
			B = 1;
		}


		// Initialize tmp
		complex float* tmp_ext;
#ifdef USE_CUDA
		tmp_ext = (data->use_gpu ? md_alloc_gpu : md_alloc)(DIMS, zpad_dims, CFL_SIZE);
#else
		tmp_ext = md_alloc(DIMS, zpad_dims, CFL_SIZE);
#endif

		complex float* tmp;
#ifdef USE_CUDA
		tmp = (data->use_gpu ? md_alloc_gpu : md_alloc)(DIMS, zpad_dims, CFL_SIZE);
#else
		tmp = md_alloc(DIMS, zpad_dims, CFL_SIZE);
#endif
		// periodic extension of the level's image into the padded grid
		md_circ_ext(DIMS, zpad_dims, tmp_ext, data->dims, srcl, CFL_SIZE);

		// shift into tmp — or plain copy, so tmp is always initialized
		if (data->randshift)
			md_circ_shift(DIMS, zpad_dims, shifts, tmp, tmp_ext, CFL_SIZE);
		else
			md_copy(DIMS, zpad_dims, tmp, tmp_ext, CFL_SIZE);

		// Initialize tmp_mat
		long mat_dims[2];
		basorati_dims(DIMS, mat_dims, blkdims, zpad_dims);

		complex float* tmp_mat;
#ifdef USE_CUDA
		tmp_mat = (data->use_gpu ? md_alloc_gpu : md_alloc)(2, mat_dims, CFL_SIZE);
#else
		tmp_mat = md_alloc(2, mat_dims, CFL_SIZE);
#endif
		// Reshape image into a blk_size x number of blocks matrix

		basorati_matrix(DIMS, blkdims, mat_dims, tmp_mat, zpad_dims, zpad_strs, tmp);

		// threshold singular values of every M x N block;
		// GWIDTH scales lambda by the block geometry (M, N, B)
		batch_svthresh(M, N, mat_dims[1], lambda * GWIDTH(M, N, B), tmp_mat, tmp_mat);

		//	for ( int b = 0; b < mat_dims[1]; b++ )
		//	svthresh(M, N, lambda * GWIDTH(M, N, B), tmp_mat, tmp_mat);

		// back from matrix form into the padded image
		basorati_matrixH(DIMS, blkdims, zpad_dims, zpad_strs, tmp, mat_dims, tmp_mat);


		// unshift into tmp_ext — or plain copy, so the thresholded
		// result always reaches the output
		if (data->randshift)
			md_circ_shift(DIMS, zpad_dims, unshifts, tmp_ext, tmp, CFL_SIZE);
		else
			md_copy(DIMS, zpad_dims, tmp_ext, tmp, CFL_SIZE);

		// crop the padding into the level's output
		md_resize(DIMS, data->dims, dstl, zpad_dims, tmp_ext, CFL_SIZE);

		// Free data
		md_free(tmp);
		md_free(tmp_ext);
		md_free(tmp_mat);
	}
}