Example #1
// [AC-Adaptive]
static void radial_self_delays(unsigned int N, float shifts[N], const float phi[N], const long dims[DIMS], const complex float* in)
{
	unsigned int d = 2;
	unsigned int flags = (1 << d);

	assert(N == dims[d]);

	long dims1[DIMS];
	md_select_dims(DIMS, ~flags, dims1, dims);

	complex float* tmp1 = md_alloc(DIMS, dims1, CFL_SIZE);
	complex float* tmp2 = md_alloc(DIMS, dims1, CFL_SIZE);

	long pos[DIMS] = { 0 };

	for (unsigned int i = 0; i < dims[d]; i++) {

		pos[d] = i;
		md_copy_block(DIMS, pos, dims1, tmp1, dims, in, CFL_SIZE);

		// find opposing spoke

		float mdelta = 0.;
		int mindex = 0;

		for (unsigned int j = 0; j < dims[d]; j++) {

			float delta = cabsf(cexpf(1.i * phi[j]) - cexpf(1.i * phi[i]));

			if (mdelta <= delta) {

				mdelta = delta;
				mindex = j;
			}
		}

		pos[d] = mindex;
		md_copy_block(DIMS, pos, dims1, tmp2, dims, in, CFL_SIZE);


		unsigned int d2 = 1;
		float rshifts[DIMS];
		md_flip(DIMS, dims1, MD_BIT(d2), tmp2, tmp2, CFL_SIZE); // could be done by iFFT in est_subpixel_shift
		est_subpixel_shift(DIMS, rshifts, dims1, MD_BIT(d2), tmp2, tmp1);

		float mshift = rshifts[d2] / 2.; // mdelta

		shifts[i] = mshift;
	}

	md_free(tmp1);
	md_free(tmp2);
}
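
A pattern that recurs throughout these examples is the dimension bitmask: MD_BIT(d) selects dimension d, and functions such as md_select_dims, md_slice and md_flip read such a mask as "operate along these dimensions". The standalone sketch below inlines plausible definitions of the macros (assumed to match what BART's num/multind.h provides) so the convention can be tried without the library.

#include <assert.h>

// Assumed to mirror BART's num/multind.h; inlined here so the sketch compiles stand-alone.
#define MD_BIT(x)	(1ul << (x))
#define MD_IS_SET(x, d)	(0 != ((x) & MD_BIT(d)))
#define MD_SET(x, d)	((x) | MD_BIT(d))
#define MD_CLEAR(x, d)	((x) & ~MD_BIT(d))

int main(void)
{
	unsigned long flags = MD_BIT(2);		// select dimension 2 only, as in Example #1

	assert(MD_IS_SET(flags, 2));
	assert(!MD_IS_SET(flags, 0));

	// ~flags ("all but dimension 2") is what md_select_dims uses to collapse that dimension
	assert(!MD_IS_SET(~flags, 2));

	assert(MD_IS_SET(MD_SET(flags, 0), 0));		// add dimension 0 to the selection
	assert(!MD_IS_SET(MD_CLEAR(flags, 2), 2));	// remove dimension 2 again

	return 0;
}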
Example #2
File: slice.c Project: hcmh/bart
int main_slice(int argc, char* argv[])
{
	mini_cmdline(&argc, argv, 4, usage_str, help_str);

	num_init();

	long in_dims[DIMS];
	long out_dims[DIMS];
	
	complex float* in_data = load_cfl(argv[3], DIMS, in_dims);

	int dim = atoi(argv[1]);
	int pos = atoi(argv[2]);

	assert(dim < DIMS);
	assert(pos >= 0);
	assert(pos < in_dims[dim]);

	for (int i = 0; i < DIMS; i++)
		out_dims[i] = in_dims[i];

	out_dims[dim] = 1;

	complex float* out_data = create_cfl(argv[4], DIMS, out_dims);

	long pos2[DIMS] = { [0 ... DIMS - 1] = 0 };
	pos2[dim] = pos;
	md_slice(DIMS, MD_BIT(dim), pos2, in_dims, out_data, in_data, CFL_SIZE);

	unmap_cfl(DIMS, out_dims, out_data);
	unmap_cfl(DIMS, in_dims, in_data);
	return 0;
}
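
For intuition on what the md_slice call above computes: the chosen dimension is fixed at pos and reduced to size 1, while all other dimensions are copied unchanged. A hand-rolled 2D analogue (plain loops, no BART calls; slice2d is an invented name for illustration):

#include <assert.h>
#include <complex.h>

// Hypothetical 2D analogue of md_slice: keep only position 'pos' along dimension 'dim'.
static void slice2d(int dim, long pos, long nx, long ny,
		complex float* out, const complex float* in)
{
	long ox = (0 == dim) ? 1 : nx;		// output extent along x
	long oy = (1 == dim) ? 1 : ny;		// output extent along y

	for (long y = 0; y < oy; y++)
		for (long x = 0; x < ox; x++) {

			long ix = (0 == dim) ? pos : x;
			long iy = (1 == dim) ? pos : y;

			out[x + ox * y] = in[ix + nx * iy];
		}
}

int main(void)
{
	complex float in[2 * 3] = { 0, 1, 2, 3, 4, 5 };	// nx = 2, ny = 3, x-fastest layout
	complex float out[2];

	slice2d(1, 2, 2, 3, out, in);			// fix dimension 1 (y) at position 2

	assert((4.f == crealf(out[0])) && (5.f == crealf(out[1])));
	return 0;
}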
Example #3
/*
 * Adjoint of the finite difference operator along the specified dimensions.
 * Equivalent to computing the finite difference in reverse order.
 *
 * @param snip if false: keeps the original value for the last entry;
 * if true: implements the adjoint of the difference matrix with an all-zero first row
 *
 * optr = [-diff(iptr); iptr(end)] = flip(fdiff_apply(flip(iptr)))
 */
static void fdiff_apply_adjoint(const linop_data_t* _data, complex float* optr, const complex float* iptr)
{
	const auto data = CAST_DOWN(fdiff_s, _data);

	md_copy2(data->D, data->dims, data->str, optr, data->str, iptr, CFL_SIZE);

	for (unsigned int i=0; i < data->D; i++) {

		unsigned int single_flag = data->flags & MD_BIT(i);

		if (single_flag) {

			complex float* tmp = md_alloc_sameplace(data->D, data->dims, CFL_SIZE, optr);
			complex float* tmp2 = md_alloc_sameplace(data->D, data->dims, CFL_SIZE, optr);
			md_flip2(data->D, data->dims, single_flag, data->str, tmp2, data->str, optr, CFL_SIZE);
			md_zfinitediff_core2(data->D, data->dims, single_flag, false, tmp, data->str, tmp2, data->str, tmp2);
			md_flip2(data->D, data->dims, single_flag, data->str, optr, data->str, tmp2, CFL_SIZE);

			md_free(tmp2);
			md_free(tmp);

			if (data->snip) {

				long zdims[data->D];
				md_select_dims(data->D, ~0, zdims, data->dims);

				zdims[i] = 1;
				md_zsub2(data->D, zdims, data->str, optr, data->str, optr, data->str, iptr);
			}
		}
	}
}
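
The identity in the comment above, optr = [-diff(iptr); iptr(end)] = flip(fdiff_apply(flip(iptr))), can be checked on a toy example. The sketch below works on plain real arrays rather than strided complex buffers and reads fdiff_apply as the forward difference that keeps the first entry, (Dx)[0] = x[0], (Dx)[i] = x[i] - x[i-1] (an assumption about the forward operator, which is not part of this excerpt). It verifies that the flip construction reproduces [-diff(y); y(end)] and that the result is the adjoint of the forward difference, i.e. <Dx, y> == <x, D'y>.

#include <assert.h>
#include <math.h>

#define N 5

// forward difference: (Dx)[0] = x[0], (Dx)[i] = x[i] - x[i-1]
static void fwd_diff(double out[N], const double in[N])
{
	out[0] = in[0];

	for (int i = 1; i < N; i++)
		out[i] = in[i] - in[i - 1];
}

// adjoint per the comment: [-diff(y); y(end)]
static void adj_diff(double out[N], const double in[N])
{
	for (int i = 0; i < N - 1; i++)
		out[i] = in[i] - in[i + 1];

	out[N - 1] = in[N - 1];
}

static void flip(double out[N], const double in[N])
{
	for (int i = 0; i < N; i++)
		out[i] = in[N - 1 - i];
}

static double dot(const double a[N], const double b[N])
{
	double s = 0.;

	for (int i = 0; i < N; i++)
		s += a[i] * b[i];

	return s;
}

int main(void)
{
	double x[N] = { 1., -2., 3., 0.5, 4. };
	double y[N] = { 2., 1., -1., 3., -0.5 };
	double Dx[N], Dty[N], tmp[N], tmp2[N], Dty2[N];

	fwd_diff(Dx, x);
	adj_diff(Dty, y);

	// flip -> forward difference -> flip gives the same adjoint
	flip(tmp, y);
	fwd_diff(tmp2, tmp);
	flip(Dty2, tmp2);

	for (int i = 0; i < N; i++)
		assert(fabs(Dty[i] - Dty2[i]) < 1e-12);

	// <Dx, y> == <x, D'y>
	assert(fabs(dot(Dx, y) - dot(x, Dty)) < 1e-12);

	return 0;
}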
Example #4
int main_estdelay(int argc, char* argv[])
{
	bool ring = false;
	int pad_factor = 100;
	unsigned int no_intersec_sp = 1;
	float size = 1.5;

	const struct opt_s opts[] = {

		OPT_SET('R', &ring, "RING method"),
		OPT_INT('p', &pad_factor, "p", "[RING] Padding"),
		OPT_UINT('n', &no_intersec_sp, "n", "[RING] Number of intersecting spokes"),
		OPT_FLOAT('r', &size, "r", "[RING] Central region size"),
	};

	cmdline(&argc, argv, 2, 2, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	if (pad_factor % 2 != 0)
		error("Pad_factor -p should be even\n");


	long tdims[DIMS];
	const complex float* traj = load_cfl(argv[1], DIMS, tdims);

	long tdims1[DIMS];
	md_select_dims(DIMS, ~MD_BIT(1), tdims1, tdims);

	complex float* traj1 = md_alloc(DIMS, tdims1, CFL_SIZE);
	md_slice(DIMS, MD_BIT(1), (long[DIMS]){ 0 }, tdims, traj1, traj, CFL_SIZE);
Example #5
int main_estdelay(int argc, char* argv[])
{
	mini_cmdline(&argc, argv, 2, usage_str, help_str);

	long tdims[DIMS];
	const complex float* traj = load_cfl(argv[1], DIMS, tdims);

	long tdims1[DIMS];
	md_select_dims(DIMS, ~MD_BIT(1), tdims1, tdims);

	complex float* traj1 = md_alloc(DIMS, tdims1, CFL_SIZE);
	md_slice(DIMS, MD_BIT(1), (long[DIMS]){ 0 }, tdims, traj1, traj, CFL_SIZE);
Example #6
void estimate_pattern(unsigned int D, const long dims[D], unsigned int dim, complex float* pattern, const complex float* kspace_data)
{
	md_zrss(D, dims, MD_BIT(dim), pattern, kspace_data);

	long dims2[D];
	long strs2[D];
	assert(dim < D);
	md_select_dims(D, ~MD_BIT(dim), dims2, dims);
	md_calc_strides(D, strs2, dims2, CFL_SIZE);

	long strs1[D];
	md_singleton_strides(D, strs1);

	md_zcmp2(D, dims2, strs2, pattern, strs2, pattern, strs1, &(complex float){ 0. });
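
The md_zcmp2 call above compares every element of pattern against a single constant: strs1 comes from md_singleton_strides, which (as I read it) yields all-zero strides, so the one complex value 0. is broadcast across all positions of the output. A hand-rolled 1D sketch of that zero-stride broadcast, using byte strides as the md_*2 functions do (zcmp2_1d is an invented name):

#include <assert.h>
#include <complex.h>

// Invented 1D analogue of a strided md_zcmp2: out[i] = (a == b) ? 1 : 0,
// where each operand advances by its own byte stride; a zero stride broadcasts a scalar.
static void zcmp2_1d(long n, long ostr, complex float* out,
		long astr, const complex float* a,
		long bstr, const complex float* b)
{
	for (long i = 0; i < n; i++) {

		const complex float* ai = (const complex float*)((const char*)a + i * astr);
		const complex float* bi = (const complex float*)((const char*)b + i * bstr);
		complex float* oi = (complex float*)((char*)out + i * ostr);

		*oi = (*ai == *bi) ? 1.f : 0.f;
	}
}

int main(void)
{
	complex float pattern[4] = { 0.f, 2.f, 0.f, 5.f };
	complex float out[4];
	long cfl = sizeof(complex float);

	// zero stride on the last operand: the single constant 0. is compared against every element
	zcmp2_1d(4, cfl, out, cfl, pattern, 0, &(complex float){ 0.f });

	assert((1.f == crealf(out[0])) && (0.f == crealf(out[1])));
	assert((1.f == crealf(out[2])) && (0.f == crealf(out[3])));
	return 0;
}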
Example #7
struct prox_4pt_dfwavelet_data* prepare_prox_4pt_dfwavelet_data(const long im_dims[DIMS], const long min_size[3], const complex float res[3], unsigned int flow_dim, float lambda, bool use_gpu)
{
	PTR_ALLOC(struct prox_4pt_dfwavelet_data, data);

        md_copy_dims(DIMS, data->im_dims, im_dims);
        md_select_dims(DIMS, FFT_FLAGS, data->tim_dims, im_dims);
        md_calc_strides(DIMS, data->im_strs, im_dims, CFL_SIZE);

        assert(4 == im_dims[flow_dim]);

        // initialize temp
        
#ifdef USE_CUDA
        if (use_gpu) {

                data->vx = md_alloc_gpu(DIMS, data->tim_dims, CFL_SIZE);
                data->vy = md_alloc_gpu(DIMS, data->tim_dims, CFL_SIZE);
                data->vz = md_alloc_gpu(DIMS, data->tim_dims, CFL_SIZE);
                data->ph0 = md_alloc_gpu(DIMS, data->tim_dims, CFL_SIZE);
                data->pc0 = md_alloc_gpu(DIMS, data->tim_dims, CFL_SIZE);
                data->pc1 = md_alloc_gpu(DIMS, data->tim_dims, CFL_SIZE);
                data->pc2 = md_alloc_gpu(DIMS, data->tim_dims, CFL_SIZE);
                data->pc3 = md_alloc_gpu(DIMS, data->tim_dims, CFL_SIZE);

        } else
#endif
        {
                data->vx = md_alloc(DIMS, data->tim_dims, CFL_SIZE);
                data->vy = md_alloc(DIMS, data->tim_dims, CFL_SIZE);
                data->vz = md_alloc(DIMS, data->tim_dims, CFL_SIZE);
                data->ph0 = md_alloc(DIMS, data->tim_dims, CFL_SIZE);
                data->pc0 = md_alloc(DIMS, data->tim_dims, CFL_SIZE);
                data->pc1 = md_alloc(DIMS, data->tim_dims, CFL_SIZE);
                data->pc2 = md_alloc(DIMS, data->tim_dims, CFL_SIZE);
                data->pc3 = md_alloc(DIMS, data->tim_dims, CFL_SIZE);
        }

        data->flow_dim = flow_dim;
        data->slice_flag = ~FFT_FLAGS;
        data->lambda = lambda;

        data->plan = prepare_dfwavelet_plan( 3, data->tim_dims, (long*) min_size, (complex float*) res, use_gpu );
        
	data->w_op = wavelet_create(DIMS, data->tim_dims, FFT_FLAGS, min_size, true, use_gpu);
        data->wthresh_op = prox_unithresh_create(DIMS, data->w_op, lambda, MD_BIT(data->flow_dim), use_gpu);

        return data;
}
Example #8
int main_homodyne(int argc, char* argv[])
{
	bool clear = false;
	bool image = false;
	const char* phase_ref = NULL;

	float alpha = 0.;

	num_init();

	const struct opt_s opts[] = {

		{ 'r', true, opt_float, &alpha, " <alpha>\tOffset of ramp filter, between 0 and 1. alpha=0 is a full ramp, alpha=1 is a horizontal line" },
		{ 'I', false, opt_set, &image, "\tInput is in image domain" },
		{ 'C', false, opt_set, &clear, "\tClear unacquired portion of kspace" },
		{ 'P', true, opt_string, &phase_ref, " <phase_ref>\tUse <phase_ref> as phase reference" },
	};

	cmdline(&argc, argv, 4, 4, usage_str, help_str, ARRAY_SIZE(opts), opts);


	const int N = DIMS;
	long dims[N];
	complex float* idata = load_cfl(argv[3], N, dims);
	complex float* data = create_cfl(argv[4], N, dims);

	int pfdim = atoi(argv[1]);
	float frac = atof(argv[2]);

	assert((0 <= pfdim) && (pfdim < N));
	assert(frac > 0.);

	if (image) {
		complex float* ksp_in = md_alloc(N, dims, CFL_SIZE);
		fftuc(N, dims, FFT_FLAGS, ksp_in, idata);
		md_copy(N, dims, idata, ksp_in, CFL_SIZE);
		md_free(ksp_in);
	}


	long strs[N];
	md_calc_strides(N, strs, dims, CFL_SIZE);

	struct wdata wdata;
	wdata.frac = frac;
	wdata.pfdim = pfdim;
	md_select_dims(N, MD_BIT(pfdim), wdata.wdims, dims);
	md_calc_strides(N, wdata.wstrs, wdata.wdims, CFL_SIZE);
	wdata.weights = md_alloc(N, wdata.wdims, CFL_SIZE);
	wdata.alpha = alpha;
	wdata.clear = clear;

	md_loop(N, wdata.wdims, &wdata, comp_weights);

	long pstrs[N];
	long pdims[N];
	complex float* phase = NULL;

	if (NULL == phase_ref) {

		phase = estimate_phase(wdata, FFT_FLAGS, N, dims, idata);
		md_copy_dims(N, pdims, dims);
	}
	else
		phase = load_cfl(phase_ref, N, pdims);

	md_calc_strides(N, pstrs, pdims, CFL_SIZE);

	homodyne(wdata, FFT_FLAGS, N, dims, strs, data, idata, pstrs, phase);

	md_free(wdata.weights);

	if (NULL == phase_ref)
		md_free(phase);
	else {
		unmap_cfl(N, pdims, phase);
		free((void*)phase_ref);
	}

	unmap_cfl(N, dims, idata);
	unmap_cfl(N, dims, data);

	exit(0);
}
Example #9
int main_pics(int argc, char* argv[])
{
	// Initialize default parameters

	struct sense_conf conf = sense_defaults;



	bool use_gpu = false;

	bool randshift = true;
	unsigned int maxiter = 30;
	float step = -1.;

	// Start time count

	double start_time = timestamp();

	// Read input options
	struct nufft_conf_s nuconf = nufft_conf_defaults;
	nuconf.toeplitz = false;

	float restrict_fov = -1.;
	const char* pat_file = NULL;
	const char* traj_file = NULL;
	bool scale_im = false;
	bool eigen = false;
	float scaling = 0.;

	unsigned int llr_blk = 8;

	const char* image_truth_file = NULL;
	bool im_truth = false;

	const char* image_start_file = NULL;
	bool warm_start = false;

	bool hogwild = false;
	bool fast = false;
	float admm_rho = iter_admm_defaults.rho;
	unsigned int admm_maxitercg = iter_admm_defaults.maxitercg;

	struct opt_reg_s ropts;
	ropts.r = 0;
	ropts.algo = CG;
	ropts.lambda = -1.;


	const struct opt_s opts[] = {

		{ 'l', true, opt_reg, &ropts, "1/-l2\t\ttoggle l1-wavelet or l2 regularization." },
		OPT_FLOAT('r', &ropts.lambda, "lambda", "regularization parameter"),
		{ 'R', true, opt_reg, &ropts, " <T>:A:B:C\tgeneralized regularization options (-Rh for help)" },
		OPT_SET('c', &conf.rvc, "real-value constraint"),
		OPT_FLOAT('s', &step, "step", "iteration stepsize"),
		OPT_UINT('i', &maxiter, "iter", "max. number of iterations"),
		OPT_STRING('t', &traj_file, "file", "k-space trajectory"),
		OPT_CLEAR('n', &randshift, "disable random wavelet cycle spinning"),
		OPT_SET('g', &use_gpu, "use GPU"),
		OPT_STRING('p', &pat_file, "file", "pattern or weights"),
		OPT_SELECT('I', enum algo_t, &ropts.algo, IST, "(select IST)"),
		OPT_UINT('b', &llr_blk, "blk", "Lowrank block size"),
		OPT_SET('e', &eigen, "Scale stepsize based on max. eigenvalue"),
		OPT_SET('H', &hogwild, "(hogwild)"),
		OPT_SET('F', &fast, "(fast)"),
		OPT_STRING('T', &image_truth_file, "file", "(truth file)"),
		OPT_STRING('W', &image_start_file, "<img>", "Warm start with <img>"),
		OPT_INT('d', &debug_level, "level", "Debug level"),
		OPT_INT('O', &conf.rwiter, "rwiter", "(reweighting)"),
		OPT_FLOAT('o', &conf.gamma, "gamma", "(reweighting)"),
		OPT_FLOAT('u', &admm_rho, "rho", "ADMM rho"),
		OPT_UINT('C', &admm_maxitercg, "iter", "ADMM max. CG iterations"),
		OPT_FLOAT('q', &conf.cclambda, "cclambda", "(cclambda)"),
		OPT_FLOAT('f', &restrict_fov, "rfov", "restrict FOV"),
		OPT_SELECT('m', enum algo_t, &ropts.algo, ADMM, "Select ADMM"),
		OPT_FLOAT('w', &scaling, "val", "scaling"),
		OPT_SET('S', &scale_im, "Re-scale the image after reconstruction"),
	};

	cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);

	if (NULL != image_truth_file)
		im_truth = true;

	if (NULL != image_start_file)
		warm_start = true;


	long max_dims[DIMS];
	long map_dims[DIMS];
	long pat_dims[DIMS];
	long img_dims[DIMS];
	long coilim_dims[DIMS];
	long ksp_dims[DIMS];
	long traj_dims[DIMS];



	// load kspace and maps and get dimensions

	complex float* kspace = load_cfl(argv[1], DIMS, ksp_dims);
	complex float* maps = load_cfl(argv[2], DIMS, map_dims);


	complex float* traj = NULL;

	if (NULL != traj_file)
		traj = load_cfl(traj_file, DIMS, traj_dims);


	md_copy_dims(DIMS, max_dims, ksp_dims);
	md_copy_dims(5, max_dims, map_dims);

	md_select_dims(DIMS, ~COIL_FLAG, img_dims, max_dims);
	md_select_dims(DIMS, ~MAPS_FLAG, coilim_dims, max_dims);

	if (!md_check_compat(DIMS, ~(MD_BIT(MAPS_DIM)|FFT_FLAGS), img_dims, map_dims))
		error("Dimensions of image and sensitivities do not match!\n");

	assert(1 == ksp_dims[MAPS_DIM]);


	(use_gpu ? num_init_gpu : num_init)();

	// print options

	if (use_gpu)
		debug_printf(DP_INFO, "GPU reconstruction\n");

	if (map_dims[MAPS_DIM] > 1) 
		debug_printf(DP_INFO, "%ld maps.\nESPIRiT reconstruction.\n", map_dims[MAPS_DIM]);

	if (hogwild)
		debug_printf(DP_INFO, "Hogwild stepsize\n");

	if (im_truth)
		debug_printf(DP_INFO, "Compare to truth\n");



	// initialize sampling pattern

	complex float* pattern = NULL;

	if (NULL != pat_file) {

		pattern = load_cfl(pat_file, DIMS, pat_dims);

		assert(md_check_compat(DIMS, COIL_FLAG, ksp_dims, pat_dims));

	} else {

		md_select_dims(DIMS, ~COIL_FLAG, pat_dims, ksp_dims);
		pattern = md_alloc(DIMS, pat_dims, CFL_SIZE);
		estimate_pattern(DIMS, ksp_dims, COIL_DIM, pattern, kspace);
	}


	if ((NULL != traj_file) && (NULL == pat_file)) {

		md_free(pattern);
		pattern = NULL;
		nuconf.toeplitz = true;

	} else {

		// print some statistics

		long T = md_calc_size(DIMS, pat_dims);
		long samples = (long)pow(md_znorm(DIMS, pat_dims, pattern), 2.);

		debug_printf(DP_INFO, "Size: %ld Samples: %ld Acc: %.2f\n", T, samples, (float)T / (float)samples);
	}

	if (NULL == traj_file) {

		fftmod(DIMS, ksp_dims, FFT_FLAGS, kspace, kspace);
		fftmod(DIMS, map_dims, FFT_FLAGS, maps, maps);
	}

	// apply fov mask to sensitivities

	if (-1. != restrict_fov) {

		float restrict_dims[DIMS] = { [0 ... DIMS - 1] = 1. };
		restrict_dims[0] = restrict_fov;
		restrict_dims[1] = restrict_fov;
		restrict_dims[2] = restrict_fov;

		apply_mask(DIMS, map_dims, maps, restrict_dims);
	}
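
One detail worth spelling out in the statistics block above: for a binary sampling pattern the squared l2-norm, pow(md_znorm(...), 2.), simply counts the sampled positions, so T / samples is the acceleration factor. A tiny worked check with a hypothetical 4x4 pattern:

#include <assert.h>

int main(void)
{
	// hypothetical 4x4 pattern with every other line acquired (8 of 16 samples)
	float pattern[16] = {
		1, 1, 1, 1,
		0, 0, 0, 0,
		1, 1, 1, 1,
		0, 0, 0, 0,
	};

	float sqnorm = 0.f;

	for (int i = 0; i < 16; i++)
		sqnorm += pattern[i] * pattern[i];	// squared l2-norm

	long T = 16;
	long samples = (long)sqnorm;			// == 8, since entries are 0 or 1

	assert(8 == samples);
	assert(2.f == (float)T / (float)samples);	// acceleration factor 2

	return 0;
}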
Example #10
void opt_reg_configure(unsigned int N, const long img_dims[N], struct opt_reg_s* ropts, const struct operator_p_s* prox_ops[NUM_REGS], const struct linop_s* trafos[NUM_REGS], unsigned int llr_blk, bool randshift, bool use_gpu)
{
	float lambda = ropts->lambda;

	if (-1. == lambda)
		lambda = 0.;

	// if no penalities specified but regularization
	// parameter is given, add a l2 penalty

	struct reg_s* regs = ropts->regs;

	if ((0 == ropts->r) && (lambda > 0.)) {

		regs[0].xform = L2IMG;
		regs[0].xflags = 0u;
		regs[0].jflags = 0u;
		regs[0].lambda = lambda;
		ropts->r = 1;
	}



	int nr_penalties = ropts->r;
	long blkdims[MAX_LEV][DIMS];
	int levels;


	for (int nr = 0; nr < nr_penalties; nr++) {

		// fix up regularization parameter
		if (-1. == regs[nr].lambda)
			regs[nr].lambda = lambda;

		switch (regs[nr].xform) {

			case L1WAV:
				debug_printf(DP_INFO, "l1-wavelet regularization: %f\n", regs[nr].lambda);

				if (0 != regs[nr].jflags)
					debug_printf(DP_WARN, "joint l1-wavelet thresholding not currently supported.\n");

				long minsize[DIMS] = { [0 ... DIMS - 1] = 1 };
				minsize[0] = MIN(img_dims[0], 16);
				minsize[1] = MIN(img_dims[1], 16);
				minsize[2] = MIN(img_dims[2], 16);


				unsigned int wflags = 0;
				for (unsigned int i = 0; i < DIMS; i++) {

					if ((1 < img_dims[i]) && MD_IS_SET(regs[nr].xflags, i)) {

						wflags = MD_SET(wflags, i);
						minsize[i] = MIN(img_dims[i], 16);
					}
				}

				trafos[nr] = linop_identity_create(DIMS, img_dims);
				prox_ops[nr] = prox_wavelet3_thresh_create(DIMS, img_dims, wflags, minsize, regs[nr].lambda, randshift);
				break;

			case TV:
				debug_printf(DP_INFO, "TV regularization: %f\n", regs[nr].lambda);

				trafos[nr] = linop_grad_create(DIMS, img_dims, regs[nr].xflags);
				prox_ops[nr] = prox_thresh_create(DIMS + 1,
						linop_codomain(trafos[nr])->dims,
						regs[nr].lambda, regs[nr].jflags | MD_BIT(DIMS), use_gpu);
				break;

			case LLR:
				debug_printf(DP_INFO, "lowrank regularization: %f\n", regs[nr].lambda);

				// add locally lowrank penalty
				levels = llr_blkdims(blkdims, regs[nr].jflags, img_dims, llr_blk);

				assert(1 == levels);
				assert(levels == img_dims[LEVEL_DIM]);

				for (int l = 0; l < levels; l++)
#if 0
					blkdims[l][MAPS_DIM] = img_dims[MAPS_DIM];
#else
					blkdims[l][MAPS_DIM] = 1;
#endif

				int remove_mean = 0;

				trafos[nr] = linop_identity_create(DIMS, img_dims);
				prox_ops[nr] = lrthresh_create(img_dims, randshift, regs[nr].xflags, (const long (*)[DIMS])blkdims, regs[nr].lambda, false, remove_mean, use_gpu);
				break;

			case MLR:
#if 0
				// FIXME: multiscale low rank changes the output image dimensions 
				// and requires the forward linear operator. This should be decoupled...
				debug_printf(DP_INFO, "multi-scale lowrank regularization: %f\n", regs[nr].lambda);

				levels = multilr_blkdims(blkdims, regs[nr].jflags, img_dims, 8, 1);

				img_dims[LEVEL_DIM] = levels;
				max_dims[LEVEL_DIM] = levels;

				for(int l = 0; l < levels; l++)
					blkdims[l][MAPS_DIM] = 1;

				trafos[nr] = linop_identity_create(DIMS, img_dims);
				prox_ops[nr] = lrthresh_create(img_dims, randshift, regs[nr].xflags, (const long (*)[DIMS])blkdims, regs[nr].lambda, false, 0, use_gpu);

				const struct linop_s* decom_op = sum_create( img_dims, use_gpu );
				const struct linop_s* tmp_op = forward_op;
				forward_op = linop_chain(decom_op, forward_op);

				linop_free(decom_op);
				linop_free(tmp_op);
#else
				debug_printf(DP_WARN, "multi-scale lowrank regularization not yet supported: %f\n", regs[nr].lambda);
#endif

				break;

			case IMAGL1:
				debug_printf(DP_INFO, "l1 regularization of imaginary part: %f\n", regs[nr].lambda);

				trafos[nr] = linop_rdiag_create(DIMS, img_dims, 0, &(complex float){ 1.i });
				prox_ops[nr] = prox_thresh_create(DIMS, img_dims, regs[nr].lambda, regs[nr].jflags, use_gpu);
				break;

			case IMAGL2:
				debug_printf(DP_INFO, "l2 regularization of imaginary part: %f\n", regs[nr].lambda);

				trafos[nr] = linop_rdiag_create(DIMS, img_dims, 0, &(complex float){ 1.i });
				prox_ops[nr] = prox_leastsquares_create(DIMS, img_dims, regs[nr].lambda, NULL);
				break;

			case L1IMG:
				debug_printf(DP_INFO, "l1 regularization: %f\n", regs[nr].lambda);

				trafos[nr] = linop_identity_create(DIMS, img_dims);
				prox_ops[nr] = prox_thresh_create(DIMS, img_dims, regs[nr].lambda, regs[nr].jflags, use_gpu);
				break;

			case L2IMG:
				debug_printf(DP_INFO, "l2 regularization: %f\n", regs[nr].lambda);

				trafos[nr] = linop_identity_create(DIMS, img_dims);
				prox_ops[nr] = prox_leastsquares_create(DIMS, img_dims, regs[nr].lambda, NULL);
				break;

			case FTL1:
				debug_printf(DP_INFO, "l1 regularization of Fourier transform: %f\n", regs[nr].lambda);

				trafos[nr] = linop_fft_create(DIMS, img_dims, regs[nr].xflags);
				prox_ops[nr] = prox_thresh_create(DIMS, img_dims, regs[nr].lambda, regs[nr].jflags, use_gpu);
				break;
		}
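
In the TV branch above, the thresholding operator spans DIMS + 1 dimensions and the joint flags include MD_BIT(DIMS). The extra dimension of linop_grad_create's codomain presumably indexes the gradient directions, so thresholding jointly over it gives isotropic TV (that reading is an assumption; the gradient operator itself is not shown in this excerpt). The proximal map this amounts to is ordinary joint (vector) soft-thresholding, sketched below for a single pixel's gradient vector:

#include <assert.h>
#include <math.h>

// Joint soft-thresholding of one gradient vector g (length ndir):
// g <- g * max(0, 1 - lambda / ||g||_2), the prox of lambda * ||.||_2.
static void joint_softthresh(int ndir, float g[ndir], float lambda)
{
	float norm = 0.f;

	for (int d = 0; d < ndir; d++)
		norm += g[d] * g[d];

	norm = sqrtf(norm);

	float scale = (norm > lambda) ? (1.f - lambda / norm) : 0.f;

	for (int d = 0; d < ndir; d++)
		g[d] *= scale;
}

int main(void)
{
	float g[2] = { 3.f, 4.f };	// ||g|| = 5

	joint_softthresh(2, g, 1.f);	// scale by 1 - 1/5 = 0.8

	assert(fabsf(g[0] - 2.4f) < 1e-6f);
	assert(fabsf(g[1] - 3.2f) < 1e-6f);

	return 0;
}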
Example #11
int main_homodyne(int argc, char* argv[])
{
	bool clear = false;
	const char* phase_ref = NULL;

	int com;
	while (-1 != (com = getopt(argc, argv, "hCP:"))) {

		switch (com) {

		case 'C':
			clear = true;
			break;

		case 'P':
			phase_ref = strdup(optarg);
			break;

		case 'h':
			help(argv[0], stdout);
			exit(0);

		default:
			help(argv[0], stderr);
			exit(1);
		}
	}

	if (argc - optind != 4) {
		usage(argv[0], stderr);
		exit(1);
	}

	const int N = DIMS;
	long dims[N];
	complex float* idata = load_cfl(argv[optind + 2], N, dims);
	complex float* data = create_cfl(argv[optind + 3], N, dims);

	int pfdim = atoi(argv[optind + 0]);
	float frac = atof(argv[optind + 1]);

	assert((0 <= pfdim) && (pfdim < N));
	assert(frac > 0.);


	long strs[N];
	md_calc_strides(N, strs, dims, CFL_SIZE);

	struct wdata wdata;
	wdata.frac = frac;
	wdata.pfdim = pfdim;
	md_select_dims(N, MD_BIT(pfdim), wdata.wdims, dims);
	md_calc_strides(N, wdata.wstrs, wdata.wdims, CFL_SIZE);
	wdata.weights = md_alloc(N, wdata.wdims, CFL_SIZE);

	md_loop(N, wdata.wdims, &wdata, comp_weights);

	long pstrs[N];
	long pdims[N];
	complex float* phase = NULL;

	if (NULL == phase_ref) {

		phase = estimate_phase(wdata, FFT_FLAGS, N, dims, idata);
		md_copy_dims(N, pdims, dims);
	}
	else
		phase = load_cfl(phase_ref, N, pdims);

	md_calc_strides(N, pstrs, pdims, CFL_SIZE);

	complex float* cdata = NULL;
	complex float* idata2 = NULL;

	if (clear) {

		long cdims[N];
		md_select_dims(N, ~MD_BIT(pfdim), cdims, dims);
		cdims[pfdim] = (int)(dims[pfdim] * frac);

		cdata = md_alloc(N, cdims, CFL_SIZE);
		idata2 = anon_cfl(NULL, N, dims);

		md_resize(N, cdims, cdata, dims, idata, CFL_SIZE);
		md_resize(N, dims, idata2, cdims, cdata, CFL_SIZE);

		md_free(cdata);
		unmap_cfl(N, dims, idata);
		idata = idata2;

	}


	if ((1 == dims[PHS2_DIM]) || (PHS2_DIM == pfdim)) {

		homodyne(wdata, FFT_FLAGS, N, dims, strs, data, idata, pstrs, phase);

	} else {

		unsigned int pardim = PHS2_DIM;

		ifftuc(N, dims, MD_CLEAR(FFT_FLAGS, pfdim), data, idata);

		long rdims[N];
		md_select_dims(N, ~MD_BIT(pardim), rdims, dims);
		long rstrs[N];
		md_calc_strides(N, rstrs, rdims, CFL_SIZE);

#pragma omp parallel for
		for (unsigned int i = 0; i < dims[pardim]; i++) {

			complex float* tmp = md_alloc(N, rdims, CFL_SIZE);
			long pos[N];
			md_set_dims(N, pos, 0);
			pos[pardim] = i;

			md_copy_block(N, pos, rdims, tmp, dims, data, CFL_SIZE);
			homodyne(wdata, MD_BIT(pfdim), N, rdims, rstrs, tmp, tmp, pstrs, phase);
			md_copy_block(N, pos, dims, data, rdims, tmp, CFL_SIZE);
			md_free(tmp);
		}
	}

	md_free(wdata.weights);
	if (NULL == phase_ref)
		md_free(phase);
	else {
		unmap_cfl(N, pdims, phase);
		free((void*)phase_ref);
	}

	unmap_cfl(N, dims, idata);
	unmap_cfl(N, dims, data);

	exit(0);
}
Example #12
static double bench_sumf(long scale)
{
	long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_sum(dims, MD_BIT(2), true);
}
Example #13
static double bench_sum2(long scale)
{
	long dims[DIMS] = { 50 * scale, 1, 65536 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_sum(dims, MD_BIT(0), false);
}
Example #14
static double bench_add(long scale)
{
	long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 };
	return bench_generic_add(dims, MD_BIT(2), false);
}
Example #15
int main_homodyne(int argc, char* argv[])
{
	mini_cmdline(argc, argv, 4, usage_str, help_str);

	const int N = DIMS;
	long dims[N];
	complex float* idata = load_cfl(argv[3], N, dims);
	complex float* data = create_cfl(argv[4], N, dims);

	int pfdim = atoi(argv[1]);
	float frac = atof(argv[2]);

	assert((0 <= pfdim) && (pfdim < N));
	assert(frac > 0.);


	long strs[N];
	md_calc_strides(N, strs, dims, CFL_SIZE);

	struct wdata wdata;
	wdata.frac = frac;
	wdata.pfdim = pfdim;
	md_select_dims(N, MD_BIT(pfdim), wdata.wdims, dims);
	md_calc_strides(N, wdata.wstrs, wdata.wdims, CFL_SIZE);
	wdata.weights = md_alloc(N, wdata.wdims, CFL_SIZE);

	md_loop(N, wdata.wdims, &wdata, comp_weights);

	if ((1 == dims[PHS2_DIM]) || (PHS2_DIM == pfdim)) {

		homodyne(wdata, FFT_FLAGS, N, dims, strs, data, idata);

	} else {

		unsigned int pardim = PHS2_DIM;

		ifftuc(N, dims, MD_CLEAR(FFT_FLAGS, pfdim), data, idata);

		long rdims[N];
		md_select_dims(N, ~MD_BIT(pardim), rdims, dims);
		long rstrs[N];
		md_calc_strides(N, rstrs, rdims, CFL_SIZE);

#pragma omp parallel for
		for (unsigned int i = 0; i < dims[pardim]; i++) {

			complex float* tmp = md_alloc(N, rdims, CFL_SIZE);
			long pos[N];
			md_set_dims(N, pos, 0);
			pos[pardim] = i;

			md_copy_block(N, pos, rdims, tmp, dims, data, CFL_SIZE);
			homodyne(wdata, MD_BIT(pfdim), N, rdims, rstrs, tmp, tmp);
			md_copy_block(N, pos, dims, data, rdims, tmp, CFL_SIZE);
			md_free(tmp);
		}
	}

	md_free(wdata.weights);

	unmap_cfl(N, dims, idata);
	unmap_cfl(N, dims, data);

	exit(0);
}