int main_estdelay(int argc, char* argv[]) { bool ring = false; int pad_factor = 100; unsigned int no_intersec_sp = 1; float size = 1.5; const struct opt_s opts[] = { OPT_SET('R', &ring, "RING method"), OPT_INT('p', &pad_factor, "p", "[RING] Padding"), OPT_UINT('n', &no_intersec_sp, "n", "[RING] Number of intersecting spokes"), OPT_FLOAT('r', &size, "r", "[RING] Central region size"), }; cmdline(&argc, argv, 2, 2, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); if (pad_factor % 2 != 0) error("Pad_factor -p should be even\n"); long tdims[DIMS]; const complex float* traj = load_cfl(argv[1], DIMS, tdims); long tdims1[DIMS]; md_select_dims(DIMS, ~MD_BIT(1), tdims1, tdims); complex float* traj1 = md_alloc(DIMS, tdims1, CFL_SIZE); md_slice(DIMS, MD_BIT(1), (long[DIMS]){ 0 }, tdims, traj1, traj, CFL_SIZE);
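// index: creates an (N+1)-dimensional output whose last dimension has size s and holds the values 0 .. s-1.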
int main_index(int argc, char* argv[]) { mini_cmdline(&argc, argv, 3, usage_str, help_str); num_init(); int N = atoi(argv[1]); int s = atoi(argv[2]); assert(N >= 0); assert(s >= 0); long dims[N + 1]; for (int i = 0; i < N; i++) dims[i] = 1; dims[N] = s; complex float* x = create_cfl(argv[3], N + 1, dims); for (int i = 0; i < s; i++) x[i] = i; unmap_cfl(N + 1, dims, x); exit(0); }
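// slice: extracts a single (size-1) slice at position pos along dimension dim of the input array.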
int main_slice(int argc, char* argv[]) { mini_cmdline(&argc, argv, 4, usage_str, help_str); num_init(); long in_dims[DIMS]; long out_dims[DIMS]; complex float* in_data = load_cfl(argv[3], DIMS, in_dims); int dim = atoi(argv[1]); int pos = atoi(argv[2]); assert(dim < DIMS); assert(pos >= 0); assert(pos < in_dims[dim]); for (int i = 0; i < DIMS; i++) out_dims[i] = in_dims[i]; out_dims[dim] = 1; complex float* out_data = create_cfl(argv[4], DIMS, out_dims); long pos2[DIMS] = { [0 ... DIMS - 1] = 0 }; pos2[dim] = pos; md_slice(DIMS, MD_BIT(dim), pos2, in_dims, out_data, in_data, CFL_SIZE); unmap_cfl(DIMS, out_dims, out_data); unmap_cfl(DIMS, in_dims, in_data); return 0; }
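// conv: cyclic, symmetric convolution of the input with a kernel over the dimensions selected by the bitmask.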
int main_conv(int argc, char* argv[]) { cmdline(&argc, argv, 4, 4, usage_str, help_str, 0, NULL); num_init(); unsigned int flags = atoi(argv[1]); unsigned int N = DIMS; long dims[N]; const complex float* in = load_cfl(argv[2], N, dims); long krn_dims[N]; const complex float* krn = load_cfl(argv[3], N, krn_dims); complex float* out = create_cfl(argv[4], N, dims); struct conv_plan* plan = conv_plan(N, flags, CONV_CYCLIC, CONV_SYMMETRIC, dims, dims, krn_dims, krn); conv_exec(plan, out, in); conv_free(plan); unmap_cfl(N, dims, out); unmap_cfl(N, krn_dims, krn); unmap_cfl(N, dims, in); exit(0); }
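// threshold: joint soft-thresholding by default; -H, -W, -L and -D select hard, wavelet, locally-low-rank and divergence-free-wavelet thresholding.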
int main_threshold(int argc, char* argv[]) { unsigned int flags = 0; enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD } th_type = NONE; int llrblk = 8; const struct opt_s opts[] = { OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"), OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"), OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"), OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"), OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"), OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"), }; cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); const int N = DIMS; long dims[N]; complex float* idata = load_cfl(argv[2], N, dims); complex float* odata = create_cfl(argv[3], N, dims); float lambda = atof(argv[1]); switch (th_type) { case WAV: wthresh(N, dims, lambda, flags, odata, idata); break; case LLR: lrthresh(N, dims, llrblk, lambda, flags, odata, idata); break; case DFW: dfthresh(N, dims, lambda, odata, idata); break; case HARD: hard_thresh(N, dims, lambda, odata, idata); break; default: md_zsoftthresh(N, dims, lambda, flags, odata, idata); } unmap_cfl(N, dims, idata); unmap_cfl(N, dims, odata); return 0; }
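// copy: copies the input into the output, optionally at a given position within an existing output file (dim/pos pairs on the command line).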
int main_copy(int argc, char* argv[])
{
	const struct opt_s opts[] = { };

	cmdline(&argc, argv, 2, 1000, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	unsigned int N = DIMS;
	int count = argc - 3;
	assert((count >= 0) && (count % 2 == 0));

	long in_dims[N];
	long out_dims[N];

	void* in_data = load_cfl(argv[argc - 2], N, in_dims);

	if (count > 0) {

		// get dimensions
		void* out_data = load_cfl(argv[argc - 1], N, out_dims);
		unmap_cfl(N, out_dims, out_data);

	} else {

		md_copy_dims(N, out_dims, in_dims);
	}

	void* out_data = create_cfl(argv[argc - 1], N, out_dims);

	long position[N];

	for (unsigned int i = 0; i < N; i++)
		position[i] = 0;

	for (int i = 0; i < count; i += 2) {

		unsigned int dim = atoi(argv[i + 1]);
		long pos = atol(argv[i + 2]);

		assert(dim < N);
		assert((0 <= pos) && (pos < out_dims[dim]));

		position[dim] = pos;
	}

	md_copy_block(N, position, out_dims, out_data, in_dims, in_data, CFL_SIZE);

	unmap_cfl(N, in_dims, in_data);
	unmap_cfl(N, out_dims, out_data);

	return 0;
}
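// reshape: changes the sizes of the dimensions selected by the bitmask; the total number of elements must stay the same.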
int main_reshape(int argc, char* argv[])
{
	cmdline(&argc, argv, 3, 100, usage_str, help_str, 0, NULL);

	num_init();

	unsigned int flags = atoi(argv[1]);
	unsigned int n = bitcount(flags);

	assert((int)n + 3 == argc - 1);

	long in_dims[DIMS];
	long in_strs[DIMS];
	long out_dims[DIMS];
	long out_strs[DIMS];

	complex float* in_data = load_cfl(argv[n + 2], DIMS, in_dims);

	md_calc_strides(DIMS, in_strs, in_dims, CFL_SIZE);
	md_copy_dims(DIMS, out_dims, in_dims);

	unsigned int j = 0;

	for (unsigned int i = 0; i < DIMS; i++)
		if (MD_IS_SET(flags, i))
			out_dims[i] = atoi(argv[j++ + 2]);

	assert(j == n);
	assert(md_calc_size(DIMS, in_dims) == md_calc_size(DIMS, out_dims));

	md_calc_strides(DIMS, out_strs, out_dims, CFL_SIZE);

	for (unsigned int i = 0; i < DIMS; i++)
		if (!(MD_IS_SET(flags, i) || (in_strs[i] == out_strs[i])))
			error("Dimensions are not consistent at index %d.\n", i);

	complex float* out_data = create_cfl(argv[n + 3], DIMS, out_dims);

	md_copy(DIMS, in_dims, out_data, in_data, CFL_SIZE);

	unmap_cfl(DIMS, in_dims, in_data);
	unmap_cfl(DIMS, out_dims, out_data);

	exit(0);
}
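// creal: takes the real part of the input (the result is still stored as a complex array).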
int main_creal(int argc, char* argv[]) { mini_cmdline(argc, argv, 2, usage_str, help_str); num_init(); long dims[DIMS]; complex float* in_data = load_cfl(argv[1], DIMS, dims); complex float* out_data = create_cfl(argv[2], DIMS, dims); md_zreal(DIMS, dims, out_data, in_data); unmap_cfl(DIMS, dims, out_data); unmap_cfl(DIMS, dims, in_data); exit(0); }
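// resize: crops or zero-pads the selected dimensions to the given sizes; -c centers the data.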
int main_resize(int argc, char* argv[])
{
	bool center = false;

	const struct opt_s opts[] = {

		OPT_SET('c', &center, "center"),
	};

	cmdline(&argc, argv, 4, 1000, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	unsigned int N = DIMS;
	int count = argc - 3;
	assert((count > 0) && (count % 2 == 0));

	long in_dims[N];
	long out_dims[N];

	void* in_data = load_cfl(argv[argc - 2], N, in_dims);

	md_copy_dims(N, out_dims, in_dims);

	for (int i = 0; i < count; i += 2) {

		unsigned int dim = atoi(argv[i + 1]);
		unsigned int size = atoi(argv[i + 2]);

		assert(dim < N);
		assert(size >= 1);

		out_dims[dim] = size;
	}

	void* out_data = create_cfl(argv[argc - 1], N, out_dims);

	(center ? md_resize_center : md_resize)(N, out_dims, out_data, in_dims, in_data, CFL_SIZE);

	unmap_cfl(N, in_dims, in_data);
	unmap_cfl(N, out_dims, out_data);

	return 0;
}
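// invert: element-wise reciprocal of the input; zero entries are mapped to zero.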
int main_invert(int argc, char* argv[])
{
	mini_cmdline(&argc, argv, 2, usage_str, help_str);

	num_init();

	long dims[DIMS];

	complex float* idata = load_cfl(argv[1], DIMS, dims);
	complex float* odata = create_cfl(argv[2], DIMS, dims);

	#pragma omp parallel for
	for (long i = 0; i < md_calc_size(DIMS, dims); i++)
		odata[i] = idata[i] == 0 ? 0. : 1. / idata[i];

	unmap_cfl(DIMS, dims, idata);
	unmap_cfl(DIMS, dims, odata);

	return 0;
}
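// flip: mirrors the array along the dimensions selected by the bitmask.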
int main_flip(int argc, char* argv[]) { mini_cmdline(argc, argv, 3, usage_str, help_str); num_init(); int N = DIMS; long dims[N]; complex float* idata = load_cfl(argv[2], N, dims); complex float* odata = create_cfl(argv[3], N, dims); unsigned long flags = atoi(argv[1]); md_flip(N, dims, flags, odata, idata, sizeof(complex float)); unmap_cfl(N, dims, idata); unmap_cfl(N, dims, odata); exit(0); }
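// itsense: iterative SENSE reconstruction (sense_reco) with regularization parameter alpha, given coil sensitivities, k-space data and a sampling pattern.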
int main_itsense(int argc, char* argv[])
{
	mini_cmdline(argc, argv, 5, usage_str, help_str);

	struct sense_data data;

	data.alpha = atof(argv[1]);

	complex float* kspace = load_cfl(argv[3], DIMS, data.data_dims);

	data.sens = load_cfl(argv[2], DIMS, data.sens_dims);
	data.pattern = load_cfl(argv[4], DIMS, data.mask_dims);

	// 1 2 4 8
	md_select_dims(DIMS, ~COIL_FLAG, data.imgs_dims, data.sens_dims);

	assert(check_dimensions(&data));

	complex float* image = create_cfl(argv[5], DIMS, data.imgs_dims);

	md_calc_strides(DIMS, data.sens_strs, data.sens_dims, CFL_SIZE);
	md_calc_strides(DIMS, data.imgs_strs, data.imgs_dims, CFL_SIZE);
	md_calc_strides(DIMS, data.data_strs, data.data_dims, CFL_SIZE);
	md_calc_strides(DIMS, data.mask_strs, data.mask_dims, CFL_SIZE);

	data.tmp = md_alloc(DIMS, data.data_dims, CFL_SIZE);

	num_init();

	sense_reco(&data, image, kspace);

	unmap_cfl(DIMS, data.imgs_dims, image);
	unmap_cfl(DIMS, data.mask_dims, data.pattern);
	unmap_cfl(DIMS, data.sens_dims, data.sens);
	unmap_cfl(DIMS, data.data_dims, kspace);

	md_free(data.tmp);

	exit(0);
}
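// extract: extracts the sub-array from index start to end (inclusive) along dimension dim.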
int main_extract(int argc, char* argv[]) { mini_cmdline(argc, argv, 5, usage_str, help_str); num_init(); long in_dims[DIMS]; long out_dims[DIMS]; complex float* in_data = load_cfl(argv[4], DIMS, in_dims); int dim = atoi(argv[1]); int start = atoi(argv[2]); int end = atoi(argv[3]); assert((0 <= dim) && (dim < DIMS)); assert(start >= 0); assert(start <= end); assert(end < in_dims[dim]); for (int i = 0; i < DIMS; i++) out_dims[i] = in_dims[i]; out_dims[dim] = end - start + 1; complex float* out_data = create_cfl(argv[5], DIMS, out_dims); long pos2[DIMS] = { [0 ... DIMS - 1] = 0 }; pos2[dim] = start; md_copy_block(DIMS, pos2, out_dims, out_data, in_dims, in_data, sizeof(complex float)); unmap_cfl(DIMS, in_dims, in_data); unmap_cfl(DIMS, out_dims, out_data); exit(0); }
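// zeros: creates an N-dimensional array of the given sizes filled with zeros.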
int main_zeros(int argc, char* argv[]) { mini_cmdline(argc, argv, -3, usage_str, help_str); num_init(); int N = atoi(argv[1]); assert(N >= 0); assert(argc == 3 + N); long dims[N]; for (int i = 0; i < N; i++) { dims[i] = atoi(argv[2 + i]); assert(dims[i] >= 1); } complex float* x = create_cfl(argv[2 + N], N, dims); md_clear(N, dims, x, sizeof(complex float)); unmap_cfl(N, dims, x); exit(0); }
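// pocsense (getopt-based variant): POCS reconstruction from k-space data and coil sensitivities; -l toggles l1-wavelet/l2 regularization, -m selects ADMM with the given rho.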
int main_pocsense(int argc, char* argv[])
{
	int c;
	float alpha = 0.;
	int maxiter = 50;
	bool l1wav = false;
	float lambda = -1.;
	bool use_gpu = false;
	bool use_admm = false;
	float admm_rho = 0.1;

	while (-1 != (c = getopt(argc, argv, "m:ghi:r:o:l:"))) {

		switch (c) {

		case 'i':
			maxiter = atoi(optarg);
			break;

		case 'r':
			alpha = atof(optarg);
			break;

		case 'l':
			if (1 == atoi(optarg))
				l1wav = true;
			else
			if (2 == atoi(optarg))
				l1wav = false;
			else {
				usage(argv[0], stderr);
				exit(1);
			}
			break;

		case 'g':
			use_gpu = true;
			break;

		case 'o':
			lambda = atof(optarg);
			break;

		case 'm':
			use_admm = true;
			admm_rho = atof(optarg);
			break;

		case 'h':
			usage(argv[0], stdout);
			help();
			exit(0);

		default:
			usage(argv[0], stderr);
			exit(1);
		}
	}

	if (argc - optind != 3) {

		usage(argv[0], stderr);
		exit(1);
	}

	unsigned int N = DIMS;

	long dims[N];
	long ksp_dims[N];

	complex float* kspace_data = load_cfl(argv[optind + 0], N, ksp_dims);
	complex float* sens_maps = load_cfl(argv[optind + 1], N, dims);

	for (int i = 0; i < 4; i++) {	// sizes2[4] may be > 1

		if (ksp_dims[i] != dims[i]) {

			fprintf(stderr, "Dimensions of kspace and sensitivities do not match!\n");
			exit(1);
		}
	}

	assert(1 == ksp_dims[MAPS_DIM]);

	num_init();

	long dims1[N];
	md_select_dims(N, ~(COIL_FLAG|MAPS_FLAG), dims1, dims);

	// -----------------------------------------------------------
	// memory allocation

	complex float* result = create_cfl(argv[optind + 2], N, ksp_dims);
	complex float* pattern = md_alloc(N, dims1, CFL_SIZE);

	// -----------------------------------------------------------
	// pre-process data

	float scaling = estimate_scaling(ksp_dims, NULL, kspace_data);
	md_zsmul(N, ksp_dims, kspace_data, kspace_data, 1. / scaling);

	estimate_pattern(N, ksp_dims, COIL_DIM, pattern, kspace_data);

	// -----------------------------------------------------------
	// l1-norm threshold operator

	const struct operator_p_s* thresh_op = NULL;
	const struct linop_s* wave_op = NULL;

	if (l1wav) {

		long minsize[DIMS] = { [0 ... DIMS - 1] = 1 };
		minsize[0] = MIN(ksp_dims[0], 16);
		minsize[1] = MIN(ksp_dims[1], 16);
		minsize[2] = MIN(ksp_dims[2], 16);

		wave_op = wavelet_create(DIMS, ksp_dims, FFT_FLAGS, minsize, true, use_gpu);
		thresh_op = prox_unithresh_create(DIMS, wave_op, alpha, COIL_FLAG, use_gpu);
	}
#if 0
	else {
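// nlinv: joint estimation of image and coil sensitivities by nonlinear inversion (noir_recon); -i sets the number of iterations, -p supplies a sampling pattern/PSF, -f restricts the FOV.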
int main_nlinv(int argc, char* argv[])
{
	int iter = 8;
	float l1 = -1.;
	bool waterfat = false;
	bool rvc = false;
	bool normalize = true;
	float restrict_fov = -1.;
	float csh[3] = { 0., 0., 0. };
	bool usegpu = false;
	const char* psf = NULL;

	const struct opt_s opts[] = {

		{ 'l', true, opt_float, &l1, NULL },
		{ 'i', true, opt_int, &iter, NULL },
		{ 'c', false, opt_set, &rvc, NULL },
		{ 'N', false, opt_clear, &normalize, NULL },
		{ 'f', true, opt_float, &restrict_fov, NULL },
		{ 'p', true, opt_string, &psf, NULL },
		{ 'g', false, opt_set, &usegpu, NULL },
	};

	cmdline(&argc, argv, 2, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	assert(iter > 0);

	long ksp_dims[DIMS];
	complex float* kspace_data = load_cfl(argv[1], DIMS, ksp_dims);

	long dims[DIMS];
	md_copy_dims(DIMS, dims, ksp_dims);

	if (waterfat)
		dims[CSHIFT_DIM] = 2;

	long img_dims[DIMS];
	md_select_dims(DIMS, FFT_FLAGS|CSHIFT_FLAG, img_dims, dims);

	long img_strs[DIMS];
	md_calc_strides(DIMS, img_strs, img_dims, CFL_SIZE);

	complex float* image = create_cfl(argv[2], DIMS, img_dims);

	long msk_dims[DIMS];
	md_select_dims(DIMS, FFT_FLAGS, msk_dims, dims);

	long msk_strs[DIMS];
	md_calc_strides(DIMS, msk_strs, msk_dims, CFL_SIZE);

	complex float* mask;
	complex float* norm = md_alloc(DIMS, msk_dims, CFL_SIZE);
	complex float* sens;

	if (4 == argc) {

		sens = create_cfl(argv[3], DIMS, ksp_dims);

	} else {

		sens = md_alloc(DIMS, ksp_dims, CFL_SIZE);
	}

	complex float* pattern = NULL;
	long pat_dims[DIMS];

	if (NULL != psf) {

		pattern = load_cfl(psf, DIMS, pat_dims);
		// FIXME: check compatibility

	} else {

		pattern = md_alloc(DIMS, img_dims, CFL_SIZE);
		estimate_pattern(DIMS, ksp_dims, COIL_DIM, pattern, kspace_data);
	}

	if (waterfat) {

		size_t size = md_calc_size(DIMS, msk_dims);
		md_copy(DIMS, msk_dims, pattern + size, pattern, CFL_SIZE);

		long shift_dims[DIMS];
		md_select_dims(DIMS, FFT_FLAGS, shift_dims, msk_dims);

		long shift_strs[DIMS];
		md_calc_strides(DIMS, shift_strs, shift_dims, CFL_SIZE);

		complex float* shift = md_alloc(DIMS, shift_dims, CFL_SIZE);

		unsigned int X = shift_dims[READ_DIM];
		unsigned int Y = shift_dims[PHS1_DIM];
		unsigned int Z = shift_dims[PHS2_DIM];

		for (unsigned int x = 0; x < X; x++)
			for (unsigned int y = 0; y < Y; y++)
				for (unsigned int z = 0; z < Z; z++)
					shift[(z * Z + y) * Y + x] = cexp(2.i * M_PI * ((csh[0] * x) / X + (csh[1] * y) / Y + (csh[2] * z) / Z));

		md_zmul2(DIMS, msk_dims, msk_strs, pattern + size, msk_strs, pattern + size, shift_strs, shift);

		md_free(shift);
	}

#if 0
	float scaling = 1. / estimate_scaling(ksp_dims, NULL, kspace_data);
#else
	float scaling = 100. / md_znorm(DIMS, ksp_dims, kspace_data);
#endif
	debug_printf(DP_INFO, "Scaling: %f\n", scaling);
	md_zsmul(DIMS, ksp_dims, kspace_data, kspace_data, scaling);

	if (-1. == restrict_fov) {

		mask = md_alloc(DIMS, msk_dims, CFL_SIZE);
		md_zfill(DIMS, msk_dims, mask, 1.);

	} else {

		float restrict_dims[DIMS] = { [0 ... DIMS - 1] = 1. };
		restrict_dims[0] = restrict_fov;
		restrict_dims[1] = restrict_fov;
		restrict_dims[2] = restrict_fov;

		mask = compute_mask(DIMS, msk_dims, restrict_dims);
	}

#ifdef USE_CUDA
	if (usegpu) {

		complex float* kspace_gpu = md_alloc_gpu(DIMS, ksp_dims, CFL_SIZE);
		md_copy(DIMS, ksp_dims, kspace_gpu, kspace_data, CFL_SIZE);

		noir_recon(dims, iter, l1, image, NULL, pattern, mask, kspace_gpu, rvc, usegpu);
		md_free(kspace_gpu);

		md_zfill(DIMS, ksp_dims, sens, 1.);

	} else
#endif
	noir_recon(dims, iter, l1, image, sens, pattern, mask, kspace_data, rvc, usegpu);

	if (normalize) {

		md_zrss(DIMS, ksp_dims, COIL_FLAG, norm, sens);
		md_zmul2(DIMS, img_dims, img_strs, image, img_strs, image, msk_strs, norm);
	}

	if (4 == argc) {

		long strs[DIMS];
		md_calc_strides(DIMS, strs, ksp_dims, CFL_SIZE);

		if (norm)
			md_zdiv2(DIMS, ksp_dims, strs, sens, strs, sens, img_strs, norm);

		fftmod(DIMS, ksp_dims, FFT_FLAGS, sens, sens);

		unmap_cfl(DIMS, ksp_dims, sens);

	} else {

		md_free(sens);
	}

	md_free(norm);
	md_free(mask);

	if (NULL != psf)
		unmap_cfl(DIMS, pat_dims, pattern);
	else
		md_free(pattern);

	unmap_cfl(DIMS, img_dims, image);
	unmap_cfl(DIMS, ksp_dims, kspace_data);

	exit(0);
}
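// nufft (getopt-based variant): non-uniform FFT on a given trajectory; -a applies the adjoint, -i the iterative (conjugate-gradient) inverse, -t enables Toeplitz embedding, -c estimates a calibration region.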
int main_nufft(int argc, char* argv[])
{
	int c;
	bool adjoint = false;
	bool inverse = false;
	bool toeplitz = false;
	bool precond = false;
	bool use_gpu = false;
	bool two = false;
	bool calib = false;
	bool sizeinit = false;
	bool stoch = false;

	long coilim_dims[DIMS];
	md_singleton_dims(DIMS, coilim_dims);

	int maxiter = 50;
	float lambda = 0.00;
	const char* pat_str = NULL;

	while (-1 != (c = getopt(argc, argv, "d:m:l:p:aihCto:w:2:c:S"))) {

		switch (c) {

		case '2':
			two = true;
			break;

		case 'i':
			inverse = true;
			break;

		case 'a':
			adjoint = true;
			break;

		case 'C':
			precond = true;
			break;

		case 'S':
			stoch = true;
			break;

		case 'c':
			calib = true;
			inverse = true;

		case 'd':
			sscanf(optarg, "%ld:%ld:%ld", &coilim_dims[0], &coilim_dims[1], &coilim_dims[2]);
			sizeinit = true;
			break;

		case 'm':
			maxiter = atoi(optarg);
			break;

		case 'p':
			pat_str = strdup(optarg);
			break;

		case 'l':
			lambda = atof(optarg);
			break;

		case 't':
			toeplitz = true;
			break;

		case 'h':
			usage(argv[0], stdout);
			help();
			exit(0);

		default:
			usage(argv[0], stderr);
			exit(1);
		}
	}

	if (argc - optind != 3) {

		usage(argv[0], stderr);
		exit(1);
	}

	// Read trajectory
	long traj_dims[2];
	complex float* traj = load_cfl(argv[optind + 0], 2, traj_dims);

	assert(3 == traj_dims[0]);

	if (!sizeinit)
		estimate_im_dims(coilim_dims, traj_dims, traj);

	num_init();

	// Load pattern / density compensation (if any)
	complex float* pat = NULL;
	long pat_dims[2];

	if (pat_str) {

		pat = load_cfl(pat_str, 2, pat_dims);
		assert(pat_dims[0] == 1);
		assert(pat_dims[1] == traj_dims[1]);
	}

	if (inverse || adjoint) {

		long ksp_dims[DIMS];
		const complex float* ksp = load_cfl(argv[optind + 1], DIMS, ksp_dims);

		coilim_dims[COIL_DIM] = ksp_dims[COIL_DIM];

		long out_dims[DIMS];

		if (calib) {

			md_singleton_dims(DIMS, out_dims);
			estimate_im_dims(out_dims, traj_dims, traj);
			out_dims[COIL_DIM] = ksp_dims[COIL_DIM];

		} else {

			md_copy_dims(DIMS, out_dims, coilim_dims);
		}

		complex float* out = create_cfl(argv[optind + 2], DIMS, out_dims);
		complex float* img = out;

		if (calib)
			img = md_alloc(DIMS, coilim_dims, CFL_SIZE);

		md_clear(DIMS, coilim_dims, img, CFL_SIZE);

		struct iter_conjgrad_conf cgconf = iter_conjgrad_defaults;
		cgconf.maxiter = maxiter;
		cgconf.l2lambda = 0.;
		cgconf.tol = 0;

		const struct linop_s* nufft_op;

		// Get nufft_op
		if (two)
#ifdef BERKELEY_SVN
			nufft_op = nufft2_create(ksp_dims, coilim_dims, traj, pat, toeplitz, precond, &cgconf, use_gpu);
#else
			assert(!two);
#endif
		else
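// phantom: generates a numerical phantom in image or k-space domain, optionally sampled on a trajectory (-t), with coil sensitivities (-s/-S), as a (moving) disc (-c/-m), or in 3D (-3).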
int main_phantom(int argc, char* argv[])
{
	bool kspace = false;
	bool d3 = false;
	int sens = 0;
	int osens = -1;
	int xdim = -1;
	bool out_sens = false;
	bool tecirc = false;
	bool circ = false;
	const char* traj = NULL;

	long dims[DIMS] = { [0 ... DIMS - 1] = 1 };
	dims[0] = 128;
	dims[1] = 128;
	dims[2] = 1;

	const struct opt_s opts[] = {

		OPT_INT('s', &sens, "nc", "nc sensitivities"),
		OPT_INT('S', &osens, "", "Output nc sensitivities"),
		OPT_SET('k', &kspace, "k-space"),
		OPT_STRING('t', &traj, "file", "trajectory"),
		OPT_SET('c', &circ, "()"),
		OPT_SET('m', &tecirc, "()"),
		OPT_INT('x', &xdim, "n", "dimensions in y and z"),
		OPT_SET('3', &d3, "3D"),
	};

	cmdline(&argc, argv, 1, 1, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	if (tecirc) {

		circ = true;
		dims[TE_DIM] = 32;
	}

	if (-1 != osens) {

		out_sens = true;
		sens = osens;
	}

	if (-1 != xdim)
		dims[0] = dims[1] = xdim;

	if (d3)
		dims[2] = dims[0];

	long sdims[DIMS];
	complex float* samples = NULL;

	if (NULL != traj) {

		samples = load_cfl(traj, DIMS, sdims);

		dims[0] = 1;
		dims[1] = sdims[1];
		dims[2] = sdims[2];
	}

	if (sens)
		dims[3] = sens;

	complex float* out = create_cfl(argv[1], DIMS, dims);

	if (out_sens) {

		assert(NULL == traj);
		assert(!kspace);

		calc_sens(dims, out);

	} else if (circ) {

		assert(NULL == traj);

		if (1 < dims[TE_DIM]) {

			assert(!d3);
			calc_moving_circ(dims, out, kspace);

		} else {

			(d3 ? calc_circ3d : calc_circ)(dims, out, kspace);
			// calc_ring(dims, out, kspace);
		}

	} else {

		//assert(1 == dims[COIL_DIM]);

		if (NULL == samples) {

			(d3 ? calc_phantom3d : calc_phantom)(dims, out, kspace);

		} else {

			dims[0] = 3;
			(d3 ? calc_phantom3d_noncart : calc_phantom_noncart)(dims, out, samples);
			dims[0] = 1;
		}
	}

	if (NULL != traj)
		free((void*)traj);

	if (NULL != samples)
		unmap_cfl(3, sdims, samples);

	unmap_cfl(DIMS, dims, out);

	return 0;
}
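// homodyne: homodyne reconstruction of partial-Fourier data acquired along dimension dim with the given sampling fraction; -r sets the ramp-filter offset, -P supplies an external phase reference.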
int main_homodyne(int argc, char* argv[]) { bool clear = false; bool image = false; const char* phase_ref = NULL; float alpha = 0.; num_init(); const struct opt_s opts[] = { { 'r', true, opt_float, &alpha, " <alpha>\tOffset of ramp filter, between 0 and 1. alpha=0 is a full ramp, alpha=1 is a horizontal line" }, { 'I', false, opt_set, &image, "\tInput is in image domain" }, { 'C', false, opt_set, &clear, "\tClear unacquired portion of kspace" }, { 'P', true, opt_string, &phase_ref, " <phase_ref>\tUse <phase_ref> as phase reference" }, }; cmdline(&argc, argv, 4, 4, usage_str, help_str, ARRAY_SIZE(opts), opts); const int N = DIMS; long dims[N]; complex float* idata = load_cfl(argv[3], N, dims); complex float* data = create_cfl(argv[4], N, dims); int pfdim = atoi(argv[1]); float frac = atof(argv[2]); assert((0 <= pfdim) && (pfdim < N)); assert(frac > 0.); if (image) { complex float* ksp_in = md_alloc(N, dims, CFL_SIZE); fftuc(N, dims, FFT_FLAGS, ksp_in, idata); md_copy(N, dims, idata, ksp_in, CFL_SIZE); md_free(ksp_in); } long strs[N]; md_calc_strides(N, strs, dims, CFL_SIZE); struct wdata wdata; wdata.frac = frac; wdata.pfdim = pfdim; md_select_dims(N, MD_BIT(pfdim), wdata.wdims, dims); md_calc_strides(N, wdata.wstrs, wdata.wdims, CFL_SIZE); wdata.weights = md_alloc(N, wdata.wdims, CFL_SIZE); wdata.alpha = alpha; wdata.clear = clear; md_loop(N, wdata.wdims, &wdata, comp_weights); long pstrs[N]; long pdims[N]; complex float* phase = NULL; if (NULL == phase_ref) { phase = estimate_phase(wdata, FFT_FLAGS, N, dims, idata); md_copy_dims(N, pdims, dims); } else phase = load_cfl(phase_ref, N, pdims); md_calc_strides(N, pstrs, pdims, CFL_SIZE); homodyne(wdata, FFT_FLAGS, N, dims, strs, data, idata, pstrs, phase); md_free(wdata.weights); if (NULL == phase_ref) md_free(phase); else { unmap_cfl(N, pdims, phase); free((void*)phase_ref); } unmap_cfl(N, dims, idata); unmap_cfl(N, dims, data); exit(0); }
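// bench: runs the built-in performance benchmarks; -T varies the number of threads, -S the problem scale.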
int main_bench(int argc, char* argv[]) { int c; bool threads = false; bool scaling = false; while (-1 != (c = getopt(argc, argv, "TSh"))) { switch (c) { case 'T': threads = true; break; case 'S': scaling = true; break; case 'h': usage(argv[0], stdout); help(); exit(0); default: usage(argv[0], stderr); exit(1); } } if (argc - optind > 1) { usage(argv[0], stderr); exit(1); } long dims[BENCH_DIMS] = MD_INIT_ARRAY(BENCH_DIMS, 1); long strs[BENCH_DIMS]; long pos[BENCH_DIMS] = { 0 }; dims[REPETITION_IND] = 5; dims[THREADS_IND] = threads ? 8 : 1; dims[SCALE_IND] = scaling ? 5 : 1; dims[TESTS_IND] = sizeof(benchmarks) / sizeof(benchmarks[0]); md_calc_strides(BENCH_DIMS, strs, dims, CFL_SIZE); bool outp = (1 == argc - optind); complex float* out = (outp ? create_cfl : anon_cfl)(outp ? argv[optind] : "", BENCH_DIMS, dims); num_init(); do { if (threads) { num_set_num_threads(pos[THREADS_IND] + 1); debug_printf(DP_INFO, "%02d threads. ", pos[THREADS_IND] + 1); } do_test(dims, &MD_ACCESS(BENCH_DIMS, strs, pos, out), pos[SCALE_IND] + 1, benchmarks[pos[TESTS_IND]].fun, benchmarks[pos[TESTS_IND]].str); } while (md_next(BENCH_DIMS, dims, ~MD_BIT(REPETITION_IND), pos)); unmap_cfl(BENCH_DIMS, dims, out); exit(0); }
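// nufft (opts-based variant): adjoint/inverse NUFFT with optional Toeplitz embedding (-t), preconditioning (-c), l2 regularization (-l) and a direct DFT mode (-s).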
int main_nufft(int argc, char* argv[])
{
	bool adjoint = false;
	bool inverse = false;
	bool use_gpu = false;
	bool precond = false;
	bool dft = false;

	struct nufft_conf_s conf = nufft_conf_defaults;
	struct iter_conjgrad_conf cgconf = iter_conjgrad_defaults;

	long coilim_vec[3] = { 0 };
	float lambda = 0.;

	const struct opt_s opts[] = {

		OPT_SET('a', &adjoint, "adjoint"),
		OPT_SET('i', &inverse, "inverse"),
		OPT_VEC3('d', &coilim_vec, "x:y:z", "dimensions"),
		OPT_VEC3('D', &coilim_vec, "", "()"),
		OPT_SET('t', &conf.toeplitz, "Toeplitz embedding for inverse NUFFT"),
		OPT_SET('c', &precond, "Preconditioning for inverse NUFFT"),
		OPT_FLOAT('l', &lambda, "lambda", "l2 regularization"),
		OPT_UINT('m', &cgconf.maxiter, "", "()"),
		OPT_SET('s', &dft, "DFT"),
	};

	cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);

	long coilim_dims[DIMS] = { 0 };
	md_copy_dims(3, coilim_dims, coilim_vec);

	// Read trajectory
	long traj_dims[DIMS];
	complex float* traj = load_cfl(argv[1], DIMS, traj_dims);

	assert(3 == traj_dims[0]);

	num_init();

	if (inverse || adjoint) {

		long ksp_dims[DIMS];
		const complex float* ksp = load_cfl(argv[2], DIMS, ksp_dims);

		assert(1 == ksp_dims[0]);
		assert(md_check_compat(DIMS, ~(PHS1_FLAG|PHS2_FLAG), ksp_dims, traj_dims));

		md_copy_dims(DIMS - 3, coilim_dims + 3, ksp_dims + 3);

		if (0 == md_calc_size(DIMS, coilim_dims)) {

			estimate_im_dims(DIMS, coilim_dims, traj_dims, traj);
			debug_printf(DP_INFO, "Est. image size: %ld %ld %ld\n", coilim_dims[0], coilim_dims[1], coilim_dims[2]);
		}

		complex float* img = create_cfl(argv[3], DIMS, coilim_dims);

		md_clear(DIMS, coilim_dims, img, CFL_SIZE);

		const struct linop_s* nufft_op;

		if (!dft)
			nufft_op = nufft_create(DIMS, ksp_dims, coilim_dims, traj_dims, traj, NULL, conf, use_gpu);
		else
			nufft_op = nudft_create(DIMS, FFT_FLAGS, ksp_dims, coilim_dims, traj_dims, traj);

		if (inverse) {

			const struct operator_s* precond_op = NULL;

			if (conf.toeplitz && precond)
				precond_op = nufft_precond_create(nufft_op);

			lsqr(DIMS, &(struct lsqr_conf){ lambda }, iter_conjgrad, CAST_UP(&cgconf), nufft_op, NULL, coilim_dims, img, ksp_dims, ksp, precond_op);

			if (conf.toeplitz && precond)
				operator_free(precond_op);

		} else {
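// pocsense (opts-based variant): same reconstruction as the getopt-based version above, using the generic option parser and error() for dimension checks.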
int main_pocsense(int argc, char* argv[])
{
	float alpha = 0.;
	int maxiter = 50;
	bool l1wav = false;
	float lambda = -1.;
	bool use_gpu = false;
	bool use_admm = false;
	float admm_rho = -1.;
	int l1type = 2;

	const struct opt_s opts[] = {

		{ 'i', true, opt_int, &maxiter, NULL },
		{ 'r', true, opt_float, &alpha, " alpha\tregularization parameter" },
		{ 'l', true, opt_int, &l1type, "1/-l2\t\ttoggle l1-wavelet or l2 regularization" },
		{ 'g', false, opt_set, &use_gpu, NULL },
		{ 'o', true, opt_float, &lambda, NULL },
		{ 'm', true, opt_float, &admm_rho, NULL },
	};

	cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);

	if (1 == l1type)
		l1wav = true;
	else
	if (2 == l1type)
		l1wav = false;
	else
		error("Unknown regularization type.");

	unsigned int N = DIMS;

	long dims[N];
	long ksp_dims[N];

	complex float* kspace_data = load_cfl(argv[1], N, ksp_dims);
	complex float* sens_maps = load_cfl(argv[2], N, dims);

	for (int i = 0; i < 4; i++)	// sizes2[4] may be > 1
		if (ksp_dims[i] != dims[i])
			error("Dimensions of kspace and sensitivities do not match!\n");

	assert(1 == ksp_dims[MAPS_DIM]);

	num_init();

	long dims1[N];
	md_select_dims(N, ~(COIL_FLAG|MAPS_FLAG), dims1, dims);

	// -----------------------------------------------------------
	// memory allocation

	complex float* result = create_cfl(argv[3], N, ksp_dims);
	complex float* pattern = md_alloc(N, dims1, CFL_SIZE);

	// -----------------------------------------------------------
	// pre-process data

	float scaling = estimate_scaling(ksp_dims, NULL, kspace_data);
	md_zsmul(N, ksp_dims, kspace_data, kspace_data, 1. / scaling);

	estimate_pattern(N, ksp_dims, COIL_DIM, pattern, kspace_data);

	// -----------------------------------------------------------
	// l1-norm threshold operator

	const struct operator_p_s* thresh_op = NULL;
	const struct linop_s* wave_op = NULL;

	if (l1wav) {

		long minsize[DIMS] = { [0 ... DIMS - 1] = 1 };
		minsize[0] = MIN(ksp_dims[0], 16);
		minsize[1] = MIN(ksp_dims[1], 16);
		minsize[2] = MIN(ksp_dims[2], 16);

		wave_op = wavelet_create(DIMS, ksp_dims, FFT_FLAGS, minsize, true, use_gpu);
		thresh_op = prox_unithresh_create(DIMS, wave_op, alpha, COIL_FLAG, use_gpu);
	}
#if 0
	else {
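// nufft (another getopt-based variant): adjoint/inverse NUFFT using nufft_create and conjugate-gradient lsqr.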
int main_nufft(int argc, char* argv[])
{
	int c;
	bool adjoint = false;
	bool inverse = false;
	bool use_gpu = false;
	bool sizeinit = false;

	struct nufft_conf_s conf = nufft_conf_defaults;
	struct iter_conjgrad_conf cgconf = iter_conjgrad_defaults;

	long coilim_dims[DIMS];
	md_singleton_dims(DIMS, coilim_dims);

	float lambda = 0.;

	while (-1 != (c = getopt(argc, argv, "d:m:l:aiht"))) {

		switch (c) {

		case 'i':
			inverse = true;
			break;

		case 'a':
			adjoint = true;
			break;

		case 'd':
			sscanf(optarg, "%ld:%ld:%ld", &coilim_dims[0], &coilim_dims[1], &coilim_dims[2]);
			sizeinit = true;
			break;

		case 'm':
			cgconf.maxiter = atoi(optarg);
			break;

		case 'l':
			lambda = atof(optarg);
			break;

		case 't':
			conf.toeplitz = true;
			break;

		case 'h':
			usage(argv[0], stdout);
			help();
			exit(0);

		default:
			usage(argv[0], stderr);
			exit(1);
		}
	}

	if (argc - optind != 3) {

		usage(argv[0], stderr);
		exit(1);
	}

	// Read trajectory
	long traj_dims[DIMS];
	complex float* traj = load_cfl(argv[optind + 0], DIMS, traj_dims);

	assert(3 == traj_dims[0]);

	num_init();

	if (inverse || adjoint) {

		long ksp_dims[DIMS];
		const complex float* ksp = load_cfl(argv[optind + 1], DIMS, ksp_dims);

		assert(1 == ksp_dims[0]);
		assert(md_check_compat(DIMS, ~(PHS1_FLAG|PHS2_FLAG), ksp_dims, traj_dims));

		md_copy_dims(DIMS - 3, coilim_dims + 3, ksp_dims + 3);

		if (!sizeinit) {

			estimate_im_dims(DIMS, coilim_dims, traj_dims, traj);
			debug_printf(DP_INFO, "Est. image size: %ld %ld %ld\n", coilim_dims[0], coilim_dims[1], coilim_dims[2]);
		}

		complex float* img = create_cfl(argv[optind + 2], DIMS, coilim_dims);

		md_clear(DIMS, coilim_dims, img, CFL_SIZE);

		const struct linop_s* nufft_op = nufft_create(DIMS, ksp_dims, coilim_dims, traj_dims, traj, NULL, conf, use_gpu);

		if (inverse) {

			lsqr(DIMS, &(struct lsqr_conf){ lambda }, iter_conjgrad, &cgconf, nufft_op, NULL, coilim_dims, img, ksp_dims, ksp);

		} else {