Example #1
int main( int argc , char * argv[] )
{
   int do_norm=0 , qdet=2 , have_freq=0 , do_automask=0 ;
   float dt=0.0f , fbot=0.0f,ftop=999999.9f , blur=0.0f ;
   MRI_IMARR *ortar=NULL ; MRI_IMAGE *ortim=NULL ;
   THD_3dim_dataset **ortset=NULL ; int nortset=0 ;
   THD_3dim_dataset *inset=NULL , *outset=NULL;
   char *prefix="RSFC" ;
   byte *mask=NULL ;
   int mask_nx=0,mask_ny=0,mask_nz=0,nmask , verb=1 , 
		nx,ny,nz,nvox , nfft=0 , kk ;
   float **vec , **ort=NULL ; int nort=0 , vv , nopt , ntime  ;
   MRI_vectim *mrv ;
   float pvrad=0.0f ; int nosat=0 ;
   int do_despike=0 ;

	// @@ non-BP variables
	float fbotALL=0.0f, ftopALL=999999.9f; // do full range version
	int NumDen = 0; // switch for doing numerator or denom
	THD_3dim_dataset *outsetALL=NULL ; 	
	int m, mm;
	float delf; // harmonics
	int ind_low,ind_high,N_ny, ctr;
	float sqnt,nt_fac;
	gsl_fft_real_wavetable *real1, *real2; // GSL stuff
	gsl_fft_real_workspace *work;
	double *series1, *series2;	
	double *xx1,*xx2;
	float numer,denom,val;
	float *alff=NULL,*malff=NULL,*falff=NULL,
         *rsfa=NULL,*mrsfa=NULL,*frsfa=NULL; // values
	float meanALFF=0.0f,meanRSFA=0.0f; // will be for mean in brain region
	THD_3dim_dataset *outsetALFF=NULL;
	THD_3dim_dataset *outsetmALFF=NULL;
	THD_3dim_dataset *outsetfALFF=NULL;
	THD_3dim_dataset *outsetRSFA=NULL;
	THD_3dim_dataset *outsetmRSFA=NULL;
	THD_3dim_dataset *outsetfRSFA=NULL;
	char out_lff[300];
	char out_alff[300];
	char out_malff[300];
	char out_falff[300];
	char out_rsfa[300];
	char out_mrsfa[300];
	char out_frsfa[300];
	char out_unBP[300];
	int SERIES_OUT = 1;
	int UNBP_OUT = 0; 
	int DO_RSFA = 1;
	int BP_LAST = 0; // option for only doing filter to LFFs at very end of proc
	float de_rsfa=0.0f,nu_rsfa=0.0f;
	double pow1=0.0,pow2=0.0;

   /*-- help? --*/

   if( argc < 2 || strcmp(argv[1],"-help") == 0 ){
		printf(
"\n  Program to calculate common resting state functional connectivity (RSFC)\n"
"  parameters (ALFF, mALFF, fALFF, RSFA, etc.) for resting state time\n"
"  series.  This program is **heavily** based on the existing\n"
"  3dBandPass by RW Cox, with the amendments to calculate RSFC\n"
"  parameters written by PA Taylor (July, 2012).\n"
"  This program is part of FATCAT (Taylor & Saad, 2013) in AFNI. Importantly,\n"
"  its functionality can be included in the `afni_proc.py' processing-script \n"
"  generator; see that program's help file for an example including RSFC\n"
"  and spectral parameter calculation via the `-regress_RSFC' option.\n"
"\n"
"* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n"
"\n"
"  All options of 3dBandPass may be used here (with a couple other\n"
"  parameter options, as well): essentially, the motivation of this\n"
"  program is to produce ALFF, etc. values of the actual RSFC time\n"
"  series that you calculate.  Therefore, all the 3dBandPass processing\n"
"  you normally do en route to making your final `resting state time\n"
"  series' is done here to generate your LFFs, from which the\n"
"  amplitudes in the LFF band are calculated at the end.  In order to\n"
"  calculate fALFF, the same initial time series are put through the\n"
"  same processing steps which you have chosen but *without* the\n"
"  bandpass part; the spectrum of this second time series is used to\n"
"  calculate the fALFF denominator.\n"
" \n"
"  For more information about each RSFC parameter, see, e.g.:   \n"
"  ALFF/mALFF -- Zang et al. (2007),\n"
"  fALFF --      Zou et al. (2008),\n"
"  RSFA --       Kannurpatti & Biswal (2008).\n"
"\n"
"* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n"
"\n"
" + USAGE: 3dRSFC [options] fbot ftop dataset\n"
"\n"
"* One function of this program is to prepare datasets for input\n"
"   to 3dSetupGroupInCorr.  Other uses are left to your imagination.\n"
"\n"
"* 'dataset' is a 3D+time sequence of volumes\n"
"   ++ This must be a single imaging run -- that is, no discontinuities\n"
"       in time from 3dTcat-ing multiple datasets together.\n"
"\n"
"* fbot = lowest frequency in the passband, in Hz\n"
"   ++ fbot can be 0 if you want to do a lowpass filter only;\n"
"       HOWEVER, the mean and Nyquist freq are always removed.\n"
"\n"
"* ftop = highest frequency in the passband (must be > fbot)\n"
"   ++ if ftop > Nyquist freq, then it's a highpass filter only.\n"
"\n"
"* Set fbot=0 and ftop=99999 to do an 'allpass' filter.\n"
"  ++ Except for removal of the 0 and Nyquist frequencies, that is.\n"
"\n"
"* You cannot construct a 'notch' filter with this program!\n"
"  ++ You could use 3dRSFC followed by 3dcalc to get the same effect.\n"
"  ++ If you are understand what you are doing, that is.\n"
"  ++ Of course, that is the AFNI way -- if you don't want to\n"
"     understand what you are doing, use Some other PrograM, and\n"
"     you can still get Fine StatisticaL maps.\n"
"\n"
"* 3dRSFC will fail if fbot and ftop are too close for comfort.\n"
"  ++ Which means closer than one frequency grid step df,\n"
"     where df = 1 / (nfft * dt) [of course]\n"
"\n"
"* The actual FFT length used will be printed, and may be larger\n"
"   than the input time series length for the sake of efficiency.\n"
"  ++ The program will use a power-of-2, possibly multiplied by\n"
"     a power of 3 and/or 5 (up to and including the 3rd power of\n"
"     each of these: 3, 9, 27, and 5, 25, 125).\n"
"\n"
"* Note that the results of combining 3dDetrend and 3dRSFC will\n"
"   depend on the order in which you run these programs.  That's why\n"
"   3dRSFC has the '-ort' and '-dsort' options, so that the\n"
"   time series filtering can be done properly, in one place.\n"
"\n"
"* The output dataset is stored in float format.\n"
"\n"
"* The order of processing steps is the following (most are optional), and\n"
"  for the LFFs, the bandpass is done between the specified fbot and ftop,\n"
"  while for the `whole spectrum' (i.e., fALFF denominator) the bandpass is:\n"
"  done only to exclude the time series mean and the Nyquist frequency:\n"
" (0) Check time series for initial transients [does not alter data]\n"
" (1) Despiking of each time series\n"
" (2) Removal of a constant+linear+quadratic trend in each time series\n"
" (3) Bandpass of data time series\n"
" (4) Bandpass of -ort time series, then detrending of data\n"
"      with respect to the -ort time series\n"
" (5) Bandpass and de-orting of the -dsort dataset,\n"
"      then detrending of the data with respect to -dsort\n"
" (6) Blurring inside the mask [might be slow]\n"
" (7) Local PV calculation     [WILL be slow!]\n"
" (8) L2 normalization         [will be fast.]\n"
" (9) Calculate spectrum and amplitudes, for RSFC parameters.\n"
"\n"
"* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n"
"--------\n"
"OPTIONS:\n"
"--------\n"
" -despike        = Despike each time series before other processing.\n"
"                   ++ Hopefully, you don't actually need to do this,\n"
"                      which is why it is optional.\n"
" -ort f.1D       = Also orthogonalize input to columns in f.1D\n"
"                   ++ Multiple '-ort' options are allowed.\n"
" -dsort fset     = Orthogonalize each voxel to the corresponding\n"
"                    voxel time series in dataset 'fset', which must\n"
"                    have the same spatial and temporal grid structure\n"
"                    as the main input dataset.\n"
"                   ++ At present, only one '-dsort' option is allowed.\n"
" -nodetrend      = Skip the quadratic detrending of the input that\n"
"                    occurs before the FFT-based bandpassing.\n"
"                   ++ You would only want to do this if the dataset\n"
"                      had been detrended already in some other program.\n"
" -dt dd          = set time step to 'dd' sec [default=from dataset header]\n"
" -nfft N         = set the FFT length to 'N' [must be a legal value]\n"
" -norm           = Make all output time series have L2 norm = 1\n"
"                   ++ i.e., sum of squares = 1\n"
" -mask mset      = Mask dataset\n"
" -automask       = Create a mask from the input dataset\n"
" -blur fff       = Blur (inside the mask only) with a filter\n"
"                    width (FWHM) of 'fff' millimeters.\n"
" -localPV rrr    = Replace each vector by the local Principal Vector\n"
"                    (AKA first singular vector) from a neighborhood\n"
"                    of radius 'rrr' millimiters.\n"
"                   ++ Note that the PV time series is L2 normalized.\n"
"                   ++ This option is mostly for Bob Cox to have fun with.\n"
"\n"
" -input dataset  = Alternative way to specify input dataset.\n"
" -band fbot ftop = Alternative way to specify passband frequencies.\n"
"\n"
" -prefix ppp     = Set prefix name of output dataset. Name of filtered time\n"
"                   series would be, e.g., ppp_LFF+orig.*, and the parameter\n"
"                   outputs are named with obvious suffices.\n"
" -quiet          = Turn off the fun and informative messages. (Why?)\n"
" -no_rs_out      = Don't output processed time series-- just output\n"
"                   parameters (not recommended, since the point of\n"
"                   calculating RSFC params here is to have them be quite\n"
"                   related to the time series themselves which are used for\n"
"                   further analysis)."
" -un_bp_out      = Output the un-bandpassed series as well (default is not \n"
"                   to).  Name would be, e.g., ppp_unBP+orig.* .\n"
"                   with suffix `_unBP'.\n"
" -no_rsfa        = If you don't want RSFA output (default is to do so).\n"
" -bp_at_end      = A (probably unnecessary) switch to have bandpassing be \n"
"                   the very last processing step that is done in the\n"
"                   sequence of steps listed above; at Step 3 above, only \n"
"                   the time series mean and nyquist are BP'ed out, and then\n"
"                   the LFF series is created only after Step 9.  NB: this \n"
"                   probably makes only very small changes for most\n"
"                   processing sequences (but maybe not, depending usage).\n"
"\n"
" -notrans        = Don't check for initial positive transients in the data:\n"
"  *OR*             ++ The test is a little slow, so skipping it is OK,\n"
" -nosat               if you KNOW the data time series are transient-free.\n"
"                   ++ Or set AFNI_SKIP_SATCHECK to YES.\n"
"                   ++ Initial transients won't be handled well by the\n"
"                      bandpassing algorithm, and in addition may seriously\n"
"                      contaminate any further processing, such as inter-\n"
"                      voxel correlations via InstaCorr.\n"
"                   ++ No other tests are made [yet] for non-stationary \n"
"                      behavior in the time series data.\n"
"\n"
"* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n"
"\n"
"  If you use this program, please reference the introductory/description\n"
"  paper for the FATCAT toolbox:\n"
"        Taylor PA, Saad ZS (2013).  FATCAT: (An Efficient) Functional\n"
"        And Tractographic Connectivity Analysis Toolbox. Brain \n"
"        Connectivity 3(5):523-535.\n"
"____________________________________________________________________________\n"
);
		PRINT_AFNI_OMP_USAGE(
" 3dRSFC" ,
" * At present, the only part of 3dRSFC that is parallelized is the\n"
"   '-blur' option, which processes each sub-brick independently.\n"
									) ;
		PRINT_COMPILE_DATE ; exit(0) ;
   }
	
   /*-- startup --*/
	
   mainENTRY("3dRSFC"); machdep();
   AFNI_logger("3dRSFC",argc,argv);
   PRINT_VERSION("3dRSFC (from 3dBandpass by RW Cox): version THETA"); 
	AUTHOR("PA Taylor");
	
   nosat =  AFNI_yesenv("AFNI_SKIP_SATCHECK") ;
	
   nopt = 1 ;
   while( nopt < argc && argv[nopt][0] == '-' ){

		if( strcmp(argv[nopt],"-despike") == 0 ){  /* 08 Oct 2010 */
			do_despike++ ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-nfft") == 0 ){
			int nnup ;
			if( ++nopt >= argc ) ERROR_exit("need an argument after -nfft!") ;
			nfft = (int)strtod(argv[nopt],NULL) ;
			nnup = csfft_nextup_even(nfft) ;
			if( nfft < 16 || nfft != nnup )
				ERROR_exit("value %d after -nfft is illegal! Next legal value = %d",nfft,nnup) ;
			nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-blur") == 0 ){
			if( ++nopt >= argc ) ERROR_exit("need an argument after -blur!") ;
			blur = strtod(argv[nopt],NULL) ;
			if( blur <= 0.0f ) WARNING_message("non-positive blur?!") ;
			nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-localPV") == 0 ){
			if( ++nopt >= argc ) ERROR_exit("need an argument after -localpv!") ;
			pvrad = strtod(argv[nopt],NULL) ;
			if( pvrad <= 0.0f ) WARNING_message("non-positive -localpv?!") ;
			nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-prefix") == 0 ){
			if( ++nopt >= argc ) ERROR_exit("need an argument after -prefix!") ;
			prefix = strdup(argv[nopt]) ;
			if( !THD_filename_ok(prefix) ) ERROR_exit("bad -prefix option!") ;
			nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-automask") == 0 ){
			if( mask != NULL ) ERROR_exit("Can't use -mask AND -automask!") ;
			do_automask = 1 ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-mask") == 0 ){
			THD_3dim_dataset *mset ;
			if( ++nopt >= argc ) ERROR_exit("Need argument after '-mask'") ;
			if( mask != NULL || do_automask ) ERROR_exit("Can't have two mask inputs") ;
			mset = THD_open_dataset( argv[nopt] ) ;
			CHECK_OPEN_ERROR(mset,argv[nopt]) ;
			DSET_load(mset) ; CHECK_LOAD_ERROR(mset) ;
			mask_nx = DSET_NX(mset); mask_ny = DSET_NY(mset); mask_nz = DSET_NZ(mset);
			mask = THD_makemask( mset , 0 , 0.5f, 0.0f ) ; DSET_delete(mset) ;
			if( mask == NULL ) ERROR_exit("Can't make mask from dataset '%s'",argv[nopt]) ;
			nmask = THD_countmask( mask_nx*mask_ny*mask_nz , mask ) ;
			if( verb ) INFO_message("Number of voxels in mask = %d",nmask) ;
			if( nmask < 1 ) ERROR_exit("Mask is too small to process") ;
			nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-norm") == 0 ){
			do_norm = 1 ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-quiet") == 0 ){
			verb = 0 ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-no_rs_out") == 0 ){ // @@
			SERIES_OUT = 0 ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-un_bp_out") == 0 ){ // @@
			UNBP_OUT = 1 ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-no_rsfa") == 0 ){ // @@
			DO_RSFA = 0 ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-bp_at_end") == 0 ){ // @@
			BP_LAST = 1 ; nopt++ ; continue ;
		}




		if( strcmp(argv[nopt],"-notrans") == 0 || strcmp(argv[nopt],"-nosat") == 0 ){
			nosat = 1 ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-ort") == 0 ){
			if( ++nopt >= argc ) ERROR_exit("need an argument after -ort!") ;
			if( ortar == NULL ) INIT_IMARR(ortar) ;
			ortim = mri_read_1D( argv[nopt] ) ;
			if( ortim == NULL ) ERROR_exit("can't read from -ort '%s'",argv[nopt]) ;
			mri_add_name(argv[nopt],ortim) ;
			ADDTO_IMARR(ortar,ortim) ;
			nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-dsort") == 0 ){
			THD_3dim_dataset *qset ;
			if( ++nopt >= argc ) ERROR_exit("need an argument after -dsort!") ;
			if( nortset > 0 ) ERROR_exit("only 1 -dsort option is allowed!") ;
			qset = THD_open_dataset(argv[nopt]) ;
			CHECK_OPEN_ERROR(qset,argv[nopt]) ;
			ortset = (THD_3dim_dataset **)realloc(ortset,
															  sizeof(THD_3dim_dataset *)*(nortset+1)) ;
			ortset[nortset++] = qset ;
			nopt++ ; continue ;
		}

		if( strncmp(argv[nopt],"-nodetrend",6) == 0 ){
			qdet = 0 ; nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-dt") == 0 ){
			if( ++nopt >= argc ) ERROR_exit("need an argument after -dt!") ;
			dt = (float)strtod(argv[nopt],NULL) ;
			if( dt <= 0.0f ) WARNING_message("value after -dt illegal!") ;
			nopt++ ; continue ;
		}

		if( strcmp(argv[nopt],"-input") == 0 ){
			if( inset != NULL ) ERROR_exit("Can't have 2 -input options!") ;
			if( ++nopt >= argc ) ERROR_exit("need an argument after -input!") ;
			inset = THD_open_dataset(argv[nopt]) ;
			CHECK_OPEN_ERROR(inset,argv[nopt]) ; 

			nopt++ ; continue ;
		}

		if( strncmp(argv[nopt],"-band",5) == 0 ){
			if( ++nopt >= argc-1 ) ERROR_exit("need 2 arguments after -band!") ;
			if( have_freq ) WARNING_message("second -band option replaces first one!") ;
			fbot = strtod(argv[nopt++],NULL) ;
			ftop = strtod(argv[nopt++],NULL) ;
			have_freq = 1 ; continue ;
		}

		ERROR_exit("Unknown option: '%s'",argv[nopt]) ;
   }

   /** check inputs for reasonablositiness **/

   if( !have_freq ){
		if( nopt+1 >= argc )
			ERROR_exit("Need frequencies on command line after options!") ;
		fbot = (float)strtod(argv[nopt++],NULL) ;
		ftop = (float)strtod(argv[nopt++],NULL) ;
   }

   if( inset == NULL ){
		if( nopt >= argc )
			ERROR_exit("Need input dataset name on command line after options!") ;
		inset = THD_open_dataset(argv[nopt]) ;
		CHECK_OPEN_ERROR(inset,argv[nopt]) ;	 

		nopt++ ;
   }
   DSET_UNMSEC(inset) ;

   if( fbot < 0.0f  ) ERROR_exit("fbot value can't be negative!") ;
   if( ftop <= fbot ) ERROR_exit("ftop value %g must be greater than fbot value %g!",ftop,fbot) ;

   ntime = DSET_NVALS(inset) ;
   if( ntime < 9 ) ERROR_exit("Input dataset is too short!") ;

   if( nfft <= 0 ){
		nfft = csfft_nextup_even(ntime) ;
		if( verb ) INFO_message("Data length = %d  FFT length = %d",ntime,nfft) ;
		(void)THD_bandpass_set_nfft(nfft) ;
   } else if( nfft < ntime ){
		ERROR_exit("-nfft %d is less than data length = %d",nfft,ntime) ;
   } else {
		kk = THD_bandpass_set_nfft(nfft) ;
		if( kk != nfft && verb )
			INFO_message("Data length = %d  FFT length = %d",ntime,kk) ;
   }

   if( dt <= 0.0f ){
		dt = DSET_TR(inset) ;
		if( dt <= 0.0f ){
			WARNING_message("Setting dt=1.0 since input dataset lacks a time axis!") ;
			dt = 1.0f ;
		}
   }
   ftopALL = 1./dt ;// Aug,2016: should solve problem of a too-large
                    // value for THD_bandpass_vectors(), while still
                    // being >f_{Nyquist}
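   /* Quick check with assumed-for-illustration values: if dt = 2.0 s, then
      f_Nyquist = 1/(2*dt) = 0.25 Hz and ftopALL = 1/dt = 0.5 Hz, which is
      safely above f_Nyquist while staying small enough for
      THD_bandpass_vectors() to accept. */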

   if( !THD_bandpass_OK(ntime,dt,fbot,ftop,1) ) ERROR_exit("Can't continue!") ;

   nx = DSET_NX(inset); ny = DSET_NY(inset); nz = DSET_NZ(inset); nvox = nx*ny*nz;

   /* check mask, or create it */

   if( verb ) INFO_message("Loading input dataset time series" ) ;
   DSET_load(inset) ;

   if( mask != NULL ){
		if( mask_nx != nx || mask_ny != ny || mask_nz != nz )
			ERROR_exit("-mask dataset grid doesn't match input dataset") ;

   } else if( do_automask ){
		mask = THD_automask( inset ) ;
		if( mask == NULL )
			ERROR_message("Can't create -automask from input dataset?") ;
		nmask = THD_countmask( DSET_NVOX(inset) , mask ) ;
		if( verb ) INFO_message("Number of voxels in automask = %d",nmask);
		if( nmask < 1 ) ERROR_exit("Automask is too small to process") ;

   } else {
		mask = (byte *)malloc(sizeof(byte)*nvox) ; nmask = nvox ;
		memset(mask,1,sizeof(byte)*nvox) ;
		// if( verb ) // @@ alert if aaaalllllll vox are going to be analyzed!
		INFO_message("No mask ==> processing all %d voxels",nvox);
   }

   /* A simple check of dataset quality [08 Feb 2010] */

   if( !nosat ){
		float val ;
		INFO_message(
						 "Checking dataset for initial transients [use '-notrans' to skip this test]") ;
		val = THD_saturation_check(inset,mask,0,0) ; kk = (int)(val+0.54321f) ;
		if( kk > 0 )
			ININFO_message(
								"Looks like there %s %d non-steady-state initial time point%s :-(" ,
								((kk==1) ? "is" : "are") , kk , ((kk==1) ? " " : "s") ) ;
		else if( val > 0.3210f )  /* don't ask where this threshold comes from! */
			ININFO_message(
								"MAYBE there's an initial positive transient of 1 point, but it's hard to tell\n") ;
		else
			ININFO_message("No widespread initial positive transient detected :-)") ;
   }

   /* check -dsort inputs for match to inset */

   for( kk=0 ; kk < nortset ; kk++ ){
		if( DSET_NX(ortset[kk])    != nx ||
			 DSET_NY(ortset[kk])    != ny ||
			 DSET_NZ(ortset[kk])    != nz ||
			 DSET_NVALS(ortset[kk]) != ntime )
			ERROR_exit("-dsort %s doesn't match input dataset grid" ,
						  DSET_BRIKNAME(ortset[kk]) ) ;
   }

   /* convert input dataset to a vectim, which is more fun */

	// @@ convert BP'ing ftop/bot into indices for the DFT (below)
	delf = 1.0/(ntime*dt); 
	ind_low = (int) rint(fbot/delf);
	ind_high = (int) rint(ftop/delf);
	if( ntime % 2 ) // nyquist number
		N_ny = (ntime-1)/2;
	else
		N_ny = ntime/2;
	sqnt = sqrt(ntime);
	nt_fac = sqrt(ntime*(ntime-1));
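	// A worked example with assumed-for-illustration values: ntime = 200,
	// dt = 2.0 s ==> delf = 1/(200*2.0) = 0.0025 Hz; a passband of
	// fbot = 0.01 Hz to ftop = 0.08 Hz then maps to DFT indices
	// ind_low = rint(0.01/0.0025) = 4 and ind_high = rint(0.08/0.0025) = 32,
	// and N_ny = 200/2 = 100 is the index of the Nyquist bin.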

	// @@ if BP_LAST==0:
	// now we go through twice, doing LFF bandpass for NumDen==0 and
	// `full spectrum' processing for NumDen==1.
	// if BP_LAST==1:
	// now we go through once, doing only `full spectrum' processing
	for( NumDen=0 ; NumDen<2 ; NumDen++) {
		//if( NumDen==1 ){ // full spectrum
		//	fbot = fbotALL;
		//	ftop = ftopALL;
		//}
		
		// essentially, no bandpass is applied here; the filtering at the end
		// is applied to both versions, so the final output spectrum still
		// contains only frequencies in the chosen (e.g., 0.01-0.08 Hz) range
		if( BP_LAST==1 )
			INFO_message("Only doing filtering to LFFs at end!");
		
		
		mrv = THD_dset_to_vectim( inset , mask , 0 ) ;
		if( mrv == NULL ) ERROR_exit("Can't load time series data!?") ;
		if( NumDen==1 )
			DSET_unload(inset) ; // @@ only unload on 2nd pass

		/* similarly for the ort vectors */

		if( ortar != NULL ){
			for( kk=0 ; kk < IMARR_COUNT(ortar) ; kk++ ){
				ortim = IMARR_SUBIM(ortar,kk) ;
				if( ortim->nx < ntime )
					ERROR_exit("-ort file %s is shorter than input dataset time series",
								  ortim->name ) ;
				ort  = (float **)realloc( ort , sizeof(float *)*(nort+ortim->ny) ) ;
				for( vv=0 ; vv < ortim->ny ; vv++ )
					ort[nort++] = MRI_FLOAT_PTR(ortim) + ortim->nx * vv ;
			}
		}

		/* all the real work now */

		if( do_despike ){
			int_pair nsp ;
			if( verb ) INFO_message("Testing data time series for spikes") ;
			nsp = THD_vectim_despike9( mrv ) ;
			if( verb ) ININFO_message(" -- Squashed %d spikes from %d voxels",nsp.j,nsp.i) ;
		}

		if( verb ) INFO_message("Bandpassing data time series") ;

		if( (BP_LAST==0) && (NumDen==0) )
			(void)THD_bandpass_vectim( mrv , dt,fbot,ftop , qdet , nort,ort ) ;
		else
			(void)THD_bandpass_vectim( mrv , dt,fbotALL,ftopALL, qdet,nort,ort ) ;

		/* OK, maybe a little more work */

		if( nortset == 1 ){
			MRI_vectim *orv ;
			orv = THD_dset_to_vectim( ortset[0] , mask , 0 ) ;
			if( orv == NULL ){
				ERROR_message("Can't load -dsort %s",DSET_BRIKNAME(ortset[0])) ;
			} else {
				float *dp , *mvv , *ovv , ff ;
				if( verb ) INFO_message("Orthogonalizing to bandpassed -dsort") ;
				//(void)THD_bandpass_vectim( orv , dt,fbot,ftop , qdet , nort,ort ) ; //@@
				if( (BP_LAST==0) && (NumDen==0) )
					(void)THD_bandpass_vectim(orv,dt,fbot,ftop,qdet,nort,ort);
				else
					(void)THD_bandpass_vectim(orv,dt,fbotALL,ftopALL,qdet,nort,ort);

				THD_vectim_normalize( orv ) ;
				dp = malloc(sizeof(float)*mrv->nvec) ;
				THD_vectim_vectim_dot( mrv , orv , dp ) ;
				for( vv=0 ; vv < mrv->nvec ; vv++ ){
					ff = dp[vv] ;
					if( ff != 0.0f ){
						mvv = VECTIM_PTR(mrv,vv) ; ovv = VECTIM_PTR(orv,vv) ;
						for( kk=0 ; kk < ntime ; kk++ ) mvv[kk] -= ff*ovv[kk] ;
					}
				}
				VECTIM_destroy(orv) ; free(dp) ;
			}
		}

		if( blur > 0.0f ){
			if( verb )
				INFO_message("Blurring time series data spatially; FWHM=%.2f",blur) ;
			mri_blur3D_vectim( mrv , blur ) ;
		}
		if( pvrad > 0.0f ){
			if( verb )
				INFO_message("Local PV-ing time series data spatially; radius=%.2f",pvrad) ;
			THD_vectim_normalize( mrv ) ;
			THD_vectim_localpv( mrv , pvrad ) ;
		}
		if( do_norm && pvrad <= 0.0f ){
			if( verb ) INFO_message("L2 normalizing time series data") ;
			THD_vectim_normalize( mrv ) ;
		}

		/* create output dataset, populate it, write it, then quit */
		if( (NumDen==0) ) { // @@ BP'ed version;  will do filt if BP_LAST

			if(BP_LAST) // do bandpass here for BP_LAST
				(void)THD_bandpass_vectim(mrv,dt,fbot,ftop,qdet,0,NULL);

			if( verb ) INFO_message("Creating output dataset in memory, then writing it") ;
			outset = EDIT_empty_copy(inset) ;
			if(SERIES_OUT){
				sprintf(out_lff,"%s_LFF",prefix); 
				EDIT_dset_items( outset , ADN_prefix,out_lff , ADN_none ) ;
				tross_Copy_History( inset , outset ) ;
				tross_Make_History( "3dBandpass" , argc,argv , outset ) ;
			}
			for( vv=0 ; vv < ntime ; vv++ )
				EDIT_substitute_brick( outset , vv , MRI_float , NULL ) ;
		
#if 1
			THD_vectim_to_dset( mrv , outset ) ;
#else
			AFNI_OMP_START ;
#pragma omp parallel
			{ float *far , *var ; int *ivec=mrv->ivec ; int vv,kk ;
#pragma omp for
				for( vv=0 ; vv < ntime ; vv++ ){
					far = DSET_BRICK_ARRAY(outset,vv) ; var = mrv->fvec + vv ;
					for( kk=0 ; kk < nmask ; kk++ ) far[ivec[kk]] = var[kk*ntime] ;
				}
			}
			AFNI_OMP_END ;
#endif
			VECTIM_destroy(mrv) ;
			if(SERIES_OUT){ // @@
				DSET_write(outset) ; if( verb ) WROTE_DSET(outset) ;
			}
		}
		else{ // @@ non-BP'ed version
			if( verb ) INFO_message("Creating output dataset 2 in memory") ;

			// do this here because LFF version was also BP'ed at end.
			if(BP_LAST) // do bandpass here for BP_LAST
				(void)THD_bandpass_vectim(mrv,dt,fbotALL,ftopALL,qdet,0,NULL);

			outsetALL = EDIT_empty_copy(inset) ;
			if(UNBP_OUT){ 
				sprintf(out_unBP,"%s_unBP",prefix); 
				EDIT_dset_items( outsetALL, ADN_prefix, out_unBP, ADN_none );
				tross_Copy_History( inset , outsetALL ) ;
				tross_Make_History( "3dRSFC" , argc,argv , outsetALL ) ;
			}
			for( vv=0 ; vv < ntime ; vv++ )
				EDIT_substitute_brick( outsetALL , vv , MRI_float , NULL ) ;
		
#if 1
			THD_vectim_to_dset( mrv , outsetALL ) ;
#else
			AFNI_OMP_START ;
#pragma omp parallel
			{ float *far , *var ; int *ivec=mrv->ivec ; int vv,kk ;
#pragma omp for
				for( vv=0 ; vv < ntime ; vv++ ){
					far = DSET_BRICK_ARRAY(outsetALL,vv) ; var = mrv->fvec + vv ;
					for( kk=0 ; kk < nmask ; kk++ ) far[ivec[kk]] = var[kk*ntime] ;
				}
			}
			AFNI_OMP_END ;
#endif
			VECTIM_destroy(mrv) ;
			if(UNBP_OUT){ 
				DSET_write(outsetALL) ; if( verb ) WROTE_DSET(outsetALL) ;
			}
		}
	}// end of NumDen loop


	// @@
	INFO_message("Starting the (f)ALaFFel calcs") ;

	// allocations
	series1 = (double *)calloc(ntime,sizeof(double)); 
	series2 = (double *)calloc(ntime,sizeof(double)); 
	xx1 = (double *)calloc(2*ntime,sizeof(double)); 
	xx2 = (double *)calloc(2*ntime,sizeof(double)); 
	alff = (float *)calloc(nvox,sizeof(float)); 
	malff = (float *)calloc(nvox,sizeof(float)); 
	falff = (float *)calloc(nvox,sizeof(float)); 

	if( (series1 == NULL) || (series2 == NULL) 
		 || (xx1 == NULL) || (xx2 == NULL) 
		 || (alff == NULL) || (malff == NULL) || (falff == NULL)) { 
		fprintf(stderr, "\n\n MemAlloc failure.\n\n");
		exit(122);
	}
	if(DO_RSFA) {
		rsfa = (float *)calloc(nvox,sizeof(float)); 
		mrsfa = (float *)calloc(nvox,sizeof(float)); 
		frsfa = (float *)calloc(nvox,sizeof(float)); 
		if( (rsfa == NULL) || (mrsfa == NULL) || (frsfa == NULL)) { 
			fprintf(stderr, "\n\n MemAlloc failure.\n\n");
			exit(123);
		}	
	}
	
	
	work = gsl_fft_real_workspace_alloc (ntime);
	real1 = gsl_fft_real_wavetable_alloc (ntime);
	real2 = gsl_fft_real_wavetable_alloc (ntime);
	gsl_complex_packed_array compl_freqs1 = xx1;
	gsl_complex_packed_array compl_freqs2 = xx2;
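	/* For reference, a minimal standalone sketch of the GSL real-FFT pattern
	   used below (toy length and data; not part of this program's flow):

	       size_t N = 8;
	       double x[8] = {1,2,3,4,4,3,2,1};           // toy series
	       double cpx[16];                            // 2*N packed (re,im) pairs
	       gsl_fft_real_workspace *w  = gsl_fft_real_workspace_alloc(N);
	       gsl_fft_real_wavetable *wt = gsl_fft_real_wavetable_alloc(N);
	       gsl_fft_real_transform(x, 1, N, wt, w);    // in-place half-complex output
	       gsl_fft_halfcomplex_unpack(x, cpx, 1, N);  // expand to full complex array
	       // amplitude of bin m: sqrt(cpx[2*m]*cpx[2*m] + cpx[2*m+1]*cpx[2*m+1])
	       gsl_fft_real_wavetable_free(wt);
	       gsl_fft_real_workspace_free(w);
	*/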




	// *********************************************************************
	// *********************************************************************
	// **************    Falafelling = ALFF/fALFF calcs    *****************
	// *********************************************************************
	// *********************************************************************

	// We now have the BP'ed data set (outset) and the non-BP'ed one
	// (outsetALL).  Now we'll FFT both, get amplitudes in the appropriate
	// ranges, and calculate ALFF, mALFF, fALFF (and the RSFA variants).
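	// A compact summary of what the loop below computes, writing A1(f_m) and
	// A2(f_m) for the one-sided amplitudes of the bandpassed and un-bandpassed
	// series and N for the series length:
	//   ALFF  = (2/sqrt(N)) * sum_{fbot<=f_m<=ftop} A1(f_m)
	//   fALFF = sum_{fbot<=f_m<=ftop} A1(f_m) / sum_{0<f_m<f_Nyq} A2(f_m)
	//   mALFF = ALFF / (mean ALFF over the mask)
	//   RSFA  = sqrt( 2 * sum_{band} A1(f_m)^2 ) / sqrt(N*(N-1)),
	// with fRSFA and mRSFA defined analogously to fALFF and mALFF.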

	ctr = 0;
	for( kk=0; kk<nvox ; kk++) {
		if(mask[kk]) {
			
			// BP one, and unBP one, either for BP_LAST or !BP_LAST
			for( m=0 ; m<ntime ; m++ ) {
				series1[m] = THD_get_voxel(outset,kk,m);
				series2[m] = THD_get_voxel(outsetALL,kk,m);
			}
			
			
			mm = gsl_fft_real_transform(series1, 1, ntime, real1, work);
			mm = gsl_fft_halfcomplex_unpack(series1, compl_freqs1, 1, ntime);
			mm = gsl_fft_real_transform(series2, 1, ntime, real2, work);
			mm = gsl_fft_halfcomplex_unpack(series2, compl_freqs2, 1, ntime);

			numer = 0.0f; 
			denom = 0.0f;
			de_rsfa = 0.0f;
			nu_rsfa = 0.0f;
			for( m=1 ; m<N_ny ; m++ ) {
				mm = 2*m;
				pow2 = compl_freqs2[mm]*compl_freqs2[mm] +
					compl_freqs2[mm+1]*compl_freqs2[mm+1]; // power
				//pow2*=2;// factor of 2 since ampls are even funcs
				denom+= (float) sqrt(pow2); // amplitude 
				de_rsfa+= (float) pow2;
				
				if( ( m>=ind_low ) && ( m<=ind_high ) ){
					pow1 = compl_freqs1[mm]*compl_freqs1[mm]+
						compl_freqs1[mm+1]*compl_freqs1[mm+1];
					//pow1*=2;
					numer+= (float) sqrt(pow1);
					nu_rsfa+= (float) pow1;
				}
			}

			if( denom>0.000001 )
			  falff[kk] = numer/denom;
			else
			  falff[kk] = 0.;
			alff[kk] = 2*numer/sqnt;// factor of 2 since ampl is even funct
			meanALFF+= alff[kk];

			if(DO_RSFA){
			  nu_rsfa = sqrt(2*nu_rsfa); // factor of 2 since ampls 
			  de_rsfa = sqrt(2*de_rsfa); // are even funcs
			  if( de_rsfa>0.000001 )
			    frsfa[kk] = nu_rsfa/de_rsfa;
			  else
			    frsfa[kk]=0.;
			  rsfa[kk] = nu_rsfa/nt_fac;
			  meanRSFA+= rsfa[kk];
			}
			
			ctr+=1;
		}
	}
	meanALFF/= ctr;
	meanRSFA/= ctr;

	gsl_fft_real_wavetable_free(real1);
	gsl_fft_real_wavetable_free(real2);
	gsl_fft_real_workspace_free(work);

	// ALFF (and RSFA) values divided by their mean within the mask
	for( kk=0 ; kk<nvox ; kk++ ) 
		if(mask[kk]){
			malff[kk] = alff[kk]/meanALFF;
			if(DO_RSFA)
				mrsfa[kk] = rsfa[kk]/meanRSFA;
		}
	// **************************************************************
	// **************************************************************
	//                 Store and output
	// **************************************************************
	// **************************************************************
	
	outsetALFF = EDIT_empty_copy( inset ) ; 
	sprintf(out_alff,"%s_ALFF",prefix); 
	EDIT_dset_items( outsetALFF,
                    ADN_nvals, 1,
						  ADN_datum_all , MRI_float , 
						  ADN_prefix    , out_alff,
						  ADN_none ) ;
	if( !THD_ok_overwrite() && THD_is_ondisk(DSET_HEADNAME(outsetALFF)) )
		ERROR_exit("Can't overwrite existing dataset '%s'",
					  DSET_HEADNAME(outsetALFF));
	EDIT_substitute_brick(outsetALFF, 0, MRI_float, alff); 
	alff=NULL;
	THD_load_statistics(outsetALFF);
	tross_Make_History("3dRSFC", argc, argv, outsetALFF);
	THD_write_3dim_dataset(NULL, NULL, outsetALFF, True);

	outsetfALFF = EDIT_empty_copy( inset ) ;
	sprintf(out_falff,"%s_fALFF",prefix); 
	EDIT_dset_items( outsetfALFF,
                    ADN_nvals, 1,
						  ADN_datum_all , MRI_float , 
						  ADN_prefix    , out_falff,
						  ADN_none ) ;
	if( !THD_ok_overwrite() && THD_is_ondisk(DSET_HEADNAME(outsetfALFF)) )
		ERROR_exit("Can't overwrite existing dataset '%s'",
					  DSET_HEADNAME(outsetfALFF));
	EDIT_substitute_brick(outsetfALFF, 0, MRI_float, falff); 
	falff=NULL;
	THD_load_statistics(outsetfALFF);
	tross_Make_History("3dRSFC", argc, argv, outsetfALFF);
	THD_write_3dim_dataset(NULL, NULL, outsetfALFF, True);



	outsetmALFF = EDIT_empty_copy( inset ) ;
	sprintf(out_malff,"%s_mALFF",prefix); 
	EDIT_dset_items( outsetmALFF,
                    ADN_nvals, 1,
                    ADN_datum_all , MRI_float , 
						  ADN_prefix    , out_malff,
						  ADN_none ) ;
	if( !THD_ok_overwrite() && THD_is_ondisk(DSET_HEADNAME(outsetmALFF)) )
		ERROR_exit("Can't overwrite existing dataset '%s'",
					  DSET_HEADNAME(outsetmALFF));
	EDIT_substitute_brick(outsetmALFF, 0, MRI_float, malff); 
	malff=NULL;
	THD_load_statistics(outsetmALFF);
	tross_Make_History("3dRSFC", argc, argv, outsetmALFF);
	THD_write_3dim_dataset(NULL, NULL, outsetmALFF, True);

	if(DO_RSFA){
     outsetRSFA = EDIT_empty_copy( inset ) ;
		sprintf(out_rsfa,"%s_RSFA",prefix); 
		EDIT_dset_items( outsetRSFA,
                       ADN_nvals, 1,
                       ADN_datum_all , MRI_float , 
							  ADN_prefix    , out_rsfa,
							  ADN_none ) ;
		if( !THD_ok_overwrite() && THD_is_ondisk(DSET_HEADNAME(outsetRSFA)) )
			ERROR_exit("Can't overwrite existing dataset '%s'",
						  DSET_HEADNAME(outsetRSFA));
		EDIT_substitute_brick(outsetRSFA, 0, MRI_float, rsfa); 
		rsfa=NULL;
		THD_load_statistics(outsetRSFA);
		tross_Make_History("3dRSFC", argc, argv, outsetRSFA);
		THD_write_3dim_dataset(NULL, NULL, outsetRSFA, True);
		
      outsetfRSFA = EDIT_empty_copy( inset ) ;
		sprintf(out_frsfa,"%s_fRSFA",prefix); 
		EDIT_dset_items( outsetfRSFA,
                       ADN_nvals, 1,
                       ADN_datum_all , MRI_float , 
							  ADN_prefix    , out_frsfa,
							  ADN_none ) ;
		if( !THD_ok_overwrite() && THD_is_ondisk(DSET_HEADNAME(outsetfRSFA)) )
			ERROR_exit("Can't overwrite existing dataset '%s'",
						  DSET_HEADNAME(outsetfRSFA));
		EDIT_substitute_brick(outsetfRSFA, 0, MRI_float, frsfa); 
		frsfa=NULL;
		THD_load_statistics(outsetfRSFA);
		tross_Make_History("3dRSFC", argc, argv, outsetfRSFA);
		THD_write_3dim_dataset(NULL, NULL, outsetfRSFA, True);
		
		outsetmRSFA = EDIT_empty_copy( inset ) ; 
		sprintf(out_mrsfa,"%s_mRSFA",prefix); 
		EDIT_dset_items( outsetmRSFA,
                       ADN_nvals, 1,
                       ADN_datum_all , MRI_float , 
							  ADN_prefix    , out_mrsfa,
							  ADN_none ) ;
		if( !THD_ok_overwrite() && THD_is_ondisk(DSET_HEADNAME(outsetmRSFA)) )
			ERROR_exit("Can't overwrite existing dataset '%s'",
						  DSET_HEADNAME(outsetmRSFA));
		EDIT_substitute_brick(outsetmRSFA, 0, MRI_float, mrsfa); 
		mrsfa=NULL;
		THD_load_statistics(outsetmRSFA);
		tross_Make_History("3dRSFC", argc, argv, outsetmRSFA);
		THD_write_3dim_dataset(NULL, NULL, outsetmRSFA, True);
	}



	// ************************************************************
	// ************************************************************
	//                    Freeing
	// ************************************************************
	// ************************************************************

	DSET_delete(inset);
	DSET_delete(outsetALL);
	DSET_delete(outset);
	DSET_delete(outsetALFF);
	DSET_delete(outsetmALFF);
	DSET_delete(outsetfALFF);
	DSET_delete(outsetRSFA);
	DSET_delete(outsetmRSFA);
	DSET_delete(outsetfRSFA);

	free(inset);
	free(outsetALL);
	free(outset);
	free(outsetALFF);
	free(outsetmALFF);
	free(outsetfALFF);
	free(outsetRSFA);
	free(outsetmRSFA);
	free(outsetfRSFA);

	free(rsfa);
	free(mrsfa);
	free(frsfa);
	free(alff);
	free(malff);
	free(falff);
	free(mask);
	free(series1);
	free(series2);
	free(xx1);
	free(xx2);

	exit(0) ;
}
Example #2
void NWC_help(void)
{
   printf(
    "Usage: 3dNwarpCat [options] warp1 warp2 ...\n"
    "------\n"
    " * This program catenates (composes) 3D warps defined on a grid,\n"
    "   OR via a matrix.\n"
    "  ++ All transformations are from DICOM xyz (in mm) to DICOM xyz.\n"
    "\n"
    " * Matrix warps are in files that end in '.1D' or in '.txt'.  A matrix\n"
    "   warp file should have 12 numbers in it, as output (for example), by\n"
    "   '3dAllineate -1Dmatrix_save'.\n"
    "  ++ The matrix (affine) warp can have either 12 numbers on one row,\n"
    "     or be in the 3x4 format.\n"
    "  ++ The 12-numbers-on-one-row format is preferred, and is the format\n"
    "     output by the '-1Dmatrix_save' option in 3dvolreg and 3dAllineate.\n"
    "  ++ The matrix warp is a transformation of coordinates, not voxels,\n"
    "     and its use presumes the correctness of the voxel-to-coordinate\n"
    "     transformation stored in the header of the datasets involved.\n"
    "\n"
    " * Nonlinear warps are in dataset files (AFNI .HEAD/.BRIK or NIfTI .nii)\n"
    "   with 3 sub-bricks giving the DICOM order xyz grid displacements in mm.\n"
    "  ++ Note that it is not required that the xyz order of voxel storage be in\n"
    "     DICOM order, just that the displacements be in DICOM order (and sign).\n"
    "  ++ However, it is important that the warp dataset coordinate order be\n"
    "     properly specified in the dataset header, since warps are applied\n"
    "     based on coordinates, not on voxels.\n"
    "  ++ Also note again that displacements are in mm, NOT in voxel.\n"
    "  ++ You can 'edit' the warp on the command line by using the 'FAC:'\n"
    "     scaling prefix, described later. This input editing could be used\n"
    "     to change the sign of the xyz displacments, if needed.\n"
    "\n"
    " * If all the input warps are matrices, then the output is a matrix\n"
    "   and will be written to the file 'prefix.aff12.1D'.\n"
    "  ++ Unless the prefix already contains the string '.1D', in which case\n"
    "     the filename is just the prefix.\n"
    "  ++ If 'prefix' is just 'stdout', then the output matrix is written\n"
    "     to standard output.\n"
    "  ++ In any of these cases, the output format is 12 numbers in one row.\n"
    "\n"
    " * If any of the input warps are datasets, they must all be defined on\n"
    "   the same 3D grid!\n"
    "  ++ And of course, then the output will be a dataset on the same grid.\n"
    "  ++ However, you can expand the grid using the '-expad' option.\n"
    "\n"
    " * The order of operations in the final (output) warp is, for the\n"
    "   case of 3 input warps:\n"
    "\n"
    "     OUTPUT(x) = warp3( warp2( warp1(x) ) )\n"
    "\n"
    "   That is, warp1 is applied first, then warp2, et cetera.\n"
    "   The 3D x coordinates are taken from each grid location in the\n"
    "   first dataset defined on a grid.\n"
    "\n"
    " * For example, if you aligned a dataset to a template with @auto_tlrc,\n"
    "   then further refined the alignment with 3dQwarp, you would do something\n"
    "   like this:\n"
    "       warp1 is the output of 3dQwarp\n"
    "       warp2 is the matrix from @auto_tlrc\n"
    "       This is the proper order, since the desired warp takes template xyz\n"
    "       to original dataset xyz, and we have\n"
    "         3dQwarp warp:      takes template xyz to affinely aligned xyz, and\n"
    "         @auto_tlrc matrix: takes affinely aligned xyz to original xyz\n"
    "\n"
    "   3dNwarpCat -prefix Fred_total_WARP -warp1 Fred_WARP+tlrc.HEAD -warp2 Fred.Xat.1D \n"
    "\n"
    "   The dataset Fred_total_WARP+tlrc.HEAD could then be used to transform original\n"
    "   datasets directly to the final template space, as in\n"
    "\n"
    "   3dNwarpApply -prefix Wilma_warped        \\\n"
    "                -nwarp Fred_total_WARP+tlrc \\\n"
    "                -source Wilma+orig          \\\n"
    "                -master Fred_total_WARP+tlrc\n"
    "\n"
    " * If you wish to invert a warp before it is used here, supply its\n"
    "   input name in the form of\n"
    "     INV(warpfilename)\n"
    "   To produce the inverse of the warp in the example above:\n"
    "\n"
    "   3dNwarpCat -prefix Fred_total_WARPINV        \\\n"
    "              -warp2 'INV(Fred_WARP+tlrc.HEAD)' \\\n"
    "              -warp1 'INV(Fred.Xat.1D)' \n"
    "\n"
    "   Note the order of the warps is reversed, in addition to the use of 'INV()'.\n"
    "\n"
    " * The final warp may also be inverted simply by adding the '-iwarp' option, as in\n"
    "\n"
    "   3dNwarpCat -prefix Fred_total_WARPINV -iwarp -warp1 Fred_WARP+tlrc.HEAD -warp2 Fred.Xat.1D \n"
    "\n"
    " * Other functions you can apply to modify a 3D dataset warp are:\n"
    "    SQRT(datasetname) to get the square root of a warp\n"
    "    SQRTINV(datasetname) to get the inverse square root of a warp\n"
    "   However, you can't do more complex expressions, such as 'SQRT(SQRT(warp))'.\n"
    "   If you think you need something so rococo, use 3dNwarpCalc.  Or think again.\n"
    "\n"
    " * You can also manufacture a 3D warp from a 1-brick dataset with displacments\n"
    "   in a single direction.  For example:\n"
    "      AP:0.44:disp+tlrc.HEAD  (note there are no blanks here!)\n"
    "   means to take the 1-brick dataset disp+tlrc.HEAD, scale the values inside\n"
    "   by 0.44, then load them into the y-direction displacements of a 3-brick 3D\n"
    "   warp, and fill the other 2 directions with zeros.  The prefixes you can use\n"
    "   here for the 1-brick to 3-brick displacment trick are\n"
    "     RL: for x-displacements (Right-to-Left)\n"
    "     AP: for y-displacements (Anterior-to-Posterior)\n"
    "     IS: for z-displacements (Inferior-to-Superior)\n"
    "     VEC:a,b,c: for displacements in the vector direction (a,b,c),\n"
    "                which vector will be scaled to be unit length.\n"
    "     Following the prefix's colon, you can put in a scale factor followed\n"
    "     by another colon (as in '0.44:' in the example above).  Then the name\n"
    "     of the dataset with the 1D displacments follows.\n"
    " * You might reasonably ask of what possible value is this peculiar format?\n"
    "   This was implemented to use Bz fieldmaps for correction of EPI datasets,\n"
    "   which are distorted only along the phase-encoding direction.  This format\n"
    "   for specifying the input dataset (the fieldmap) is built to make the\n"
    "   scripting a little easier.  Its principal use is in the program 3dNwarpApply.\n"
    "\n"
    " * You can scale the displacements in a 3D warp file via the 'FAC:' prefix, as in\n"
    "     FAC:0.6,0.4,-0.2:fred_WARP.nii\n"
    "   which will scale the x-displacements by 0.6, the y-displacements by 0.4, and\n"
    "   the z-displacments by -0.2.\n"
    "\n"
    " * Finally, you can input a warp catenation string directly as in the '-nwarp'\n"
    "   option of 3dNwarpApply, as in\n"
    "\n"
    "   3dNwarpCat -prefix Fred_total_WARP 'Fred_WARP+tlrc.HEAD Fred.Xat.1D' \n"
    "\n"
    "\n"
    "OPTIONS\n"
    "-------\n"
    " -interp iii == 'iii' is the interpolation mode:\n"
    "                ++ Modes allowed are a subset of those in 3dAllineate:\n"
    "                     linear  quintic  wsinc5\n"
    "                ++ The default interpolation mode is 'wsinc5'.\n"
    "                ++ 'linear' is much faster but less accurate.\n"
    "                ++ 'quintic' is between 'linear' and 'wsinc5',\n"
    "                   in both accuracy and speed.\n"
    "\n"
    " -verb       == print (to stderr) various fun messages along the road.\n"
    "\n"
    " -prefix ppp == prefix name for the output dataset that holds the warp.\n"
    " -space sss  == attach string 'sss' to the output dataset as its atlas\n"
    "                space marker.\n"
    "\n"
    " -warp1 ww1  == alternative way to specify warp#1\n"
    " -warp2 ww2  == alternative way to specify warp#2 (etc.)\n"
    "                ++ If you use any '-warpX' option for X=1..99, then\n"
    "                   any addition warps specified after all command\n"
    "                   line options appear AFTER these enumerated warps.\n"
    "                   That is, '-warp1 A+tlrc -warp2 B+tlrc C+tlrc'\n"
    "                   is like using '-warp3 C+tlrc'.\n"
    "                ++ At most 99 warps can be used.  If you need more,\n"
    "                   PLEASE back away from the computer slowly, and\n"
    "                   get professional counseling.\n"
    "\n"
    " -iwarp      == Invert the final warp before output.\n"
    "\n"
    " -expad PP   == Pad the nonlinear warps by 'PP' voxels in all directions.\n"
    "                The warp displacements are extended by linear extrapolation\n"
    "                from the faces of the input grid.\n"
   ) ;
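   /* For illustration only (values are made up): the 12-numbers-on-one-row
      matrix format described above lists the top 3 rows of the 4x4 affine,
      row by row, e.g.
          1 0 0 2.5   0 1 0 -3.1   0 0 1 7.0
      i.e., an identity rotation plus a (2.5,-3.1,7.0) mm translation. */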

   printf(
    "\n"
    "AUTHOR -- RWCox -- March 2013\n"
   ) ;

   PRINT_AFNI_OMP_USAGE("3dNwarpCat",NULL) ; PRINT_COMPILE_DATE ;
   exit(0) ;
}
Example #3
int main( int argc , char *argv[] )
{
   THD_3dim_dataset *xset , *cset, *mset=NULL ;
   int nopt=1 , method=PEARSON , do_autoclip=0 ;
   int nvox , nvals , ii, jj, kout, kin, polort=1 ;
   int ix1,jy1,kz1, ix2, jy2, kz2 ;
   char *prefix = "degree_centrality" ;
   byte *mask=NULL;
   int   nmask , abuc=1 ;
   int   all_source=0;        /* output all source voxels  25 Jun 2010 [rickr] */
   char str[32] , *cpt ;
   int *imap = NULL ; MRI_vectim *xvectim ;
   float (*corfun)(int,float *,float*) = NULL ;
   /* djc - add 1d file output for similarity matrix */
   FILE *fout1D=NULL;

   /* CC - we will have two subbricks: binary and weighted centrality */
   int nsubbriks = 2;
   int subbrik = 0;
   float * bodset;
   float * wodset;

   int nb_ctr = 0;

   /* CC - added flags for thresholding correlations */
   double thresh = 0.0;
   double othresh = 0.0;
   int dothresh = 0;
   double sparsity = 0.0;
   int dosparsity = 0;
  
   /* variables for calculating degree centrality */
   long * binaryDC = NULL;
   double * weightedDC = NULL;

   /* variables for histogram */
   hist_node_head* histogram=NULL;
   hist_node* hptr=NULL;
   hist_node* pptr=NULL;
   int bottom_node_idx = 0;
   int totNumCor = 0;
   long totPosCor = 0;
   int ngoal = 0;
   int nretain = 0;
   float binwidth = 0.0;
   int nhistnodes = 50;

   /*----*/

   AFNI_SETUP_OMP(0) ;  /* 24 Jun 2013 */

   if( argc < 2 || strcmp(argv[1],"-help") == 0 ){
      printf(
"Usage: 3dDegreeCentrality [options] dset\n"
"  Computes voxelwise weighted and binary degree centrality and\n"
"  stores the result in a new 3D bucket dataset as floats to\n"
"  preserve their values. Degree centrality reflects the strength and\n"
"  extent of the correlation of a voxel with every other voxel in\n"
"  the brain.\n\n"
"  Conceptually the process involves: \n"
"      1. Calculating the correlation between voxel time series for\n"
"         every pair of voxels in the brain (as determined by masking)\n"
"      2. Applying a threshold to the resulting correlations to exclude\n"
"         those that might have arisen by chance, or to sparsify the\n"
"         connectivity graph.\n"
"      3. At each voxel, summarizing its correlation with other voxels\n"
"         in the brain, by either counting the number of voxels correlated\n"
"         with the seed voxel (binary) or by summing the correlation \n"
"         coefficients (weighted).\n"
"   Practically the algorithm is ordered differently to optimize for\n"
"   computational time and memory usage.\n\n"
"   The threshold can be supplied as a correlation coefficient, \n"
"   or a sparsity threshold. The sparsity threshold reflects the fraction\n"
"   of connections that should be retained after the threshold has been\n"
"   applied. To minimize resource consumption, using a sparsity threshold\n"
"   involves a two-step procedure. In the first step, a correlation\n"
"   coefficient threshold is applied to substantially reduce the number\n"
"   of correlations. Next, the remaining correlations are sorted and a\n"
"   threshold is calculated so that only the specified fraction of \n"
"   possible correlations are above threshold. Due to ties between\n"
"   correlations, the fraction of correlations that pass the sparsity\n"
"   threshold might be slightly more than the number specified.\n\n"
"   Regardless of the thresholding procedure employed, negative \n"
"   correlations are excluded from the calculations.\n" 
"\n"
"Options:\n"
"  -pearson  = Correlation is the normal Pearson (product moment)\n"
"               correlation coefficient [default].\n"
   #if 0
"  -spearman = Correlation is the Spearman (rank) correlation\n"
"               coefficient.\n"
"  -quadrant = Correlation is the quadrant correlation coefficient.\n"
   #else
"  -spearman AND -quadrant are disabled at this time :-(\n"
   #endif
"\n"
"  -thresh r = exclude correlations <= r from calculations\n"
"  -sparsity s = only use top s percent of correlations in calculations\n"
"                s should be an integer between 0 and 100. Uses an\n"
"                an adaptive thresholding procedure to reduce memory.\n"
"                The speed of determining the adaptive threshold can\n"
"                be improved by specifying an initial threshold with\n"
"                the -thresh flag.\n"
"\n"
"  -polort m = Remove polynomical trend of order 'm', for m=-1..3.\n"
"               [default is m=1; removal is by least squares].\n"
"               Using m=-1 means no detrending; this is only useful\n"
"               for data/information that has been pre-processed.\n"
"\n"
"  -autoclip = Clip off low-intensity regions in the dataset,\n"
"  -automask =  so that the correlation is only computed between\n"
"               high-intensity (presumably brain) voxels.  The\n"
"               mask is determined the same way that 3dAutomask works.\n"
"\n"
"  -mask mmm = Mask to define 'in-brain' voxels. Reducing the number\n"
"               the number of voxels included in the calculation will\n"
"               significantly speedup the calculation. Consider using\n"
"               a mask to constrain the calculations to the grey matter\n"
"               rather than the whole brain. This is also preferrable\n"
"               to using -autoclip or -automask.\n"
"\n"
"  -prefix p = Save output into dataset with prefix 'p', this file will\n"
"               contain bricks for both 'weighted' or 'degree' centrality\n"
"               [default prefix is 'deg_centrality'].\n"
"\n"
"  -out1D f = Save information about the above threshold correlations to\n"
"              1D file 'f'. Each row of this file will contain:\n"
"               Voxel1 Voxel2 i1 j1 k1 i2 j2 k2 Corr\n"
"              Where voxel1 and voxel2 are the 1D indices of the pair of\n"
"              voxels, i j k correspond to their 3D coordinates, and Corr\n"
"              is the value of the correlation between the voxel time courses.\n" 
"\n"
"Notes:\n"
" * The output dataset is a bucket type of floats.\n"
" * The program prints out an estimate of its memory used\n"
"    when it ends.  It also prints out a progress 'meter'\n"
"    to keep you pacified.\n"
"\n"
"-- RWCox - 31 Jan 2002 and 16 Jul 2010\n"
"-- Cameron Craddock - 26 Sept 2015 \n"
            ) ;
      PRINT_AFNI_OMP_USAGE("3dDegreeCentrality",NULL) ;
      PRINT_COMPILE_DATE ; exit(0) ;
   }

   mainENTRY("3dDegreeCentrality main"); machdep(); PRINT_VERSION("3dDegreeCentrality");
   AFNI_logger("3dDegreeCentrality",argc,argv);

   /*-- option processing --*/

   while( nopt < argc && argv[nopt][0] == '-' ){

      if( strcmp(argv[nopt],"-time") == 0 ){
         abuc = 0 ; nopt++ ; continue ;
      }

      if( strcmp(argv[nopt],"-autoclip") == 0 ||
          strcmp(argv[nopt],"-automask") == 0   ){

         do_autoclip = 1 ; nopt++ ; continue ;
      }

      if( strcmp(argv[nopt],"-mask") == 0 ){
         mset = THD_open_dataset(argv[++nopt]);
         CHECK_OPEN_ERROR(mset,argv[nopt]);
         nopt++ ; continue ;
      }

      if( strcmp(argv[nopt],"-pearson") == 0 ){
         method = PEARSON ; nopt++ ; continue ;
      }

#if 0
      if( strcmp(argv[nopt],"-spearman") == 0 ){
         method = SPEARMAN ; nopt++ ; continue ;
      }

      if( strcmp(argv[nopt],"-quadrant") == 0 ){
         method = QUADRANT ; nopt++ ; continue ;
      }
#endif

      if( strcmp(argv[nopt],"-eta2") == 0 ){
         method = ETA2 ; nopt++ ; continue ;
      }

      if( strcmp(argv[nopt],"-prefix") == 0 ){
         prefix = strdup(argv[++nopt]) ;
         if( !THD_filename_ok(prefix) ){
            ERROR_exit("Illegal value after -prefix!") ;
         }
         nopt++ ; continue ;
      }

      if( strcmp(argv[nopt],"-thresh") == 0 ){
         double val = (double)strtod(argv[++nopt],&cpt) ;
         if( *cpt != '\0' || val >= 1.0 || val < 0.0 ){
            ERROR_exit("Illegal value (%f) after -thresh!", val) ;
         }
         dothresh = 1;
         thresh = val ; othresh = val ; nopt++ ; continue ;
      }
      if( strcmp(argv[nopt],"-sparsity") == 0 ){
         double val = (double)strtod(argv[++nopt],&cpt) ;
         if( *cpt != '\0' || val > 100 || val <= 0 ){
            ERROR_exit("Illegal value (%f) after -sparsity!", val) ;
         }
         if( val > 5.0 )
         {
             WARNING_message("Sparsity %3.2f%% is large and will require alot of memory and time, consider using a smaller value. ", val);
         }
         dosparsity = 1 ;
         sparsity = val ; nopt++ ; continue ;
      }
      if( strcmp(argv[nopt],"-polort") == 0 ){
         int val = (int)strtod(argv[++nopt],&cpt) ;
         if( *cpt != '\0' || val < -1 || val > 3 ){
            ERROR_exit("Illegal value after -polort!") ;
         }
         polort = val ; nopt++ ; continue ;
      }
      if( strcmp(argv[nopt],"-mem_stat") == 0 ){
         MEM_STAT = 1 ; nopt++ ; continue ;
      }
      if( strncmp(argv[nopt],"-mem_profile",8) == 0 ){
         MEM_PROF = 1 ; nopt++ ; continue ;
      }
      /* check for 1d argument */
      if ( strcmp(argv[nopt],"-out1D") == 0 ){
          if (!(fout1D = fopen(argv[++nopt], "w"))) {
             ERROR_message("Failed to open %s for writing", argv[nopt]);
             exit(1);
          }
          nopt++ ; continue ;
      }

      ERROR_exit("Illegal option: %s",argv[nopt]) ;
   }

   /*-- open dataset, check for legality --*/

   if( nopt >= argc ) ERROR_exit("Need a dataset on command line!?") ;

   xset = THD_open_dataset(argv[nopt]); CHECK_OPEN_ERROR(xset,argv[nopt]);


   if( DSET_NVALS(xset) < 3 )
     ERROR_exit("Input dataset %s does not have 3 or more sub-bricks!",argv[nopt]) ;
   DSET_load(xset) ; CHECK_LOAD_ERROR(xset) ;

   /*-- compute mask array, if desired --*/
   nvox = DSET_NVOX(xset) ; nvals = DSET_NVALS(xset) ;
   INC_MEM_STATS((nvox * nvals * sizeof(double)), "input dset");
   PRINT_MEM_STATS("inset");

   /* if a mask was specified make sure it is appropriate */
   if( mset ){

      if( DSET_NVOX(mset) != nvox )
         ERROR_exit("Input and mask dataset differ in number of voxels!") ;
      mask  = THD_makemask(mset, 0, 1.0, 0.0) ;

      /* update running memory statistics to reflect loading the image */
      INC_MEM_STATS( mset->dblk->total_bytes, "mask dset" );
      PRINT_MEM_STATS( "mset load" );

      nmask = THD_countmask( nvox , mask ) ;
      INC_MEM_STATS( nmask * sizeof(byte), "mask array" );
      PRINT_MEM_STATS( "mask" );

      INFO_message("%d voxels in -mask dataset",nmask) ;
      if( nmask < 2 ) ERROR_exit("Only %d voxels in -mask, exiting...",nmask);

      /* update running memory statistics to reflect loading the image */
      DEC_MEM_STATS( mset->dblk->total_bytes, "mask dset" );
      DSET_unload(mset) ;
      PRINT_MEM_STATS( "mset unload" );
   } 
   /* if automasking is requested, handle that now */
   else if( do_autoclip ){
      mask  = THD_automask( xset ) ;
      nmask = THD_countmask( nvox , mask ) ;
      INFO_message("%d voxels survive -autoclip",nmask) ;
      if( nmask < 2 ) ERROR_exit("Only %d voxels in -automask!",nmask);
   }
   /* otherwise we use all of the voxels in the image */
   else {
      nmask = nvox ;
      INFO_message("computing for all %d voxels",nmask) ;
   }
   
   if( method == ETA2 && polort >= 0 )
      WARNING_message("Polort for -eta2 should probably be -1...");

    /* djc - 1d file out init */
    if (fout1D != NULL) {
        /* define affine matrix */
        mat44 affine_mat = xset->daxes->ijk_to_dicom;

        /* print command line statement */
        fprintf(fout1D,"#Similarity matrix from command:\n#");
        for(ii=0; ii<argc; ++ii) fprintf(fout1D,"%s ", argv[ii]);

        /* Print affine matrix */
        fprintf(fout1D,"\n");
        fprintf(fout1D,"#[ ");
        int mi, mj;
        for(mi = 0; mi < 4; mi++) {
            for(mj = 0; mj < 4; mj++) {
                fprintf(fout1D, "%.6f ", affine_mat.m[mi][mj]);
            }
        }
        fprintf(fout1D, "]\n");

        /* Print image extents*/
        THD_dataxes *xset_daxes = xset->daxes;
        fprintf(fout1D, "#Image dimensions:\n");
        fprintf(fout1D, "#[%d, %d, %d]\n",
                xset_daxes->nxx, xset_daxes->nyy, xset_daxes->nzz);

        /* Similarity matrix headers */
        fprintf(fout1D,"#Voxel1 Voxel2 i1 j1 k1 i2 j2 k2 Corr\n");
    }


   /* CC calculate the total number of possible correlations, will be 
       useful down the road */
   totPosCor = (.5*((float)nmask))*((float)(nmask-1));
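   /* e.g., with illustrative numbers only: a 50,000-voxel mask gives
      0.5*50000*49999 ~= 1.25e9 possible unique correlations */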

   /**  For the case of Pearson correlation, we make sure the  **/
   /**  data time series have their mean removed (polort >= 0) **/
   /**  and are normalized, so that correlation = dot product, **/
   /**  and we can use function zm_THD_pearson_corr for speed. **/

   switch( method ){
     default:
     case PEARSON: corfun = zm_THD_pearson_corr ; break ;
     case ETA2:    corfun = my_THD_eta_squared  ; break ;
   }
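   /* A minimal sketch of the idea noted above (illustrative only, not the
      actual library routine): once each series is demeaned and L2-normalized,
      the Pearson correlation reduces to a plain dot product:

          static float demo_corr( int n, const float *x, const float *y ){
             float s = 0.0f ; int t ;
             for( t=0 ; t < n ; t++ ) s += x[t]*y[t] ;
             return s ;   // equals Pearson r for demeaned, unit-norm inputs
          }
   */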

   /*-- create vectim from input dataset --*/
   INFO_message("vectim-izing input dataset") ;

   /*-- CC added in mask to reduce the size of xvectim -- */
   xvectim = THD_dset_to_vectim( xset , mask , 0 ) ;
   if( xvectim == NULL ) ERROR_exit("Can't create vectim?!") ;

   /*-- CC update our memory stats to reflect vectim -- */
   INC_MEM_STATS((xvectim->nvec*sizeof(int)) +
                       ((xvectim->nvec)*(xvectim->nvals))*sizeof(float) +
                       sizeof(MRI_vectim), "vectim");
   PRINT_MEM_STATS( "vectim" );

   /*--- CC the vectim contains a mapping between voxel index and mask index, 
         tap into that here to avoid duplicating memory usage ---*/

   if( mask != NULL )
   {
       imap = xvectim->ivec;

       /* --- CC free the mask */
       DEC_MEM_STATS( nmask*sizeof(byte), "mask array" );
       free(mask); mask=NULL;
       PRINT_MEM_STATS( "mask unload" );
   }

   /* -- CC unloading the dataset to reduce memory usage ?? -- */
   DEC_MEM_STATS((DSET_NVOX(xset) * DSET_NVALS(xset) * sizeof(double)), "input dset");
   DSET_unload(xset) ;
   PRINT_MEM_STATS("inset unload");

   /* -- CC configure detrending --*/
   if( polort < 0 && method == PEARSON ){
     polort = 0; WARNING_message("Pearson correlation always uses polort >= 0");
   }
   if( polort >= 0 ){
     for( ii=0 ; ii < xvectim->nvec ; ii++ ){  /* remove polynomial trend */
       DETREND_polort(polort,nvals,VECTIM_PTR(xvectim,ii)) ;
     }
   }


   /* -- this procedure does not change time series that have zero variance -- */
   if( method == PEARSON ) THD_vectim_normalize(xvectim) ;  /* L2 norm = 1 */

    /* -- CC create arrays to hold degree and weighted centrality while
          they are being calculated -- */
    if( dosparsity == 0 )
    {
        if( ( binaryDC = (long*)calloc( nmask, sizeof(long) )) == NULL )
        {
            ERROR_message( "Could not allocate %d byte array for binary DC calculation\n",
                nmask*sizeof(long)); 
        }

        /* -- update running memory estimate to reflect memory allocation */ 
        INC_MEM_STATS( nmask*sizeof(long), "binary DC array" );
        PRINT_MEM_STATS( "binaryDC" );

        if( ( weightedDC = (double*)calloc( nmask, sizeof(double) )) == NULL )
        {
            if (binaryDC){ free(binaryDC); binaryDC = NULL; }
            ERROR_message( "Could not allocate %d byte array for weighted DC calculation\n",
                nmask*sizeof(double)); 
        }
        /* -- update running memory estimate to reflect memory allocation */ 
        INC_MEM_STATS( nmask*sizeof(double), "weighted DC array" );
        PRINT_MEM_STATS( "weightedDC" );
    }


    /* -- CC if we are using a sparsity threshold, build a histogram to calculate the 
         threshold */
    if (dosparsity == 1)
    {
        /* make sure that there is a bin for correlation values that == 1.0 */
        binwidth = (1.005-thresh)/nhistnodes;

        /* calculate the number of correlations we wish to retain */
        ngoal = nretain = (int)(((double)totPosCor)*((double)sparsity) / 100.0);

        /* allocate memory for the histogram bins */
        if(( histogram = (hist_node_head*)malloc(nhistnodes*sizeof(hist_node_head))) == NULL )
        {
            /* if the allocation fails, free all memory and exit */
            if (binaryDC){ free(binaryDC); binaryDC = NULL; }
            if (weightedDC){ free(weightedDC); weightedDC = NULL; }
            ERROR_message( "Could not allocate %d byte array for histogram\n",
                nhistnodes*sizeof(hist_node_head)); 
        }
        else {
            /* -- update running memory estimate to reflect memory allocation */ 
            INC_MEM_STATS( nhistnodes*sizeof(hist_node_head), "hist bins" );
            PRINT_MEM_STATS( "hist1" );
        }

        /* initialize histogram bins */
        for( kout = 0; kout < nhistnodes; kout++ )
        {
            histogram[ kout ].bin_low = thresh+kout*binwidth;
            histogram[ kout ].bin_high = histogram[ kout ].bin_low+binwidth;
            histogram[ kout ].nbin = 0;
            histogram[ kout ].nodes = NULL; 
            /*INFO_message("Hist bin %d [%3.3f, %3.3f) [%d, %p]\n",
                kout, histogram[ kout ].bin_low, histogram[ kout ].bin_high,
                histogram[ kout ].nbin, histogram[ kout ].nodes );*/
        }
    }
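    /* Illustrative sketch of the binning above (assumed numbers, not from any
       run): with thresh = 0.30 and nhistnodes = 50, binwidth = (1.005-0.30)/50
       = 0.0141, so bin k covers [0.30 + k*0.0141, 0.30 + (k+1)*0.0141).  A
       correlation of 0.95 would later map to bin floor((0.95-0.30)/0.0141) = 46,
       and a correlation of exactly 1.0 maps to bin 49, the last bin, which is
       why 1.005 (rather than 1.0) appears in the binwidth formula. */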

    /*-- tell the user what we are about to do --*/
    if (dosparsity == 0 )
    {
        INFO_message( "Calculating degree centrality with threshold = %f.\n", thresh);
    }
    else
    {
        INFO_message( "Calculating degree centrality with threshold = %f and sparsity = %3.2f%% (%d)\n",
            thresh, sparsity, nretain);
    }

    /*---------- loop over mask voxels, correlate ----------*/
    AFNI_OMP_START ;
#pragma omp parallel if( nmask > 999 )
    {
       int lii,ljj,lin,lout,ithr,nthr,vstep,vii ;
       float *xsar , *ysar ;
       hist_node* new_node = NULL ;
       hist_node* tptr = NULL ;
       hist_node* rptr = NULL ;
       int new_node_idx = 0;
       double car = 0.0 ; 

       /*-- get information about who we are --*/
#ifdef USE_OMP
       ithr = omp_get_thread_num() ;
       nthr = omp_get_num_threads() ;
       if( ithr == 0 ) INFO_message("%d OpenMP threads started",nthr) ;
#else
       ithr = 0 ; nthr = 1 ;
#endif

       /*-- For the progress tracker, we want to print out 50 numbers,
            figure out a number of loop iterations that will make this easy */
       vstep = (int)( nmask / (nthr*50.0f) + 0.901f ) ; vii = 0 ;
       if((MEM_STAT==0) && (ithr == 0 )) fprintf(stderr,"Looping:") ;
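       /* e.g. (illustrative numbers only): nmask = 100000 and nthr = 4 give
          vstep = (int)(100000/200.0 + 0.901) = 500, so thread 0 prints a
          progress mark roughly every 500 of its outer-loop iterations,
          i.e. about 50 marks over the whole run. */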

#pragma omp for schedule(static, 1)
       for( lout=0 ; lout < xvectim->nvec ; lout++ ){  /*----- outer voxel loop -----*/

          if( ithr == 0 && vstep > 2 ) /* allow small dsets 16 Jun 2011 [rickr] */
          { vii++ ; if( vii%vstep == vstep/2 && MEM_STAT == 0 ) vstep_print(); }

          /* get ref time series from this voxel */
          xsar = VECTIM_PTR(xvectim,lout) ;

          /* try to make calculation more efficient by only calculating the unique 
             correlations */
          for( lin=(lout+1) ; lin < xvectim->nvec ; lin++ ){  /*----- inner loop over voxels -----*/

             /* extract the voxel time series */
             ysar = VECTIM_PTR(xvectim,lin) ;

             /* now correlate the time series */
             car = (double)(corfun(nvals,xsar,ysar)) ;

             if ( car <= thresh )
             {
                 continue ;
             }

/* update degree centrality values, hopefully the pragma
   will handle mutual exclusion */
#pragma omp critical(dataupdate)
             {
                 /* process only correlations that are still above the
                    (possibly updated) threshold */
                 if ( car > thresh )
                 {
                     totNumCor += 1;
               
                     if ( dosparsity == 0 )
                     { 
                         binaryDC[lout] += 1; binaryDC[lin] += 1;
                         weightedDC[lout] += car; weightedDC[lin] += car;

                         /* print correlation out to the 1D file */
                         if ( fout1D != NULL )
                         {
                             /* map mask indices to whole-volume voxel indices */
                             lii = (imap != NULL) ? imap[lout] : lout ;
                             ljj = (imap != NULL) ? imap[lin]  : lin ;
                             /* determine the i,j,k coords */
                             ix1 = DSET_index_to_ix(xset,lii) ;
                             jy1 = DSET_index_to_jy(xset,lii) ;
                             kz1 = DSET_index_to_kz(xset,lii) ;
                             ix2 = DSET_index_to_ix(xset,ljj) ;
                             jy2 = DSET_index_to_jy(xset,ljj) ;
                             kz2 = DSET_index_to_kz(xset,ljj) ;
                             /* add source, dest, correlation to 1D file */
                             fprintf(fout1D, "%d %d %d %d %d %d %d %d %.6f\n",
                                lii, ljj, ix1, jy1, kz1, ix2, jy2, kz2, car);
                        }
                    }
                    else
                    {
                        /* determine the index in the histogram to add the node */
                        new_node_idx = (int)floor((double)(car-othresh)/(double)binwidth);
                        if ((new_node_idx >= nhistnodes) || (new_node_idx < bottom_node_idx))
                        {
                            /* this error should indicate a programming error and should not happen */
                            WARNING_message("Node index %d is out of range [%d,%d)!",new_node_idx,
                            bottom_node_idx, nhistnodes);
                        }
                        else
                        {
                            /* create a node to add to the histogram */
                            new_node = (hist_node*)calloc(1,sizeof(hist_node));
                            if( new_node == NULL )
                            {
                                /* could not allocate memory for this node; rather
                                   than fiddle with error handling here, just move on */
                                WARNING_message("Could not allocate a new node!");
                            }
                            else
                            {
                 
                                /* populate histogram node */
                                new_node->i = lout; 
                                new_node->j = lin;
                                new_node->corr = car;
                                new_node->next = NULL;

                                /* -- update running memory estimate to reflect memory allocation */ 
                                INC_MEM_STATS( sizeof(hist_node), "hist nodes" );
                                if ((totNumCor % (1024*1024)) == 0) PRINT_MEM_STATS( "hist nodes" );

                                /* populate histogram */
                                new_node->next = histogram[new_node_idx].nodes;
                                histogram[new_node_idx].nodes = new_node;
                                histogram[new_node_idx].nbin++; 

                                /* see if there are enough correlations in the histogram
                                   for the sparsity */
                                if ((totNumCor - histogram[bottom_node_idx].nbin) > nretain)
                                { 
                                    /* delete the list of nodes */
                                    rptr = histogram[bottom_node_idx].nodes;
                                    while(rptr != NULL)
                                    {
                                        tptr = rptr;
                                        rptr = rptr->next;
                                        /* check that the ptr is not null before freeing it*/
                                        if(tptr!= NULL)
                                        {
                                            DEC_MEM_STATS( sizeof(hist_node), "hist nodes" );
                                            free(tptr);
                                        }
                                    }
                                    PRINT_MEM_STATS( "unloaded hist nodes - thresh increase" );

                                    histogram[bottom_node_idx].nodes = NULL;
                                    totNumCor -= histogram[bottom_node_idx].nbin;
                                    histogram[bottom_node_idx].nbin=0;
 
                                    /* get the new threshold */
                                    thresh = (double)histogram[++bottom_node_idx].bin_low;
                                    if(MEM_STAT == 1) INFO_message("Increasing threshold to %3.2f (%d)\n",
                                        thresh,bottom_node_idx); 
                                }

                            } /* else, newptr != NULL */
                        } /* else, new_node_idx in range */
                    } /* else, do_sparsity == 1 */
                 } /* car > thresh */
             } /* this is the end of the critical section */
          } /* end of inner loop over voxels */
       } /* end of outer loop over ref voxels */

       if( ithr == 0 ) fprintf(stderr,".\n") ;

    } /* end OpenMP */
    AFNI_OMP_END ;

    /* update the user so that they know what we are up to */
    INFO_message ("AFNI_OMP finished\n");
    INFO_message ("Found %d (%3.2f%%) correlations above threshold (%f)\n",
       totNumCor, 100.0*((float)totNumCor)/((float)totPosCor), thresh);

   /*----------  Finish up ---------*/

   /*if( dosparsity == 1 )
   {
       for( kout = 0; kout < nhistnodes; kout++ )
       {
           INFO_message("Hist bin %d [%3.3f, %3.3f) [%d, %p]\n",
                kout, histogram[ kout ].bin_low, histogram[ kout ].bin_high,
                histogram[ kout ].nbin, histogram[ kout ].nodes );
       }
   }*/

   /*-- create output dataset --*/
   cset = EDIT_empty_copy( xset ) ;

   /*-- configure the output dataset */
   if( abuc ){
     EDIT_dset_items( cset ,
                        ADN_prefix    , prefix         ,
                        ADN_nvals     , nsubbriks      , /* 2 subbricks, degree and weighted centrality */
                        ADN_ntt       , 0              , /* no time axis */
                        ADN_type      , HEAD_ANAT_TYPE ,
                        ADN_func_type , ANAT_BUCK_TYPE ,
                        ADN_datum_all , MRI_float      ,
                      ADN_none ) ;
   } else {
     EDIT_dset_items( cset ,
                        ADN_prefix    , prefix         ,
                        ADN_nvals     , nsubbriks      , /* 2 subbricks, degree and weighted centrality */
                        ADN_ntt       , nsubbriks      ,  /* num times */
                        ADN_ttdel     , 1.0            ,  /* fake TR */
                        ADN_nsl       , 0              ,  /* no slice offsets */
                        ADN_type      , HEAD_ANAT_TYPE ,
                        ADN_func_type , ANAT_EPI_TYPE  ,
                        ADN_datum_all , MRI_float      ,
                      ADN_none ) ;
   }

   /* add history information to the header */
   tross_Make_History( "3dDegreeCentrality" , argc,argv , cset ) ;

   ININFO_message("creating output dataset in memory") ;

   /* -- Configure the subbriks: Binary Degree Centrality */
   subbrik = 0;
   EDIT_BRICK_TO_NOSTAT(cset,subbrik) ;                     /* stat params  */
   /* CC this sets the subbrik scaling factor, which we will probably want
      to do again after we calculate the voxel values */
   EDIT_BRICK_FACTOR(cset,subbrik,1.0) ;                 /* scale factor */

   sprintf(str,"Binary Degree Centrality") ;

   EDIT_BRICK_LABEL(cset,subbrik,str) ;
   EDIT_substitute_brick(cset,subbrik,MRI_float,NULL) ;   /* make array   */


   /* copy measure data into the subbrik */
   bodset = DSET_ARRAY(cset,subbrik);
 
   /* -- Configure the subbriks: Weighted Degree Centrality */
   subbrik = 1;
   EDIT_BRICK_TO_NOSTAT(cset,subbrik) ;                     /* stat params  */
   /* CC this sets the subbrik scaling factor, which we will probably want
      to do again after we calculate the voxel values */
   EDIT_BRICK_FACTOR(cset,subbrik,1.0) ;                 /* scale factor */

   sprintf(str,"Weighted Degree Centrality") ;

   EDIT_BRICK_LABEL(cset,subbrik,str) ;
   EDIT_substitute_brick(cset,subbrik,MRI_float,NULL) ;   /* make array   */

   /* copy measure data into the subbrik */
   wodset = DSET_ARRAY(cset,subbrik);

   /* increment memory stats */
   INC_MEM_STATS( (DSET_NVOX(cset)*DSET_NVALS(cset)*sizeof(float)), "output dset");
   PRINT_MEM_STATS( "outset" );

   /* pull the values out of the histogram */
   if( dosparsity == 0 )
   {
       for( kout = 0; kout < nmask; kout++ )
       {
          if ( imap != NULL )
          {
              ii = imap[kout] ;  /* ii= source voxel (we know that ii is in the mask) */
          }
          else
          {
              ii = kout ;
          }
   
          if( ii >= DSET_NVOX(cset) )
          {
              WARNING_message("Avoiding bodset, wodset overflow %d > %d (%s,%d)\n",
                  ii,DSET_NVOX(cset),__FILE__,__LINE__ );
          }
          else
          {
              bodset[ ii ] = (float)(binaryDC[kout]);
              wodset[ ii ] = (float)(weightedDC[kout]);
          }
       }

       /* we are done with this memory, and can kill it now*/
       if(binaryDC)
       {
           free(binaryDC);
           binaryDC=NULL;
           /* -- update running memory estimate to reflect freeing this memory */
           DEC_MEM_STATS( nmask*sizeof(long), "binary DC array" );
           PRINT_MEM_STATS( "binaryDC" );
       }
       if(weightedDC)
       {
           free(weightedDC);
           weightedDC=NULL;
           /* -- update running memory estimate to reflect freeing this memory */
           DEC_MEM_STATS( nmask*sizeof(double), "weighted DC array" );
           PRINT_MEM_STATS( "weightedDC" );
       }
   }
   else
   {

       /* add in the values from the histogram; this is a two-stage procedure:
          first we add in whole bins, one bin at a time, until we reach a bin that
          would only be partially used; then we re-bin that bin's nodes into a finer
          histogram and add those finer bins until the sparsity goal is reached */
       kout = nhistnodes - 1;
       while (( kout >= 0 ) && ( histogram[kout].nbin < nretain ))
       {
           hptr = pptr = histogram[kout].nodes;
           while( hptr != NULL )
           {

               /* determine the indices corresponding to this node */
               if ( imap != NULL )
               {
                   ii = imap[hptr->i] ;  /* ii= source voxel (we know that ii is in the mask) */
               }
               else 
               {
                   ii = hptr->i ;
               }
               if ( imap != NULL )
               {
                   jj = imap[hptr->j] ;  /* jj = target voxel (also in the mask) */
               }
               else
               {
                   jj = hptr->j ;
               }

               /* add in the values */
               if(( ii >= DSET_NVOX(cset) ) || ( jj >= DSET_NVOX(cset)))
               {
                   if( ii >= DSET_NVOX(cset))
                   {
                       WARNING_message("Avoiding bodset, wodset overflow (ii) %d > %d\n (%s,%d)\n",
                           ii,DSET_NVOX(cset),__FILE__,__LINE__ );
                   }
                   if( jj >= DSET_NVOX(cset))
                   {
                       WARNING_message("Avoiding bodset, wodset overflow (jj) %d > %d\n (%s,%d)\n",
                           jj,DSET_NVOX(cset),__FILE__,__LINE__ );
                   }
               }
               else
               {
                   bodset[ ii ] += 1.0 ;
                   wodset[ ii ] += (float)(hptr->corr);
                   bodset[ jj ] += 1.0 ;
                   wodset[ jj ] += (float)(hptr->corr);
               }

               if( fout1D != NULL )
               {
                   /* add source, dest, correlation to 1D file */
                   ix1 = DSET_index_to_ix(cset,ii) ;
                   jy1 = DSET_index_to_jy(cset,ii) ;
                   kz1 = DSET_index_to_kz(cset,ii) ;
                   ix2 = DSET_index_to_ix(cset,jj) ;
                   jy2 = DSET_index_to_jy(cset,jj) ;
                   kz2 = DSET_index_to_kz(cset,jj) ;
                   fprintf(fout1D, "%d %d %d %d %d %d %d %d %.6f\n",
                           ii, jj, ix1, jy1, kz1, ix2, jy2, kz2, (float)(hptr->corr));
               }

               /* increment node pointers */
               pptr = hptr;
               hptr = hptr->next;

               /* delete the node */
               if(pptr)
               {
                   /* -- update running memory estimate to reflect freeing this node */
                   DEC_MEM_STATS(sizeof( hist_node ), "hist nodes" );
                   /* free the mem */
                   free(pptr);
                   pptr=NULL;
               }
           } 
           /* decrement the number of correlations we wish to retain */
           nretain -= histogram[kout].nbin;
           histogram[kout].nodes = NULL;

           /* go on to the next bin */
           kout--;
       }
       PRINT_MEM_STATS( "hist1 bins free - inc into output" );

        /* if we haven't yet reached the number of correlations we want to retain,
           go through and add a subset of the correlations from the remaining bin */
        if(( nretain > 0 ) && (kout >= 0))
        {

            hist_node_head* histogram2 = NULL; 
            hist_node_head* histogram2_save = NULL; 
            int h2nbins = 100;
            float h2binwidth = 0.0;
            int h2ndx=0;

            h2binwidth = (((1.0+binwidth/((float)h2nbins))*histogram[kout].bin_high) - histogram[kout].bin_low) /
               ((float)h2nbins);

            /* allocate the bins */
            if(( histogram2 = (hist_node_head*)malloc(h2nbins*sizeof(hist_node_head))) == NULL )
            {
                if (binaryDC){ free(binaryDC); binaryDC = NULL; }
                if (weightedDC){ free(weightedDC); weightedDC = NULL; }
                if (histogram){ histogram = free_histogram(histogram, nhistnodes); }
                ERROR_message( "Could not allocate %d byte array for histogram2\n",
                    h2nbins*sizeof(hist_node_head)); 
            }
            else {
                /* -- update running memory estimate to reflect memory allocation */ 
                histogram2_save = histogram2;
                INC_MEM_STATS(( h2nbins*sizeof(hist_node_head )), "hist bins");
                PRINT_MEM_STATS( "hist2" );
            }
   
            /* initialize the bins */
            for( kin = 0; kin < h2nbins; kin++ )
            {
                histogram2[ kin ].bin_low = histogram[kout].bin_low + kin*h2binwidth;
                histogram2[ kin ].bin_high = histogram2[ kin ].bin_low + h2binwidth;
                histogram2[ kin ].nbin = 0;
                histogram2[ kin ].nodes = NULL; 
                /*INFO_message("Hist2 bin %d [%3.3f, %3.3f) [%d, %p]\n",
                    kin, histogram2[ kin ].bin_low, histogram2[ kin ].bin_high,
                    histogram2[ kin ].nbin, histogram2[ kin ].nodes );*/
            }

            /* move correlations from histogram to histogram2 */
            INFO_message ("Adding %d nodes from histogram to histogram2",histogram[kout].nbin);
            while ( histogram[kout].nodes != NULL )
            {
                hptr = histogram[kout].nodes;
                h2ndx = (int)floor((double)(hptr->corr - histogram[kout].bin_low)/(double)h2binwidth);
                if(( h2ndx < h2nbins ) && ( h2ndx >= 0 ))
                {
                    histogram[kout].nodes = hptr->next;
                    hptr->next = histogram2[h2ndx].nodes;
                    histogram2[h2ndx].nodes = hptr; 
                    histogram2[h2ndx].nbin++;
                    histogram[kout].nbin--;
                }
                else
                {
                    WARNING_message("h2ndx %d is not in range [0,%d) :: %.10f,%.10f\n",h2ndx,h2nbins,hptr->corr, histogram[kout].bin_low);
                }
               
            }

            /* free the remainder of histogram */
            {
                int nbins_rem = 0;
                for(ii = 0; ii < nhistnodes; ii++) nbins_rem+=histogram[ii].nbin;
                histogram = free_histogram(histogram, nhistnodes);
                PRINT_MEM_STATS( "free remainder of histogram1" );
            }

            kin = h2nbins - 1;
            while (( nretain > 0 ) && ( kin >= 0 ))
            {
                hptr = pptr = histogram2[kin].nodes;
                while( hptr != NULL )
                {
     
                    /* determine the indices corresponding to this node */
                    if ( imap != NULL )
                    {
                        ii = imap[hptr->i] ;  
                    }
                    else
                    {
                        ii = hptr->i ;
                    }
                    if ( imap != NULL )
                    {
                        jj = imap[hptr->j] ; 
                    }
                    else
                    {
                        jj = hptr->j ;
                    }

                    /* add in the values */
                    if(( ii >= DSET_NVOX(cset) ) || ( jj >= DSET_NVOX(cset)))
                    {
                        if( ii >= DSET_NVOX(cset))
                        {
                            WARNING_message("Avoiding bodset, wodset overflow (ii) %d > %d\n (%s,%d)\n",
                                ii,DSET_NVOX(cset),__FILE__,__LINE__ );
                        }
                        if( jj >= DSET_NVOX(cset))
                        {
                            WARNING_message("Avoiding bodset, wodset overflow (jj) %d > %d\n (%s,%d)\n",
                                jj,DSET_NVOX(cset),__FILE__,__LINE__ );
                        }
                    }
                    else
                    {
                        bodset[ ii ] += 1.0 ;
                        wodset[ ii ] += (float)(hptr->corr);
                        bodset[ jj ] += 1.0 ;
                        wodset[ jj ] += (float)(hptr->corr);
                    }
                    if( fout1D != NULL )
                    {
                        /* add source, dest, correlation to 1D file */
                        ix1 = DSET_index_to_ix(cset,ii) ;
                        jy1 = DSET_index_to_jy(cset,ii) ;
                        kz1 = DSET_index_to_kz(cset,ii) ;
                        ix2 = DSET_index_to_ix(cset,jj) ;
                        jy2 = DSET_index_to_jy(cset,jj) ;
                        kz2 = DSET_index_to_kz(cset,jj) ;
                        fprintf(fout1D, "%d %d %d %d %d %d %d %d %.6f\n",
                            ii, jj, ix1, jy1, kz1, ix2, jy2, kz2, (float)(hptr->corr));
                    }

                    /* increment node pointers */
                    pptr = hptr;
                    hptr = hptr->next;

                    /* delete the node */
                    if(pptr)
                    {
                        free(pptr);
                        DEC_MEM_STATS(( sizeof(hist_node) ), "hist nodes");
                        pptr=NULL;
                    }
                }
 
                /* decrement the number of correlations we wish to retain */
                nretain -= histogram2[kin].nbin;
                histogram2[kin].nodes = NULL;

                /* go on to the next bin */
                kin--;
            }
            PRINT_MEM_STATS("hist2 nodes free - incorporated into output");

            /* we are finished with histogram2 */
            {
                histogram2 = free_histogram(histogram2, h2nbins);
                /* -- report the running memory estimate now that histogram2 is freed */
                PRINT_MEM_STATS( "free hist2" );
            }

            if (nretain < 0 )
            {
                WARNING_message( "Went over sparsity goal %d by %d, with a resolution of %f",
                      ngoal, -1*nretain, h2binwidth);
            }
        }
        if (nretain > 0 )
        {
            WARNING_message( "Was not able to meet goal of %d (%3.2f%%) correlations, %d (%3.2f%%) correlations passed the threshold of %3.2f, maybe you need to change the threshold or the desired sparsity?",
                  ngoal, 100.0*((float)ngoal)/((float)totPosCor), totNumCor, 100.0*((float)totNumCor)/((float)totPosCor),  thresh);
        }
   }

   INFO_message("Done..\n") ;

   /* update running memory statistics to reflect freeing the vectim */
   DEC_MEM_STATS(((xvectim->nvec*sizeof(int)) +
                       ((xvectim->nvec)*(xvectim->nvals))*sizeof(float) +
                       sizeof(MRI_vectim)), "vectim");

   /* toss some trash */
   VECTIM_destroy(xvectim) ;
   DSET_delete(xset) ;
   if(fout1D!=NULL)fclose(fout1D);

   PRINT_MEM_STATS( "vectim unload" );

   if (weightedDC){ free(weightedDC) ; weightedDC = NULL ; }
   if (binaryDC)  { free(binaryDC)   ; binaryDC   = NULL ; }
   
   /* finito */
   INFO_message("Writing output dataset to disk [%s bytes]",
                commaized_integer_string(cset->dblk->total_bytes)) ;

   /* write the dataset */
   DSET_write(cset) ;
   WROTE_DSET(cset) ;

   /* decrement our memory stats; since we are relying on the header for this
      information, we update the stats before actually freeing the memory */
   DEC_MEM_STATS( (DSET_NVOX(cset)*DSET_NVALS(cset)*sizeof(float)), "output dset");

   /* free up the output dataset memory */
   DSET_unload(cset) ;
   DSET_delete(cset) ;

   /* force a print */
   MEM_STAT = 1;
   PRINT_MEM_STATS( "Fin" );

   exit(0) ;
}
Exemplo n.º 4
0
int main( int argc , char * argv[] )
{
   int do_norm=0 , qdet=2 , have_freq=0 , do_automask=0 ;
   float dt=0.0f , fbot=0.0f,ftop=999999.9f , blur=0.0f ;
   MRI_IMARR *ortar=NULL ; MRI_IMAGE *ortim=NULL ;
   THD_3dim_dataset **ortset=NULL ; int nortset=0 ;
   THD_3dim_dataset *inset=NULL , *outset ;
   char *prefix="bandpass" ;
   byte *mask=NULL ;
   int mask_nx=0,mask_ny=0,mask_nz=0,nmask , verb=1 , 
       nx,ny,nz,nvox , nfft=0 , kk ;
   float **vec , **ort=NULL ; int nort=0 , vv , nopt , ntime  ;
   MRI_vectim *mrv ;
   float pvrad=0.0f ; int nosat=0 ;
   int do_despike=0 ;

   /*-- help? --*/

   AFNI_SETUP_OMP(0) ;  /* 24 Jun 2013 */

   if( argc < 2 || strcmp(argv[1],"-help") == 0 ){
     printf(
       "\n"
       "** NOTA BENE:  For the purpose of preparing resting-state FMRI datasets **\n"
       "** for analysis (e.g., with 3dGroupInCorr),  this program is now mostly **\n"
       "** superseded by the afni_proc.py script.  See the 'afni_proc.py -help' **\n"
       "** section 'Resting state analysis (modern)' to get our current rs-FMRI **\n"
       "** pre-processing recommended sequence of steps. -- RW Cox, et alii.    **\n"
       "\n"
       "Usage: 3dBandpass [options] fbot ftop dataset\n"
       "\n"
       "* One function of this program is to prepare datasets for input\n"
       "   to 3dSetupGroupInCorr.  Other uses are left to your imagination.\n"
       "\n"
       "* 'dataset' is a 3D+time sequence of volumes\n"
       "   ++ This must be a single imaging run -- that is, no discontinuities\n"
       "       in time from 3dTcat-ing multiple datasets together.\n"
       "\n"
       "* fbot = lowest frequency in the passband, in Hz\n"
       "   ++ fbot can be 0 if you want to do a lowpass filter only;\n"
       "       HOWEVER, the mean and Nyquist freq are always removed.\n"
       "\n"
       "* ftop = highest frequency in the passband (must be > fbot)\n"
       "   ++ if ftop > Nyquist freq, then it's a highpass filter only.\n"
       "\n"
       "* Set fbot=0 and ftop=99999 to do an 'allpass' filter.\n"
       "  ++ Except for removal of the 0 and Nyquist frequencies, that is.\n"
       "\n"
       "* You cannot construct a 'notch' filter with this program!\n"
       "  ++ You could use 3dBandpass followed by 3dcalc to get the same effect.\n"
       "  ++ If you are understand what you are doing, that is.\n"
       "  ++ Of course, that is the AFNI way -- if you don't want to\n"
       "     understand what you are doing, use Some other PrograM, and\n"
       "     you can still get Fine StatisticaL maps.\n"
       "\n"
       "* 3dBandpass will fail if fbot and ftop are too close for comfort.\n"
       "  ++ Which means closer than one frequency grid step df,\n"
       "     where df = 1 / (nfft * dt) [of course]\n"
       "\n"
       "* The actual FFT length used will be printed, and may be larger\n"
       "   than the input time series length for the sake of efficiency.\n"
       "  ++ The program will use a power-of-2, possibly multiplied by\n"
       "     a power of 3 and/or 5 (up to and including the 3rd power of\n"
       "     each of these: 3, 9, 27, and 5, 25, 125).\n"
       "\n"
       "* Note that the results of combining 3dDetrend and 3dBandpass will\n"
       "   depend on the order in which you run these programs.  That's why\n"
       "   3dBandpass has the '-ort' and '-dsort' options, so that the\n"
       "   time series filtering can be done properly, in one place.\n"
       "\n"
       "* The output dataset is stored in float format.\n"
       "\n"
       "* The order of processing steps is the following (most are optional):\n"
       " (0) Check time series for initial transients [does not alter data]\n"
       " (1) Despiking of each time series\n"
       " (2) Removal of a constant+linear+quadratic trend in each time series\n"
       " (3) Bandpass of data time series\n"
       " (4) Bandpass of -ort time series, then detrending of data\n"
       "      with respect to the -ort time series\n"
       " (5) Bandpass and de-orting of the -dsort dataset,\n"
       "      then detrending of the data with respect to -dsort\n"
       " (6) Blurring inside the mask [might be slow]\n"
       " (7) Local PV calculation     [WILL be slow!]\n"
       " (8) L2 normalization         [will be fast.]\n"
       "\n"
       "--------\n"
       "OPTIONS:\n"
       "--------\n"
       " -despike        = Despike each time series before other processing.\n"
       "                   ++ Hopefully, you don't actually need to do this,\n"
       "                      which is why it is optional.\n"
       " -ort f.1D       = Also orthogonalize input to columns in f.1D\n"
       "                   ++ Multiple '-ort' options are allowed.\n"
       " -dsort fset     = Orthogonalize each voxel to the corresponding\n"
       "                    voxel time series in dataset 'fset', which must\n"
       "                    have the same spatial and temporal grid structure\n"
       "                    as the main input dataset.\n"
       "                   ++ At present, only one '-dsort' option is allowed.\n"
       " -nodetrend      = Skip the quadratic detrending of the input that\n"
       "                    occurs before the FFT-based bandpassing.\n"
       "                   ++ You would only want to do this if the dataset\n"
       "                      had been detrended already in some other program.\n"
       " -dt dd          = set time step to 'dd' sec [default=from dataset header]\n"
       " -nfft N         = set the FFT length to 'N' [must be a legal value]\n"
       " -norm           = Make all output time series have L2 norm = 1\n"
       "                   ++ i.e., sum of squares = 1\n"
       " -mask mset      = Mask dataset\n"
       " -automask       = Create a mask from the input dataset\n"
       " -blur fff       = Blur (inside the mask only) with a filter\n"
       "                    width (FWHM) of 'fff' millimeters.\n"
       " -localPV rrr    = Replace each vector by the local Principal Vector\n"
       "                    (AKA first singular vector) from a neighborhood\n"
       "                    of radius 'rrr' millimiters.\n"
       "                   ++ Note that the PV time series is L2 normalized.\n"
       "                   ++ This option is mostly for Bob Cox to have fun with.\n"
       "\n"
       " -input dataset  = Alternative way to specify input dataset.\n"
       " -band fbot ftop = Alternative way to specify passband frequencies.\n"
       "\n"
       " -prefix ppp     = Set prefix name of output dataset.\n"
       " -quiet          = Turn off the fun and informative messages. (Why?)\n"
       "\n"
       " -notrans        = Don't check for initial positive transients in the data:\n"
       "  *OR*             ++ The test is a little slow, so skipping it is OK,\n"
       " -nosat               if you KNOW the data time series are transient-free.\n"
       "                   ++ Or set AFNI_SKIP_SATCHECK to YES.\n"
       "                   ++ Initial transients won't be handled well by the\n"
       "                      bandpassing algorithm, and in addition may seriously\n"
       "                      contaminate any further processing, such as inter-voxel\n"
       "                      correlations via InstaCorr.\n"
       "                   ++ No other tests are made [yet] for non-stationary behavior\n"
       "                      in the time series data.\n"
     ) ;
     PRINT_AFNI_OMP_USAGE(
       "3dBandpass" ,
       "* At present, the only part of 3dBandpass that is parallelized is the\n"
       "  '-blur' option, which processes each sub-brick independently.\n"
     ) ;
     PRINT_COMPILE_DATE ; exit(0) ;
   }
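   /* A typical invocation (hypothetical file names, shown only to illustrate
      the argument order parsed below):

        3dBandpass -mask mask+orig -ort motion.1D -prefix rest_bp 0.01 0.10 rest+orig

      i.e. options first, then fbot, ftop, and the input dataset. */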

   /*-- startup --*/

   mainENTRY("3dBandpass"); machdep();
   AFNI_logger("3dBandpass",argc,argv);
   PRINT_VERSION("3dBandpass"); AUTHOR("RW Cox");

   nosat =  AFNI_yesenv("AFNI_SKIP_SATCHECK") ;

   nopt = 1 ;
   while( nopt < argc && argv[nopt][0] == '-' ){

     if( strcmp(argv[nopt],"-despike") == 0 ){  /* 08 Oct 2010 */
       do_despike++ ; nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-nfft") == 0 ){
       int nnup ;
       if( ++nopt >= argc ) ERROR_exit("need an argument after -nfft!") ;
       nfft = (int)strtod(argv[nopt],NULL) ;
       nnup = csfft_nextup_even(nfft) ;
       if( nfft < 16 || nfft != nnup )
         ERROR_exit("value %d after -nfft is illegal! Next legal value = %d",nfft,nnup) ;
       nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-blur") == 0 ){
       if( ++nopt >= argc ) ERROR_exit("need an argument after -blur!") ;
       blur = strtod(argv[nopt],NULL) ;
       if( blur <= 0.0f ) WARNING_message("non-positive blur?!") ;
       nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-localPV") == 0 ){
       if( ++nopt >= argc ) ERROR_exit("need an argument after -localpv!") ;
       pvrad = strtod(argv[nopt],NULL) ;
       if( pvrad <= 0.0f ) WARNING_message("non-positive -localpv?!") ;
       nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-prefix") == 0 ){
       if( ++nopt >= argc ) ERROR_exit("need an argument after -prefix!") ;
       prefix = strdup(argv[nopt]) ;
       if( !THD_filename_ok(prefix) ) ERROR_exit("bad -prefix option!") ;
       nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-automask") == 0 ){
       if( mask != NULL ) ERROR_exit("Can't use -mask AND -automask!") ;
       do_automask = 1 ; nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-mask") == 0 ){
       THD_3dim_dataset *mset ;
       if( ++nopt >= argc ) ERROR_exit("Need argument after '-mask'") ;
       if( mask != NULL || do_automask ) ERROR_exit("Can't have two mask inputs") ;
       mset = THD_open_dataset( argv[nopt] ) ;
       CHECK_OPEN_ERROR(mset,argv[nopt]) ;
       DSET_load(mset) ; CHECK_LOAD_ERROR(mset) ;
       mask_nx = DSET_NX(mset); mask_ny = DSET_NY(mset); mask_nz = DSET_NZ(mset);
       mask = THD_makemask( mset , 0 , 0.5f, 0.0f ) ; DSET_delete(mset) ;
       if( mask == NULL ) ERROR_exit("Can't make mask from dataset '%s'",argv[nopt]) ;
       nmask = THD_countmask( mask_nx*mask_ny*mask_nz , mask ) ;
       if( verb ) INFO_message("Number of voxels in mask = %d",nmask) ;
       if( nmask < 1 ) ERROR_exit("Mask is too small to process") ;
       nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-norm") == 0 ){
       do_norm = 1 ; nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-quiet") == 0 ){
       verb = 0 ; nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-notrans") == 0 || strcmp(argv[nopt],"-nosat") == 0 ){
       nosat = 1 ; nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-ort") == 0 ){
       if( ++nopt >= argc ) ERROR_exit("need an argument after -ort!") ;
       if( ortar == NULL ) INIT_IMARR(ortar) ;
       ortim = mri_read_1D( argv[nopt] ) ;
       if( ortim == NULL ) ERROR_exit("can't read from -ort '%s'",argv[nopt]) ;
       mri_add_name(argv[nopt],ortim) ;
       ADDTO_IMARR(ortar,ortim) ;
       nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-dsort") == 0 ){
       THD_3dim_dataset *qset ;
       if( ++nopt >= argc ) ERROR_exit("need an argument after -dsort!") ;
       if( nortset > 0 ) ERROR_exit("only 1 -dsort option is allowed!") ;
       qset = THD_open_dataset(argv[nopt]) ;
       CHECK_OPEN_ERROR(qset,argv[nopt]) ;
       ortset = (THD_3dim_dataset **)realloc(ortset,
                                       sizeof(THD_3dim_dataset *)*(nortset+1)) ;
       ortset[nortset++] = qset ;
       nopt++ ; continue ;
     }

     if( strncmp(argv[nopt],"-nodetrend",6) == 0 ){
       qdet = 0 ; nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-dt") == 0 ){
       if( ++nopt >= argc ) ERROR_exit("need an argument after -dt!") ;
       dt = (float)strtod(argv[nopt],NULL) ;
       if( dt <= 0.0f ) WARNING_message("value after -dt illegal!") ;
       nopt++ ; continue ;
     }

     if( strcmp(argv[nopt],"-input") == 0 ){
       if( inset != NULL ) ERROR_exit("Can't have 2 -input options!") ;
       if( ++nopt >= argc ) ERROR_exit("need an argument after -input!") ;
       inset = THD_open_dataset(argv[nopt]) ;
       CHECK_OPEN_ERROR(inset,argv[nopt]) ;
       nopt++ ; continue ;
     }

     if( strncmp(argv[nopt],"-band",5) == 0 ){
       if( ++nopt >= argc-1 ) ERROR_exit("need 2 arguments after -band!") ;
       if( have_freq ) WARNING_message("second -band option replaces first one!") ;
       fbot = strtod(argv[nopt++],NULL) ;
       ftop = strtod(argv[nopt++],NULL) ;
       have_freq = 1 ; continue ;
     }

     ERROR_exit("Unknown option: '%s'",argv[nopt]) ;
   }

   /** check inputs for reasonablositiness **/

   if( !have_freq ){
     if( nopt+1 >= argc )
       ERROR_exit("Need frequencies on command line after options!") ;
     fbot = (float)strtod(argv[nopt++],NULL) ;
     ftop = (float)strtod(argv[nopt++],NULL) ;
   }

   if( inset == NULL ){
     if( nopt >= argc )
       ERROR_exit("Need input dataset name on command line after options!") ;
     inset = THD_open_dataset(argv[nopt]) ;
     CHECK_OPEN_ERROR(inset,argv[nopt]) ; nopt++ ;
   }
   DSET_UNMSEC(inset) ;

   if( fbot < 0.0f  ) ERROR_exit("fbot value can't be negative!") ;
   if( ftop <= fbot ) ERROR_exit("ftop value %g must be greater than fbot value %g!",ftop,fbot) ;

   ntime = DSET_NVALS(inset) ;
   if( ntime < 9 ) ERROR_exit("Input dataset is too short!") ;

   if( nfft <= 0 ){
     nfft = csfft_nextup_even(ntime) ;
     if( verb ) INFO_message("Data length = %d  FFT length = %d",ntime,nfft) ;
     (void)THD_bandpass_set_nfft(nfft) ;
   } else if( nfft < ntime ){
     ERROR_exit("-nfft %d is less than data length = %d",nfft,ntime) ;
   } else {
     kk = THD_bandpass_set_nfft(nfft) ;
     if( kk != nfft && verb )
       INFO_message("Data length = %d  FFT length = %d",ntime,kk) ;
   }
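   /* Rough illustration (assumed values): the frequency grid step is
      df = 1/(nfft*dt), so nfft = 256 and dt = 2.0 s give df = 1/512 ~ 0.00195 Hz;
      fbot and ftop must be separated by at least about one such step for the
      passband to contain any frequencies (checked by THD_bandpass_OK below). */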

   if( dt <= 0.0f ){
     dt = DSET_TR(inset) ;
     if( dt <= 0.0f ){
       WARNING_message("Setting dt=1.0 since input dataset lacks a time axis!") ;
       dt = 1.0f ;
     }
   }

   if( !THD_bandpass_OK(ntime,dt,fbot,ftop,1) ) ERROR_exit("Can't continue!") ;

   nx = DSET_NX(inset); ny = DSET_NY(inset); nz = DSET_NZ(inset); nvox = nx*ny*nz;

   /* check mask, or create it */

   if( verb ) INFO_message("Loading input dataset time series" ) ;
   DSET_load(inset) ;

   if( mask != NULL ){
     if( mask_nx != nx || mask_ny != ny || mask_nz != nz )
       ERROR_exit("-mask dataset grid doesn't match input dataset") ;

   } else if( do_automask ){
     mask = THD_automask( inset ) ;
     if( mask == NULL )
       ERROR_message("Can't create -automask from input dataset?") ;
     nmask = THD_countmask( DSET_NVOX(inset) , mask ) ;
     if( verb ) INFO_message("Number of voxels in automask = %d",nmask);
     if( nmask < 1 ) ERROR_exit("Automask is too small to process") ;

   } else {
     mask = (byte *)malloc(sizeof(byte)*nvox) ; nmask = nvox ;
     memset(mask,1,sizeof(byte)*nvox) ;
     if( verb ) INFO_message("No mask ==> processing all %d voxels",nvox);
   }

   /* A simple check of dataset quality [08 Feb 2010] */

   if( !nosat ){
     float val ;
     INFO_message(
      "Checking dataset for initial transients [use '-notrans' to skip this test]") ;
     val = THD_saturation_check(inset,mask,0,0) ; kk = (int)(val+0.54321f) ;
     if( kk > 0 )
       ININFO_message(
        "Looks like there %s %d non-steady-state initial time point%s :-(" ,
        ((kk==1) ? "is" : "are") , kk , ((kk==1) ? " " : "s") ) ;
     else if( val > 0.3210f )  /* don't ask where this threshold comes from! */
       ININFO_message(
        "MAYBE there's an initial positive transient of 1 point, but it's hard to tell\n") ;
     else
       ININFO_message("No widespread initial positive transient detected :-)") ;
   }

   /* check -dsort inputs for match to inset */

   for( kk=0 ; kk < nortset ; kk++ ){
     if( DSET_NX(ortset[kk])    != nx ||
         DSET_NY(ortset[kk])    != ny ||
         DSET_NZ(ortset[kk])    != nz ||
         DSET_NVALS(ortset[kk]) != ntime )
       ERROR_exit("-dsort %s doesn't match input dataset grid" ,
                  DSET_BRIKNAME(ortset[kk]) ) ;
   }

   /* convert input dataset to a vectim, which is more fun */

   mrv = THD_dset_to_vectim( inset , mask , 0 ) ;
   if( mrv == NULL ) ERROR_exit("Can't load time series data!?") ;
   DSET_unload(inset) ;

   /* similarly for the ort vectors */

   if( ortar != NULL ){
     for( kk=0 ; kk < IMARR_COUNT(ortar) ; kk++ ){
       ortim = IMARR_SUBIM(ortar,kk) ;
       if( ortim->nx < ntime )
         ERROR_exit("-ort file %s is shorter than input dataset time series",
                    ortim->name ) ;
       ort  = (float **)realloc( ort , sizeof(float *)*(nort+ortim->ny) ) ;
       for( vv=0 ; vv < ortim->ny ; vv++ )
         ort[nort++] = MRI_FLOAT_PTR(ortim) + ortim->nx * vv ;
     }
   }

   /* check whether processing leaves any DoF remaining  18 Mar 2015 [rickr] */
   {
      int nbprem = THD_bandpass_remain_dim(ntime, dt, fbot, ftop, 1);
      int bpused, nremain;
      int wlimit;               /* warning limit */

      bpused = ntime - nbprem;  /* #dim lost in bandpass step */

      nremain = nbprem - nort;  /* #dim left in output */
      if( nortset == 1 ) nremain--;
      nremain -= (qdet+1);

      if( verb ) INFO_message("%d dimensional data reduced to %d by:\n"
                    "    %d (bandpass), %d (-ort), %d (-dsort), %d (detrend)",
                    ntime, nremain, bpused, nort, nortset?1:0, qdet+1);

      /* possibly warn (if 95% lost) user or fail */
      wlimit = ntime/20;
      if( wlimit < 3 ) wlimit = 3;
      if( nremain < wlimit && nremain > 0 )
         WARNING_message("dimensionality reduced from %d to %d, be careful!",
                         ntime, nremain);
      if( nremain <= 0 ) /* FAILURE */
         ERROR_exit("dimensionality reduced from %d to %d, failing!",
                    ntime, nremain);
   }
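   /* Back-of-the-envelope version of the count above (assumed numbers, not from
      a real dataset): bandpassing to [fbot,ftop] keeps roughly
      2*(ftop-fbot)*ntime*dt degrees of freedom, so ntime = 200, dt = 2.5 s,
      fbot = 0.01 Hz, ftop = 0.10 Hz leave about 2*0.09*200*2.5 = 90 dimensions,
      before the -ort, -dsort, and detrending dimensions are subtracted. */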

   /* all the real work now */

   if( do_despike ){
     int_pair nsp ;
     if( verb ) INFO_message("Testing data time series for spikes") ;
     nsp = THD_vectim_despike9( mrv ) ;
     if( verb ) ININFO_message(" -- Squashed %d spikes from %d voxels",nsp.j,nsp.i) ;
   }

   if( verb ) INFO_message("Bandpassing data time series") ;
   (void)THD_bandpass_vectim( mrv , dt,fbot,ftop , qdet , nort,ort ) ;

   /* OK, maybe a little more work */

   if( nortset == 1 ){
     MRI_vectim *orv ;
     orv = THD_dset_to_vectim( ortset[0] , mask , 0 ) ;
     if( orv == NULL ){
       ERROR_message("Can't load -dsort %s",DSET_BRIKNAME(ortset[0])) ;
     } else {
       float *dp , *mvv , *ovv , ff ;
       if( verb ) INFO_message("Orthogonalizing to bandpassed -dsort") ;
       (void)THD_bandpass_vectim( orv , dt,fbot,ftop , qdet , nort,ort ) ;
       THD_vectim_normalize( orv ) ;
       dp = malloc(sizeof(float)*mrv->nvec) ;
       THD_vectim_vectim_dot( mrv , orv , dp ) ;
       for( vv=0 ; vv < mrv->nvec ; vv++ ){
         ff = dp[vv] ;
         if( ff != 0.0f ){
           mvv = VECTIM_PTR(mrv,vv) ; ovv = VECTIM_PTR(orv,vv) ;
           for( kk=0 ; kk < ntime ; kk++ ) mvv[kk] -= ff*ovv[kk] ;
         }
       }
       VECTIM_destroy(orv) ; free(dp) ;
     }
   }
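   /* The loop above, written as a formula: with d the unit-norm bandpassed
      -dsort vector for a voxel and y the voxel's data vector,
         y_new = y - (y.d)*d ,
      i.e. the projection of y onto d is removed, which implements the per-voxel
      orthogonalization described in the -dsort help. */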

   if( blur > 0.0f ){
     if( verb )
       INFO_message("Blurring time series data spatially; FWHM=%.2f",blur) ;
     mri_blur3D_vectim( mrv , blur ) ;
   }
   if( pvrad > 0.0f ){
     if( verb )
       INFO_message("Local PV-ing time series data spatially; radius=%.2f",pvrad) ;
     THD_vectim_normalize( mrv ) ;
     THD_vectim_localpv( mrv , pvrad ) ;
   }
   if( do_norm && pvrad <= 0.0f ){
     if( verb ) INFO_message("L2 normalizing time series data") ;
     THD_vectim_normalize( mrv ) ;
   }

   /* create output dataset, populate it, write it, then quit */

   if( verb ) INFO_message("Creating output dataset in memory, then writing it") ;
   outset = EDIT_empty_copy(inset) ;
   /* do not copy scalars    11 Sep 2015 [rickr] */
   EDIT_dset_items( outset , ADN_prefix,prefix ,
                             ADN_brick_fac,NULL ,
                    ADN_none ) ;
   tross_Copy_History( inset , outset ) ;
   tross_Make_History( "3dBandpass" , argc,argv , outset ) ;

   for( vv=0 ; vv < ntime ; vv++ )
     EDIT_substitute_brick( outset , vv , MRI_float , NULL ) ;

#if 1
   THD_vectim_to_dset( mrv , outset ) ;
#else
 AFNI_OMP_START ;
#pragma omp parallel
 { float *far , *var ; int *ivec=mrv->ivec ; int vv,kk ;
#pragma omp for
   for( vv=0 ; vv < ntime ; vv++ ){
     far = DSET_BRICK_ARRAY(outset,vv) ; var = mrv->fvec + vv ;
     for( kk=0 ; kk < nmask ; kk++ ) far[ivec[kk]] = var[kk*ntime] ;
   }
 }
 AFNI_OMP_END ;
#endif
   VECTIM_destroy(mrv) ;
   DSET_write(outset) ; if( verb ) WROTE_DSET(outset) ;

   exit(0) ;
}
Exemplo n.º 5
0
void NWC_help(void)
{
   printf(
    "Usage: 3dNwarpCalc [options] expression\n"
    "------\n"
    " * This program performs calculations on 3D warps defined on a grid.\n"
    "\n"
    " * The fundamental idea is a 'stack' of warps, with operators being\n"
    "    applied to the top element(s) of the stack.\n"
    "   ++ If you don't know what a computer science 'stack' is, see\n"
    "      http://en.wikipedia.org/wiki/Stack_(data_structure)\n"
    "   ++ Also see 1dmatcalc for a similar implementation of a stack\n"
    "      of matrix operations.\n"
    "   ++ In the explanations below, the stack will be denoted as\n"
    "        [ A B C ... ]\n"
    "      where A is the top element, B the next element, etc.\n"
    "      Operations take place using the top one or two elements.\n"
    "\n"
    " * The expression is a single string enclosed in quotes ' or \",\n"
    "    with operators separated by spaces.  See EXAMPLES below.\n"
    "   ++ The expression is the last thing on the command line!\n"
    "   ++ For scripting convenience, you can actually break the\n"
    "      expression into multiple strings (after all the '-' options),\n"
    "      and they will be re-assembled into one big string for\n"
    "      parsing and processing.\n"
    "\n"
    "** Note that to get any output, you will have to use the '&write'\n"
    "    operator at least once.  Otherwise, the program computes stuff\n"
    "    and then just throws it away.  (Fun perhaps, but useless.)\n"
    "\n"
    " * To actually use a 3D warp to transform a dataset, you must run the\n"
    "    program 3dNwarpApply:\n"
    "   ++ Note that the warp used in 3dNwarpApply does not need to be on the\n"
    "      same grid as the dataset being transformed.  You can define a warp\n"
    "      on a high-resolution anatomical grid and apply it to a low-resolution\n"
    "      functional dataset, for example -- 3dNwarpApply will figure it out.\n"
    "   ++ On the other hand, all the warps in 3dNwarpCalc must be defined on\n"
    "      the same spatial grid!\n"
    "   ++ (LATER) You can use the &apply command to transform a 3D dataset\n"
    "      from within 3dNwarpCalc, if you don't need the special capabilities\n"
    "      of 3dNwarpApply.\n"
    "\n"
    " * Operations such as &invert and &sqrt may produce artifacts and be\n"
    "    inaccurate near the edges of the 3D grid, since they might require\n"
    "    extrapolating the warp to the outside of the grid in places, where\n"
    "    there is no information.\n"
    "\n"
    " * For convenience, you can break the expression up into multiple strings\n"
    "    (after all the options), and they will be re-assembled into the single\n"
    "    expression string the controlling C function needs to work.\n"
    "   ++ But note that since the '&' and '()' characters are special to the shell\n"
    "      you have to put the expression string(s) inside single quote ' or double\n"
    "      quote \" pairs, or else ugly things will happen.\n"
    "      (Not as ugly as having a hippopotamus step on your head, but almost.)\n"
    "\n"
    "OPTIONS\n"
    "-------\n"
    " -interp iii   == 'iii' is the interpolation mode:\n"
    "                  ++ Modes allowed are a subset of those in 3dAllineate:\n"
    "                       linear  quintic  wsinc5\n"
    "                  ++ The default interpolation mode is 'quintic'.\n"
    "                  ++ 'linear' is much faster but less accurate.\n"
    "                  ++ 'wsinc5' is much slower but more accurate.\n"
    "\n"
    " -ainterp jjj  == 'jjj' is the interpolation mode for the '&apply' operation.\n"
    "                  ++ Modes allowed here are\n"
    "                       NN linear cubic quintic wsinc5\n"
    "                  ++ If this option isn't given, then the value from '-interp'\n"
    "                     is used (which should be good enough for government work).\n"
    "\n"
    " -verb         == print (to stderr) various fun messages along the road\n"
    "                  ++ A second '-verb' gives you even more fun!\n"
    "\n"
    "BUT WHERE DO WARPS COME FROM, MOMMY?\n"
    "------------------------------------\n"
    " * The program 3dAllineate with the -nwarp_save option will save a\n"
    "    displacement representation of a nonlinear warp to a 3D dataset\n"
    "    with 3 sub-bricks (1 for each of x, y, and z).\n"
    "\n"
    " * The contents of these sub-bricks are the displacments of each voxel in mm.\n"
    "   ++ The identity warp would be all zero, for example.\n"
    "\n"
    " * An input warp dataset can contain extra sub-bricks -- only the first 3\n"
    "    are used.\n"
    "\n"
#if 0
    " * Warp datasets output by this program have a 4th sub-brick, labeled\n"
    "    'hexvol', which contains the volume of each distorted hexahedron in\n"
    "    the grid.  This sub-brick is NOT used in any application of the\n"
    "    warp, such as 3dNwarpApply or further runs of 3dNwarpCalc, but can\n"
    "    help you understand how much distortion is present in the warp.\n"
    "   ++ If there are any negative values in the hexvol sub-brick, this\n"
    "      indicates that something bad happened in the warp calculation.\n"
#else
    " * If you want the volume distortion at each voxel, use the program\n"
    "   3dNwarpFuncs.\n"
#endif
    "\n"
    "OPERATORS\n"
    "---------\n"
    " * In the explanations below, the single character 'x' represents a 3D\n"
    "    coordinate vector, and a capital letter such as 'A' represents a\n"
    "    whole 3D warp function, whose output at a particular location is 'A(x)'.\n"
    " * You can replace the '&' character that starts a command with '%%' or '@',\n"
    "    if that is more convenient for you.\n"
    " * Operator names are not case sensitive: &INVERT is the same as &invert.\n"
    "\n"
    "&readnwarp(FF) == Read a 3D warp from a file and place it on top of the stack.\n"
    "  *OR*             The input file should be a 3D dataset with 3 sub-bricks\n"
    "&readwarp(FF)      (volumes) storing the xyz displacments of each grid point.\n"
    "\n"
    "&identwarp(FF) == Create an identity warp (all displacements 0) on the grid\n"
    "                   of a 3D dataset specified by the filename 'FF'.\n"
    "                  ++ This operation is to be used to create a starting point\n"
    "                     for calculations that otherwise do not involve a warp\n"
    "                     defined on a grid, such a polynomial warps.\n"
    "                  ++ The actual data in 'FF' is ignored by '&identwarp'; only the\n"
    "                     3D grid definition in the header is actually needed.\n"
    "       ----**==>> ++ Either '&identwarp' or '&readnwarp' should be the first\n"
    "                     operation, in order to define the grid for all subsequent\n"
    "                     calculations.\n"
    "\n"
    "&readpoly(FF)  == The input is a text file with one line of numbers\n"
    "                   specifying a warp as a polynomial, as output from\n"
    "                   '3dAllineate -1Dparam_save'.\n"
    "                  ++ The count of values determines the type of warp:\n"
    "                         12 ==> affine (shifts+angles+scales+shears)\n"
    "                         64 ==> cubic (3rd order) polyomial\n"
    "                        172 ==> quintic (5th order) polynomial\n"
    "                        364 ==> heptic (7th order) polynomial\n"
    "                        664 ==> nonic (9th order) polynomial\n"
    "                  ++ Any other count of values on the single input line is\n"
    "                     illegal, unconstitutional, against the laws of God,\n"
    "                     fattening, and will make you get red pimples on your nose.\n"
    "                  ++ The parameters could come, most probably, from using\n"
    "                     3dAllineate with the '-1Dparam_save' and '-nwarp' options.\n"
    "\n"
    "&read4x4(FF)   == Read an affine 4x4 transform matrix directly; the input\n"
    "                   file should contain 12 numbers in the order:\n"
    "                     r11 r12 r13 r14 r21 r22 r23 r24 r31 r32 r33 r34\n"
    "                   which will be organized into the 3D transformation matrix:\n"
    "                      r11   r12   r13   r14\n"
    "                      r21   r22   r23   r24\n"
    "                      r31   r32   r33   r34\n"
    "                      0.0   0.0   0.0   1.0\n"
    "                 ++ This matrix defines the transformation from input spatial\n"
    "                    DICOM coordinates (x,y,z) to output coordinates, in mm.\n"
    "                 ++ One way to get this matrix is via '3dAllineate -1Dmatrix_save'.\n"
    "                 ++ This matrix should have non-zero determinant!\n"
    "\n"
    "&write(FF)     == Write the 3D warp on the top of the stack to a file.\n"
    "                   The output file is always in a 3D nwarp (dataset) format\n"
    "                   -- NEVER a matrix or polynomial.\n"
    "\n"
    "&dup           == Push the duplicate of the top of the stack onto the stack:\n"
    "                   [ A B C ... ] goes to [ A A B C ... ]  after &dup.\n"
    "\n"
    "&swap          == Interchange the top two elements of the stack:\n"
    "                   [ A B C ... ] goes to [ B A C ... ]    after &swap\n"
    "                  ++ You can swap other elements of the stack by using\n"
    "                     indexes in the form '&swap(p,q)' where 'p' and 'q'\n"
    "                     are distinct non-negative integers indicating depth\n"
    "                     into the stack; '&swap' is equivalent to '&swap(0,1)'.\n"
    "\n"
    "&pop           == Remove (and delete) the top element from the stack:\n"
    "                   [ A B C ... ] goes to [ B C ... ]      after &pop\n"
    "\n"
    "&compose       == If the stack is [ A(x) B(x) C(x) ... ], compute the warp\n"
    "  *OR*             B(A(x)) and replace these top 2 elements with the result:\n"
    "&mult              [ A B C ... ] goes to [ B(A(x)) C(x) ... ] after &compose\n"
    "                  ++ If you wanted to compute A(B(x)), then you would use the\n"
    "                     operator combination '&swap &compose'.\n"
    "\n"
    "&invert        == Replace top element of the stack with its inverse:\n"
    "                   the warp J(x) such that A(J(x)) = x.\n"
    "                  ++ Inversion is done via a functional iteration:\n"
    "                       Jnew(x) = Jold( 2*x - A(Jold(x)) )\n"
    "                     which requires 1 warp composition and 1 warp interpolation\n"
    "                     for each step.\n"
    "                  ++ &invert and &invsqrt (and thus &sqrt) are slow operations\n"
    "                     due to the iterative nature of the calculations.\n"
#ifdef USE_OMP
    "                  ++ Multiple CPUS (via OpenMP) are used to help speed up\n"
    "                     these functions.\n"
#else
    "                  ++ On a system with OpenMP enabled, multiple CPUs would\n"
    "                     be used to speed up these functions.  However, this\n"
    "                     binary copy of 3dNwarpCalc has not been compiled with\n"
    "                     OpenMP (alas).\n"
#endif
    "                  ++ The '-verb' option to 3dNwarpCalc will show you the\n"
    "                     progress of the iterations for &invert and &invsqrt.\n"
    "\n"
    "&sqrt          == Replace top element of the stack with its 'square root':\n"
    "                   the warp Q(x) such that Q(Q(x)) = A(x).\n"
    "                  ++ NOTE: not all warps have square roots, so this operation\n"
    "                     is not guaranteed to work.  Be careful out there.\n"
    "                  ++ Nor is the square root of a nonlinear operator guaranteed\n"
    "                     to be unique!\n"
#ifndef USE_SQRTPAIR
    "                  ++ The basic algorithm used computes the inverse of Q(x),\n"
    "                     as in &invsqrt, so an extra warp inversion is required\n"
    "                     at the end here, so this operation is slower than &invsqrt.\n"
#endif
    "\n"
    "&invsqrt       == Replace the top element of the stack with the inverse of\n"
    "                   its square root: the warp R(x) such that A(R(R(x)) = x.\n"
    "                  ++ '&sqrtinv' is a synonym for this operation, since I always\n"
    "                     have trouble remembering which one is correct-imundo-ific.\n"
#ifndef USE_SQRTPAIR
    "                  ++ This operation is based on the functional iteration\n"
    "                       Rnew(x) = Rold( 1.5*x - 0.5*A(Rold(Rold(x))) )\n"
    "                     which is adapted from the Schulz iteration for matrix\n"
    "                     square roots, and requires 2 warp compositions and 1 warp\n"
    "                     interpolation for each step.\n"
#else
    "                  ++ This operation is based on a functional iteration\n"
    "                     adapted from the Denman-Beavers method for computing\n"
    "                     the square root of a matrix:\n"
    "                       initialize Y(x) = A(x) and Z(x) = x; then iterate\n"
    "                         Ynew(x) = 0.5*(Yold(x)+inv(Zold(x)))\n"
    "                         Znew(x) = 0.5*(Zold(x)+inv(Yold(x)))\n"
    "                       which converges to Y=sqrt(A) and Z=invsqrt(A).\n"
    "                  ++ For speed, these square root iterations are always done\n"
    "                     with linear interpolation, no matter what '-interp' is.\n"
#endif
    "\n"
    "&sqrtpair      == Compute both &sqrtinv and &sqrt, and leave both of them\n"
    "                  on the stack -- &sqrt on top, &sqrtinv 'below' it.\n"
    "\n"
    "&sqr           == Replace the top element of the stack with its 'square':\n"
    "                   the warp S(x) = A(A(x)).  Equivalent to '&dup &compose'.\n"
    "                  ++ To compute the fourth power of a warp: '&sqr &sqr'\n"
    "                  ++ To compute the third power of a warp:  '&dup &sqr &compose'\n"
    "                  ++ '&square' is a synonym for this operation.\n"
    "\n"
    "&scale(a)      == Scale the top-of-stack warp displacements by numerical\n"
    "                   factor 'a' in all 3 dimensions.\n"
    "                  ++ NOTE: this might make the warp non-invertible, (e.g., give\n"
    "                     negative results in 'hexvol') for large enough 'a'.\n"
    "                     Proceed at your own risk!\n"
    "                  ++ If a=0, then the result is the identity warp, since\n"
    "                     all the displacements are now 0.\n"
    "                  ++ The case a=-1 is NOT the inverse warp!\n"
    "\n"
    "&sum           == Add the displacements of the two warps on the stack,\n"
    "                   then replace BOTH of them with the result.\n"
    "                  ++ You can do something like '&sum(0.5,0.5)' to average\n"
    "                     the displacements, or '&sum(1,-1)' to difference them.\n"
    "                     In this case, the first value scales the displacements\n"
    "                     of the stack's top warp, and the second value scales the\n"
    "                     displacements of the stack's second warp.\n"
    "                  ++ NOTE: you can produce a non-invertible warp this way!\n"
    "\n"
    "&apply(DD,PP)  == Apply the 3D warp at the top of the stack to a dataset\n"
    "                   whose name is given by the 'DD' argument, to produce\n"
    "                   a dataset whose prefix is given by the 'PP' argument.\n"
    "                 ++ This operation does not affect the stack of warps.\n"
    "                 ++ &apply is provided to make your life simpler and happier :-)\n"
    "                 ++ &apply is like 3dNwarpApply with the output dataset PP\n"
    "                    always being on the same grid as the input dataset DD.\n"
    "                 ++ The grid of dataset DD does NOT have to be the same as the\n"
    "                    grid defining the warp defined on the stack.  If needed,\n"
    "                    the warp will be interpolated to be used with DD.\n"
    "                 ++ Program 3dNwarpApply provides more options to control\n"
    "                    the way that a warp is applied to a dataset; for example,\n"
    "                    to control the output grid spacing.\n"
    "\n"
    "EXAMPLES\n"
    "--------\n"
    "** Read a warp from a dataset, invert it, save the inverse.\n"
    "\n"
    " 3dNwarpCalc '&readnwarp(Warp+tlrc.HEAD) &invert &write(WarpInv)'\n"
    "\n"
    "** Do the same, but also compute the composition of the warp with the inverse,\n"
    "   and save that -- ideally, the output warp displacements would be identically\n"
    "   zero (i.e., the identity warp), and the 'hexvol' entries would be constant\n"
    "   and equal to the voxel volume.\n"
    "\n"
    " 3dNwarpCalc -verb '&readnwarp(Warp+tlrc.HEAD) &dup &invert' \\\n"
    "             '&write(WarpInv) &compose &write(WarpOut)'\n"
    "\n"
    "** Read in a warp, compute its inverse square root, then square that, and compose\n"
    "   the result with the original warp -- the result should be the identity warp\n"
    "   (i.e., all zero displacments) -- except for numerical errors, of course.\n"
    "\n"
    " 3dNwarpCalc '&readnwarp(Warp+tlrc.HEAD) &dup &invsqrt &sqr &compose &write(WarpOut)'\n"
   ) ;

   printf(
    "\n"
    "AUTHOR -- RWCox -- August 2011\n"
   ) ;

   PRINT_AFNI_OMP_USAGE("3dNwarpCalc",NULL) ; PRINT_COMPILE_DATE ;
   exit(0) ;
}
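
A minimal stand-alone sketch of the '&invert' functional iteration described above, reduced to one dimension: the warp is represented by its sampled values on a regular grid, Jnew(x) = Jold( 2*x - A(Jold(x)) ) is iterated starting from the identity, and the result is checked by composing A with J, as in the '&dup &invert &compose' example. The toy warp warp_A(), the helper eval_grid(), the grid range, and the displacement amplitude are all illustrative assumptions, not anything taken from the AFNI sources.

#include <stdio.h>
#include <math.h>

#define NPT  201
#define XMIN -6.0
#define XMAX  6.0

/* toy 1D "warp": identity plus a small smooth displacement */
static double warp_A( double x ){ return x + 0.4*sin(x) ; }

/* evaluate a warp stored as samples j[] on the regular grid,
   using linear interpolation (extended with unit slope at the ends) */
static double eval_grid( const double *j , double x )
{
   double dx = (XMAX-XMIN)/(NPT-1) , t = (x-XMIN)/dx ;
   int    i  = (int)floor(t) ;
   if( i < 0      ) return j[0]     + (x - XMIN) ;
   if( i >= NPT-1 ) return j[NPT-1] + (x - XMAX) ;
   t -= i ;
   return (1.0-t)*j[i] + t*j[i+1] ;
}

int main( void )
{
   double jold[NPT] , jnew[NPT] , x , err=0.0 ;
   double dx = (XMAX-XMIN)/(NPT-1) ;
   int ii , it ;

   for( ii=0 ; ii < NPT ; ii++ ) jold[ii] = XMIN + ii*dx ;  /* start at the identity warp */

   for( it=0 ; it < 20 ; it++ ){            /* Jnew(x) = Jold( 2*x - A(Jold(x)) ) */
     for( ii=0 ; ii < NPT ; ii++ ){
       x = XMIN + ii*dx ;
       jnew[ii] = eval_grid( jold , 2.0*x - warp_A(jold[ii]) ) ;
     }
     for( ii=0 ; ii < NPT ; ii++ ) jold[ii] = jnew[ii] ;
   }

   for( ii=0 ; ii < NPT ; ii++ ){           /* check A(J(x)) ~= x, cf. the      */
     x = XMIN + ii*dx ;                     /* '&dup &invert &compose' example  */
     double e = fabs( warp_A(jold[ii]) - x ) ;
     if( e > err ) err = e ;
   }
   printf("max | A(J(x)) - x | = %g\n", err) ;
   return 0 ;
}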
Exemplo n.º 6
0
int main( int argc , char *argv[] )
{
   THD_3dim_dataset *xset=NULL , *cset ;
   int nopt=1, datum=MRI_float, nvals, ii;
   MRI_IMAGE *ysim=NULL ;
   char *prefix = "Tcorr1D", *smethod="pearson";
   char *xnam=NULL , *ynam=NULL ;
   byte *mask=NULL ; int mask_nx,mask_ny,mask_nz , nmask=0 ;
   int do_atanh = 0 ; /* 12 Jan 2018 */

   /*----*/

   AFNI_SETUP_OMP(0) ;  /* 24 Jun 2013 */

   if( argc < 2 || strcmp(argv[1],"-help") == 0 ){
      printf("Usage: 3dTcorr1D [options] xset y1D\n"
             "Computes the correlation coefficient between each voxel time series\n"
             "in the input 3D+time dataset 'xset' and each column in the 1D time\n"
             "series file 'y1D', and stores the output values in a new dataset.\n"
             "\n"
             "OPTIONS:\n"
             "  -pearson  = Correlation is the normal Pearson (product moment)\n"
             "                correlation coefficient [this is the default method].\n"
             "  -spearman = Correlation is the Spearman (rank) correlation\n"
             "                coefficient.\n"
             "  -quadrant = Correlation is the quadrant correlation coefficient.\n"
             "  -ktaub    = Correlation is Kendall's tau_b coefficient.\n"
             "              ++ For 'continuous' or finely-discretized data, tau_b and\n"
             "                 rank correlation are nearly equivalent (but not equal).\n"
             "  -dot      = Doesn't actually compute a correlation coefficient; just\n"
             "                calculates the dot product between the y1D vector(s)\n"
             "                and the dataset time series.\n"
             "\n"
             "  -Fisher   = Apply the 'Fisher' (inverse hyperbolic tangent) transformation\n"
             "                to the results.\n"
             "              ++ It does not make sense to use this with '-ktaub', but if\n"
             "                 you want to do it, the program will not stop you.\n"
             "              ++ Cannot be used with '-dot'!\n"
             "\n"
             "  -prefix p = Save output into dataset with prefix 'p'\n"
             "               [default prefix is 'Tcorr1D'].\n"
             "\n"
             "  -mask mmm = Only process voxels from 'xset' that are nonzero\n"
             "                in the 3D mask dataset 'mmm'.\n"
             "              ++ Other voxels in the output will be set to zero.\n"
             "\n"
             "  -float    = Save results in float format [the default format].\n"
             "  -short    = Save results in scaled short format [to save disk space].\n"
             "              ++ Cannot be used with '-dot'!\n"
             "\n"
             "NOTES:\n"
             "* The output dataset is functional bucket type, with one sub-brick\n"
             "   per column of the input y1D file.\n"
             "* No detrending, blurring, or other pre-processing options are available;\n"
             "   if you want these things, see 3dDetrend or 3dTproject or 3dcalc.\n"
             "   [In other words, this program presumes you know what you are doing!]\n"
             "* Also see 3dTcorrelate to do voxel-by-voxel correlation of TWO\n"
             "   3D+time datasets' time series, with similar options.\n"
             "* You can extract the time series from a single voxel with given\n"
             "   spatial indexes using 3dmaskave, and then run it with 3dTcorr1D:\n"
             "    3dmaskave -quiet -ibox 40 30 20 epi_r1+orig > r1_40_30_20.1D\n"
             "    3dTcorr1D -pearson -Fisher -prefix c_40_30_20 epi_r1+orig r1_40_30_20.1D\n"
             "* http://en.wikipedia.org/wiki/Correlation\n"
             "* http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient\n"
             "* http://en.wikipedia.org/wiki/Spearman%%27s_rank_correlation_coefficient\n"
             "* http://en.wikipedia.org/wiki/Kendall_tau_rank_correlation_coefficient\n"
             "\n"
             "-- RWCox - Apr 2010\n"
             "         - Jun 2010: Multiple y1D columns; OpenMP; -short; -mask.\n"
            ) ;
      PRINT_AFNI_OMP_USAGE("3dTcorr1D",NULL) ;
      PRINT_COMPILE_DATE ; exit(0) ;
   }

   mainENTRY("3dTcorr1D main"); machdep(); AFNI_logger("3dTcorr1D",argc,argv);
   PRINT_VERSION("3dTcorr1D") ; THD_check_AFNI_version("3dTcorr1D") ;

   /*-- option processing --*/

   while( nopt < argc && argv[nopt][0] == '-' ){

     if( strcmp(argv[nopt],"-mask") == 0 ){  /* 28 Jun 2010 */
       THD_3dim_dataset *mset ;
       if( ++nopt >= argc ) ERROR_exit("Need argument after '-mask'") ;
       if( mask != NULL )   ERROR_exit("Can't have two mask inputs") ;
       mset = THD_open_dataset( argv[nopt] ) ;
       CHECK_OPEN_ERROR(mset,argv[nopt]) ;
       DSET_load(mset) ; CHECK_LOAD_ERROR(mset) ;
       mask_nx = DSET_NX(mset); mask_ny = DSET_NY(mset); mask_nz = DSET_NZ(mset);
       mask = THD_makemask( mset , 0 , 0.5f, 0.0f ) ; DSET_delete(mset) ;
       if( mask == NULL ) ERROR_exit("Can't make mask from dataset '%s'",argv[nopt]) ;
       nmask = THD_countmask( mask_nx*mask_ny*mask_nz , mask ) ;
       INFO_message("Number of voxels in mask = %d",nmask) ;
       if( nmask < 2 ) ERROR_exit("Mask is too small to process") ;
       nopt++ ; continue ;
     }

      if( strcasecmp(argv[nopt],"-float") == 0 ){  /* 27 Jun 2010 */
        datum = MRI_float ; nopt++ ; continue ;
      }
      if( strcasecmp(argv[nopt],"-short") == 0 ){
        datum = MRI_short ; nopt++ ; continue ;
      }

      if( strcasecmp(argv[nopt],"-pearson") == 0 ){
        smethod = "pearson" ; nopt++ ; continue ;
      }

      if( strcasecmp(argv[nopt],"-dot") == 0 ){
        smethod = "dot" ; nopt++ ; continue ;
      }


      if( strcasecmp(argv[nopt],"-spearman") == 0 || strcasecmp(argv[nopt],"-rank") == 0 ){
        smethod = "spearman" ; nopt++ ; continue ;
      }

      if( strcasecmp(argv[nopt],"-quadrant") == 0 ){
        smethod = "quadrant" ; nopt++ ; continue ;
      }

      if( strcasecmp(argv[nopt],"-ktaub") == 0 || strcasecmp(argv[nopt],"-taub") == 0 ){
        smethod = "ktaub" ; nopt++ ; continue ;
      }

      if( strcasecmp(argv[nopt],"-fisher") == 0 ){ /* 12 Jan 2018 */
        do_atanh = 1 ; nopt++ ; continue ;
      }

      if( strcmp(argv[nopt],"-prefix") == 0 ){
        prefix = argv[++nopt] ;
        if( !THD_filename_ok(prefix) ) ERROR_exit("Illegal value after -prefix!") ;
        nopt++ ; continue ;
      }

      ERROR_exit("Unknown option: %s",argv[nopt]) ;
   }

   /*------------ open datasets, check for legality ------------*/

   if( nopt+1 >= argc )
     ERROR_exit("Need 2 non-option arguments on command line!?") ;

   /* despite what the help says, if the 1D file is first, that's OK */

   if( STRING_HAS_SUFFIX(argv[nopt],"1D") ){
     ININFO_message("reading 1D file %s",argv[nopt]) ;
     ysim = mri_read_1D( argv[nopt] ) ; ynam = argv[nopt] ;
     if( ysim == NULL ) ERROR_exit("Can't read 1D file %s",argv[nopt]) ;
   } else {
     ININFO_message("reading dataset file %s",argv[nopt]) ;
     xset = THD_open_dataset( argv[nopt] ) ; xnam = argv[nopt] ;
     if( xset == NULL ) ERROR_exit("Can't open dataset %s",argv[nopt]) ;
   }

   /* read whatever type of file (3D or 1D) we don't already have */

   nopt++ ;
   if( xset != NULL ){
     ININFO_message("reading 1D file %s",argv[nopt]) ;
     ysim = mri_read_1D( argv[nopt] ) ; ynam = argv[nopt] ;
     if( ysim == NULL ) ERROR_exit("Can't read 1D file %s",argv[nopt]) ;
   } else {
     ININFO_message("reading dataset file %s",argv[nopt]) ;
     xset = THD_open_dataset( argv[nopt] ) ; xnam = argv[nopt] ;
     if( xset == NULL ) ERROR_exit("Can't open dataset %s",argv[nopt]) ;
   }

   nvals = DSET_NVALS(xset) ;  /* number of time points */
   ii    = (strcmp(smethod,"dot")==0) ? 2 : 3 ;
   if( nvals < ii )
     ERROR_exit("Input dataset %s length is less than ii?!",xnam,ii) ;

   if( ysim->nx < nvals )
     ERROR_exit("1D file %s has %d time points, but dataset has %d values",
                ynam,ysim->nx,nvals) ;
   else if( ysim->nx > nvals )
     WARNING_message("1D file %s has %d time points, dataset has %d",
                     ynam,ysim->nx,nvals) ;

   if( mri_allzero(ysim) )
     ERROR_exit("1D file %s is all zero!",ynam) ;

   if( ysim->ny > 1 )
     INFO_message("1D file %s has %d columns: correlating with ALL of them!",
                   ynam,ysim->ny) ;

   if( strcmp(smethod,"dot") == 0 && do_atanh ){
     WARNING_message("'-dot' turns off '-Fisher'") ; do_atanh = 0 ;
   }
   if( strcmp(smethod,"dot") == 0 && datum == MRI_short ){
     WARNING_message("'-dot' turns off '-short'") ; datum = MRI_float ;
   }

   cset = THD_Tcorr1D( xset, mask, nmask, ysim, smethod,
                       prefix, (datum==MRI_short) , do_atanh );
   tross_Make_History( "3dTcorr1D" , argc,argv , cset ) ;

   DSET_unload(xset) ;  /* no longer needful */

   /* finito */

   DSET_write(cset) ;
   INFO_message("Wrote dataset: %s\n",DSET_BRIKNAME(cset)) ;
   exit(0) ;
}
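
A minimal stand-alone sketch of what 3dTcorr1D computes for a single voxel with '-pearson': the Pearson correlation between the voxel time series and one y1D column, with atanh() giving the '-Fisher' transformed value. The helper pearson_r() and the toy data are illustrative assumptions; in the program itself the actual computation is done inside THD_Tcorr1D().

#include <stdio.h>
#include <math.h>

/* Pearson correlation between two length-n series (illustrative sketch only) */
static float pearson_r( const float *x , const float *y , int n )
{
   double xm=0.0 , ym=0.0 , sxx=0.0 , syy=0.0 , sxy=0.0 ; int ii ;
   for( ii=0 ; ii < n ; ii++ ){ xm += x[ii] ; ym += y[ii] ; }
   xm /= n ; ym /= n ;
   for( ii=0 ; ii < n ; ii++ ){
     double dx = x[ii]-xm , dy = y[ii]-ym ;
     sxx += dx*dx ; syy += dy*dy ; sxy += dx*dy ;
   }
   if( sxx <= 0.0 || syy <= 0.0 ) return 0.0f ;  /* constant series => undefined */
   return (float)( sxy / sqrt(sxx*syy) ) ;
}

int main( void )
{
   float ts [5] = { 1.0f , 2.0f , 4.0f , 3.0f , 5.0f } ;  /* toy voxel time series */
   float ref[5] = { 0.9f , 2.1f , 3.8f , 3.2f , 5.1f } ;  /* toy y1D column        */
   float r = pearson_r( ts , ref , 5 ) ;
   printf("r = %.4f   Fisher z = atanh(r) = %.4f\n", r , atanh(r)) ;
   return 0 ;
}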
Exemplo n.º 7
0
int main( int argc , char *argv[] )
{
   int iarg=1 , ii,nvox , nvals ;
   THD_3dim_dataset *inset=NULL, *outset=NULL , *mset=NULL ;
   char *prefix="./blurinmask" ;
   float fwhm_goal=0.0f ; int fwhm_2D=0 ;
   byte *mask=NULL ; int mask_nx=0,mask_ny=0,mask_nz=0 , automask=0 , nmask=0 ;
   float dx,dy,dz=0.0f , *bar , val ;
   int floatize=0 ;    /* 18 May 2009 */

   MRI_IMAGE *immask=NULL ;    /* 07 Oct 2009 */
   short      *mmask=NULL ;
   short      *unval_mmask=NULL ; int nuniq_mmask=0 ;
   int do_preserve=0 , use_qsar ;         /* 19 Oct 2009 */

   THD_3dim_dataset *fwhmset=NULL ;
   MRI_IMAGE *fxim=NULL, *fyim=NULL, *fzim=NULL ; /* 13 Jun 2016 */
   int niter_fxyz=0 ; float dmax=0.0f , dmin=0.0f ;

   /*------- help the pitifully ignorant luser? -------*/

   AFNI_SETUP_OMP(0) ;  /* 24 Jun 2013 */

   if( argc < 2 || strcmp(argv[1],"-help") == 0 ){
     printf(
      "Usage: ~1~\n"
      "3dBlurInMask [options]\n"
      "Blurs a dataset spatially inside a mask. That's all. Experimental.\n"
      "\n"
      "OPTIONS ~1~\n"
      "-------\n"
      " -input  ddd = This required 'option' specifies the dataset\n"
      "               that will be smoothed and output.\n"
      " -FWHM   f   = Add 'f' amount of smoothness to the dataset (in mm).\n"
      "              **N.B.: This is also a required 'option'.\n"
      " -FWHMdset d = Read in dataset 'd' and add the amount of smoothness\n"
      "               given at each voxel -- spatially variable blurring.\n"
      "              ** EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL **\n"
      " -mask   mmm = Mask dataset, if desired.  Blurring will\n"
      "               occur only within the mask.  Voxels NOT in\n"
      "               the mask will be set to zero in the output.\n"
      " -Mmask  mmm = Multi-mask dataset -- each distinct nonzero\n"
      "               value in dataset 'mmm' will be treated as\n"
      "               a separate mask for blurring purposes.\n"
      "              **N.B.: 'mmm' must be byte- or short-valued!\n"
      " -automask   = Create an automask from the input dataset.\n"
      "              **N.B.: only 1 masking option can be used!\n"
      " -preserve   = Normally, voxels not in the mask will be\n"
      "               set to zero in the output.  If you want the\n"
      "               original values in the dataset to be preserved\n"
      "               in the output, use this option.\n"
      " -prefix ppp = Prefix for output dataset will be 'ppp'.\n"
      "              **N.B.: Output dataset is always in float format.\n"
      " -quiet      = Don't be verbose with the progress reports.\n"
      " -float      = Save dataset as floats, no matter what the\n"
      "               input data type is.\n"
      "              **N.B.: If the input dataset is unscaled shorts, then\n"
      "                      the default is to save the output in short\n"
      "                      format as well.  In EVERY other case, the\n"
      "                      program saves the output as floats. Thus,\n"
      "                      the ONLY purpose of the '-float' option is to\n"
      "                      force an all-shorts input dataset to be saved\n"
      "                      as all-floats after blurring.\n"
      "\n"
      "NOTES ~1~\n"
      "-----\n"
      " * If you don't provide a mask, then all voxels will be included\n"
      "     in the blurring.  (But then why are you using this program?)\n"
      " * Note that voxels inside the mask that are not contiguous with\n"
      "     any other voxels inside the mask will not be modified at all!\n"
      " * Works iteratively, similarly to 3dBlurToFWHM, but without\n"
      "     the extensive overhead of monitoring the smoothness.\n"
      " * But this program will be faster than 3dBlurToFWHM, and probably\n"
      "     slower than 3dmerge.\n"
      " * Since the blurring is done iteratively, rather than all-at-once as\n"
      "     in 3dmerge, the results will be slightly different than 3dmerge's,\n"
      "     even if no mask is used here (3dmerge, of course, doesn't take a mask).\n"
      " * If the original FWHM of the dataset was 'S' and you input a value\n"
      "     'F' with the '-FWHM' option, then the output dataset's smoothness\n"
      "     will be about sqrt(S*S+F*F).  The number of iterations will be\n"
      "     about (F*F/d*d) where d=grid spacing; this means that a large value\n"
      "     of F might take a lot of CPU time!\n"
      " * The spatial smoothness of a 3D+time dataset can be estimated with a\n"
      "     command similar to the following:\n"
      "          3dFWHMx -detrend -mask mmm+orig -input ddd+orig\n"
     ) ;
     printf(
      " * The minimum number of voxels in the mask is %d\n",MASK_MIN) ;
     printf(
      " * Isolated voxels will be removed from the mask!\n") ;

     PRINT_AFNI_OMP_USAGE("3dBlurInMask",NULL) ;
     PRINT_COMPILE_DATE ; exit(0) ;
   }

   /*---- official startup ---*/

   PRINT_VERSION("3dBlurInMask"); mainENTRY("3dBlurInMask main"); machdep();
   AFNI_logger("3dBlurInMask",argc,argv); AUTHOR("RW Cox") ;

   /*---- loop over options ----*/

   while( iarg < argc && argv[iarg][0] == '-' ){

     if( strncmp(argv[iarg],"-preserve",5) == 0 ){  /* 19 Oct 2009 */
       do_preserve = 1 ; iarg++ ; continue ;
     }

     if( strncmp(argv[iarg],"-qui",4) == 0 ){
       verb = 0 ; iarg++ ; continue ;
     }
     if( strncmp(argv[iarg],"-ver",4) == 0 ){
       verb++ ; iarg++ ; continue ;
     }

     if( strcmp(argv[iarg],"-input") == 0 || strcmp(argv[iarg],"-dset") == 0 ){
       if( inset != NULL  ) ERROR_exit("Can't have two -input options") ;
       if( ++iarg >= argc ) ERROR_exit("Need argument after '-input'") ;
       inset = THD_open_dataset( argv[iarg] );
       CHECK_OPEN_ERROR(inset,argv[iarg]) ;
       iarg++ ; continue ;
     }

     if( strcmp(argv[iarg],"-prefix") == 0 ){
       if( ++iarg >= argc ) ERROR_exit("Need argument after '-prefix'") ;
       prefix = strdup(argv[iarg]) ;
       if( !THD_filename_ok(prefix) ) ERROR_exit("Bad name after '-prefix'") ;
       iarg++ ; continue ;
     }

     if( strcasecmp(argv[iarg],"-Mmask") == 0 ){   /* 07 Oct 2009 */
       if( ++iarg >= argc ) ERROR_exit("Need argument after '-Mmask'") ;
       if( mmask != NULL || mask != NULL || automask ) ERROR_exit("Can't have two mask inputs") ;
       mset = THD_open_dataset( argv[iarg] ) ;
       CHECK_OPEN_ERROR(mset,argv[iarg]) ;
       DSET_load(mset) ; CHECK_LOAD_ERROR(mset) ;
       mask_nx = DSET_NX(mset); mask_ny = DSET_NY(mset); mask_nz = DSET_NZ(mset);
#if 0
       if( !MRI_IS_INT_TYPE(DSET_BRICK_TYPE(mset,0)) )
         ERROR_exit("-Mmask dataset is not integer type!") ;
#endif
       immask = mri_to_short( 1.0 , DSET_BRICK(mset,0) ) ;
       mmask  = MRI_SHORT_PTR(immask) ;
       unval_mmask = UniqueShort( mmask, mask_nx*mask_ny*mask_nz, &nuniq_mmask, 0 ) ;
       if( unval_mmask == NULL || nuniq_mmask == 0 )
         ERROR_exit("-Mmask dataset cannot be processed!?") ;
       if( nuniq_mmask == 1 && unval_mmask[0] == 0 )
         ERROR_exit("-Mmask dataset is all zeros!?") ;
       if( verb ){
         int qq , ww ;
         for( ii=qq=0 ; ii < nuniq_mmask ; ii++ ) if( unval_mmask[ii] != 0 ) qq++ ;
         for( ii=ww=0 ; ii < immask->nvox ; ii++ ) if( mmask[ii] != 0 ) ww++ ;
         INFO_message("%d unique nonzero values in -Mmask; %d nonzero voxels",qq,ww) ;
       }
       iarg++ ; continue ;
     }

     if( strcmp(argv[iarg],"-mask") == 0 ){
       if( ++iarg >= argc ) ERROR_exit("Need argument after '-mask'") ;
       if( mmask != NULL || mask != NULL || automask ) ERROR_exit("Can't have two mask inputs") ;
       mset = THD_open_dataset( argv[iarg] ) ;
       CHECK_OPEN_ERROR(mset,argv[iarg]) ;
       DSET_load(mset) ; CHECK_LOAD_ERROR(mset) ;
       mask_nx = DSET_NX(mset); mask_ny = DSET_NY(mset); mask_nz = DSET_NZ(mset);
       mask = THD_makemask( mset , 0 , 0.5f, 0.0f ) ; DSET_unload(mset) ;
       if( mask == NULL ) ERROR_exit("Can't make mask from dataset '%s'",argv[iarg]) ;
       ii = THD_mask_remove_isolas( mask_nx,mask_ny,mask_nz , mask ) ;
       if( verb && ii > 0 ) INFO_message("Removed %d isola%s from mask dataset",ii,(ii==1)?"\0":"s") ;
       nmask = THD_countmask( mask_nx*mask_ny*mask_nz , mask ) ;
       if( verb ) INFO_message("Number of voxels in mask = %d",nmask) ;
       if( nmask < MASK_MIN ) ERROR_exit("Mask is too small to process") ;
       iarg++ ; continue ;
     }

     if( strcmp(argv[iarg],"-automask") == 0 ){
       if( mmask != NULL || mask != NULL ) ERROR_exit("Can't have 2 mask inputs") ;
       automask = 1 ; iarg++ ; continue ;
     }

     if( strcasecmp(argv[iarg],"-FWHM") == 0 || strcasecmp(argv[iarg],"-FHWM") == 0 ){
       if( ++iarg >= argc ) ERROR_exit("Need argument after '%s'",argv[iarg-1]);
       val = (float)strtod(argv[iarg],NULL) ;
       if( val <= 0.0f ) ERROR_exit("Illegal value after '%s': '%s'",
                                    argv[iarg-1],argv[iarg]) ;
       fwhm_goal = val ; fwhm_2D = 0 ;
       iarg++ ; continue ;
     }

     if( strcasecmp(argv[iarg],"-FWHMdset") == 0 ){
       if( ++iarg >= argc ) ERROR_exit("Need argument after '%s'",argv[iarg-1]);
       if( fwhmset != NULL ) ERROR_exit("You can't use option '-FWHMdset' twice :(") ;
       fwhmset = THD_open_dataset( argv[iarg] ) ;
       CHECK_OPEN_ERROR(fwhmset,argv[iarg]) ;
       do_preserve = 1 ; iarg++ ; continue ;
     }

     if( strncmp(argv[iarg],"-float",6) == 0 ){    /* 18 May 2009 */
       floatize = 1 ; iarg++ ; continue ;
     }

#if 0
     if( strcmp(argv[iarg],"-FWHMxy") == 0 || strcmp(argv[iarg],"-FHWMxy") == 0 ){
       if( ++iarg >= argc ) ERROR_exit("Need argument after '%s'",argv[iarg-1]);
       val = (float)strtod(argv[iarg],NULL) ;
       if( val <= 0.0f ) ERROR_exit("Illegal value after '%s': '%s'",
                                    argv[iarg-1],argv[iarg]) ;
       fwhm_goal = val ; fwhm_2D = 1 ;
       iarg++ ; continue ;
     }
#endif

     ERROR_exit("Uknown option '%s'",argv[iarg]) ;

   } /*--- end of loop over options ---*/

   /*----- check for stupid inputs, load datasets, et cetera -----*/

   if( fwhmset == NULL && fwhm_goal == 0.0f )
     ERROR_exit("No -FWHM option given! What do you want?") ;

   if( fwhmset != NULL && fwhm_goal > 0.0f ){
     WARNING_message("-FWHMdset option replaces -FWHM value") ;
     fwhm_goal = 0.0f ;
   }

   if( fwhmset != NULL && mmask != NULL )
     ERROR_exit("Sorry: -FWHMdset and -Mmask don't work together (yet)") ;

   if( inset == NULL ){
     if( iarg >= argc ) ERROR_exit("No input dataset on command line?") ;
     inset = THD_open_dataset( argv[iarg] ) ;
     CHECK_OPEN_ERROR(inset,argv[iarg]) ;
   }

   nvox = DSET_NVOX(inset)     ;
   dx   = fabs(DSET_DX(inset)) ; if( dx == 0.0f ) dx = 1.0f ;
   dy   = fabs(DSET_DY(inset)) ; if( dy == 0.0f ) dy = 1.0f ;
   dz   = fabs(DSET_DZ(inset)) ; if( dz == 0.0f ) dz = 1.0f ;

   dmax = MAX(dx,dy) ; if( dmax < dz ) dmax = dz ;  /* 13 Jun 2016 */
   dmin = MIN(dx,dy) ; if( dmin > dz ) dmin = dz ;

   if( !floatize ){    /* 18 May 2009 */
     if( !THD_datum_constant(inset->dblk)     ||
         THD_need_brick_factor(inset)         ||
         DSET_BRICK_TYPE(inset,0) != MRI_short  ){
       if( verb ) INFO_message("forcing output to be stored in float format") ;
       floatize = 1 ;
     } else {
       if( verb ) INFO_message("output dataset will be stored as shorts") ;
     }
   } else {
       if( verb ) INFO_message("output dataset will be stored as floats") ;
   }

#if 0
   if( DSET_NZ(inset) == 1 && !fwhm_2D ){
     WARNING_message("Dataset is 2D ==> switching from -FWHM to -FWHMxy") ;
     fwhm_2D = 1 ;
   }
#endif

   /*--- deal with mask or automask ---*/

   if( mask != NULL ){
     if( mask_nx != DSET_NX(inset) ||
         mask_ny != DSET_NY(inset) ||
         mask_nz != DSET_NZ(inset)   )
       ERROR_exit("-mask dataset grid doesn't match input dataset") ;

   } else if( automask ){
     mask = THD_automask( inset ) ;
     if( mask == NULL )
       ERROR_message("Can't create -automask from input dataset?") ;
     nmask = THD_countmask( DSET_NVOX(inset) , mask ) ;
     if( verb ) INFO_message("Number of voxels in automask = %d",nmask);
     if( nmask < MASK_MIN ) ERROR_exit("Automask is too small to process") ;

   } else if( mmask != NULL ){
     if( mask_nx != DSET_NX(inset) ||
         mask_ny != DSET_NY(inset) ||
         mask_nz != DSET_NZ(inset)   )
       ERROR_exit("-Mmask dataset grid doesn't match input dataset") ;

   } else {
     mask = (byte *)malloc(sizeof(byte)*nvox) ; nmask = nvox ;
     memset(mask,1,sizeof(byte)*nvox) ;
     if( verb ) INFO_message("No mask ==> processing all %d voxels",nvox);
   }

   /*--- process FWHMdset [13 Jun 2016] ---*/

   if( fwhmset != NULL ){
     float *fxar,*fyar,*fzar , *fwar ; MRI_IMAGE *fwim ;
     float fwmax=0.0f , fsx,fsy,fsz ; int ii, nfpos=0 ;

     if( DSET_NX(inset) != DSET_NX(fwhmset) ||
         DSET_NY(inset) != DSET_NY(fwhmset) ||
         DSET_NZ(inset) != DSET_NZ(fwhmset)   )
       ERROR_exit("grid dimensions for FWHMdset and input dataset do not match :(") ;

STATUS("get fwim") ;
     DSET_load(fwhmset) ;
     fwim = mri_scale_to_float(DSET_BRICK_FACTOR(fwhmset,0),DSET_BRICK(fwhmset,0));
     fwar = MRI_FLOAT_PTR(fwim);
     DSET_unload(fwhmset) ;
STATUS("process fwar") ;
     for( ii=0 ; ii < nvox ; ii++ ){
       if( mask[ii] && fwar[ii] > 0.0f ){
         nfpos++ ;
         if( fwar[ii] > fwmax ) fwmax = fwar[ii] ;
       } else {
         fwar[ii] = 0.0f ; mask[ii] = 0 ;
       }
     }
     if( nfpos < 100 )
       ERROR_exit("Cannot proceed: too few (%d) voxels are positive in -FWHMdset!",nfpos) ;

     niter_fxyz = (int)rintf(2.0f*fwmax*fwmax*FFAC/(0.05f*dmin*dmin)) + 1 ;

     if( verb ) INFO_message("-FWHMdset: niter=%d  npos=%d",niter_fxyz,nfpos) ;

STATUS("create fxim etc.") ;

     fxim = mri_new_conforming(fwim,MRI_float); fxar = MRI_FLOAT_PTR(fxim);
     fyim = mri_new_conforming(fwim,MRI_float); fyar = MRI_FLOAT_PTR(fyim);
     fzim = mri_new_conforming(fwim,MRI_float); fzar = MRI_FLOAT_PTR(fzim);

     fsx = FFAC/(dx*dx*niter_fxyz) ;
     fsy = FFAC/(dy*dy*niter_fxyz) ;
     fsz = FFAC/(dz*dz*niter_fxyz) ;
/** INFO_message("fsx=%g fsy=%g fsz=%g",fsx,fsy,fsz) ; **/

     for( ii=0 ; ii < nvox ; ii++ ){
       if( fwar[ii] > 0.0f ){
         fxar[ii] = fwar[ii]*fwar[ii] * fsx ;
         fyar[ii] = fwar[ii]*fwar[ii] * fsy ;
         fzar[ii] = fwar[ii]*fwar[ii] * fsz ;
       } else {
         fxar[ii] = fyar[ii] = fzar[ii] = 0.0f ;
       }
     }

STATUS("free(fwim)") ;
     mri_free(fwim) ;
   }

   /*--- process input dataset ---*/

STATUS("load input") ;

   DSET_load(inset) ; CHECK_LOAD_ERROR(inset) ;

   outset = EDIT_empty_copy( inset ) ;   /* moved here 04 Jun 2007 */
   EDIT_dset_items( outset , ADN_prefix , prefix , ADN_none ) ;
   EDIT_dset_items( outset , ADN_brick_fac , NULL , ADN_none ) ;  /* 11 Sep 2007 */
   tross_Copy_History( inset , outset ) ;
   tross_Make_History( "3dBlurInMask" , argc,argv , outset ) ;

   nvals = DSET_NVALS(inset) ;

   use_qsar = (do_preserve || mmask != NULL) ; /* 19 Oct 2009 */

 AFNI_OMP_START ;
#pragma omp parallel if( nvals > 1 )
 {
   MRI_IMAGE *dsim ; int ids,qit ; byte *qmask=NULL ; register int vv ;
   MRI_IMAGE *qim=NULL, *qsim=NULL; float *qar=NULL, *dsar, *qsar=NULL;
#pragma omp critical (MALLOC)
   { if( use_qsar ){
       qsim  = mri_new_conforming(DSET_BRICK(inset,0),MRI_float); qsar = MRI_FLOAT_PTR(qsim);
     }
     if( mmask != NULL ){
       qmask = (byte *)malloc(sizeof(byte)*nvox) ;
       qim   = mri_new_conforming(immask,MRI_float); qar  = MRI_FLOAT_PTR(qim);
       qim->dx = dx ; qim->dy = dy ; qim->dz = dz ;
     }
   }
#pragma omp for
   for( ids=0 ; ids < nvals ; ids++ ){
#pragma omp critical (MALLOC)
     { dsim = mri_scale_to_float(DSET_BRICK_FACTOR(inset,ids),DSET_BRICK(inset,ids));
       DSET_unload_one(inset,ids) ;
     }
     dsim->dx = dx ; dsim->dy = dy ; dsim->dz = dz ; dsar = MRI_FLOAT_PTR(dsim) ;

     /* if needed, initialize qsar with data to be preserved in output */

     if( do_preserve ){
       for( vv=0 ; vv < nvox ; vv++ ) qsar[vv] = dsar[vv] ;
     } else if( mmask != NULL ){
       for( vv=0 ; vv < nvox ; vv++ ) qsar[vv] = 0.0f ;
     }

     if( fwhmset != NULL ){       /* 13 Jun 2016: spatially variable blurring */

       for( qit=0 ; qit < niter_fxyz ; qit++ ){
         mri_blur3D_variable( dsim , mask , fxim,fyim,fzim ) ;
       }
       if( do_preserve ){
         for( vv=0 ; vv < nvox ; vv++ ) if( mask[vv] ) qsar[vv] = dsar[vv] ;
       }

     } else if( mmask != NULL ){         /* 07 Oct 2009: multiple masks */
       int qq ; register short uval ;
       for( qq=0 ; qq < nuniq_mmask ; qq++ ){
         uval = unval_mmask[qq] ; if( uval == 0 ) continue ;
         for( vv=0 ; vv < nvox ; vv++ ) qmask[vv] = (mmask[vv]==uval) ; /* make mask */
         (void)THD_mask_remove_isolas( mask_nx,mask_ny,mask_nz , qmask ) ;
         nmask = THD_countmask( nvox , qmask ) ;
         if( verb && ids==0 ) ININFO_message("voxels in Mmask[%d] = %d",uval,nmask) ;
         if( nmask >= MASK_MIN ){
           /* copy data from dataset to qar */
           for( vv=0 ; vv < nvox ; vv++ ) if( qmask[vv] ) qar[vv] = dsar[vv] ;
           /* blur qar (output will be zero where qmask==0) */
           mri_blur3D_addfwhm( qim , qmask , fwhm_goal ) ;  /** the real work **/
           /* copy results back to qsar */
           for( vv=0 ; vv < nvox ; vv++ ) if( qmask[vv] ) qsar[vv] = qar[vv] ;
         }
       }

     } else {                      /* the olden way: 1 mask */

       mri_blur3D_addfwhm( dsim , mask , fwhm_goal ) ;  /** all the work **/

       /* dsim will be zero where mask==0;
          if we want to preserve the input values, copy dsar into qsar now
          at all mask!=0 voxels, since qsar contains the original data values */

       if( do_preserve ){
         for( vv=0 ; vv < nvox ; vv++ ) if( mask[vv] ) qsar[vv] = dsar[vv] ;
       }

     }

     /* if necessary, copy combined results in qsar to dsar for output */

     if( use_qsar ){
       for( vv=0 ; vv < nvox ; vv++ ) dsar[vv] = qsar[vv] ;
     }

     if( floatize ){
       EDIT_substitute_brick( outset , ids , MRI_float , dsar ) ;
     } else {
#pragma omp critical (MALLOC)
       { EDIT_substscale_brick( outset , ids , MRI_float , dsar ,
                                               MRI_short , 1.0f  ) ;
         mri_free(dsim) ;
       }
     }
   } /* end of loop over sub-bricks */

#pragma omp critical (MALLOC)
   { if( qsim   != NULL ) mri_free(qsim);
     if( immask != NULL ){ free(qmask); mri_free(qim); }
   }
 } /* end OpenMP */
 AFNI_OMP_END ;

   if(   mask != NULL )     free(  mask) ;
   if( immask != NULL ) mri_free(immask) ;

   DSET_unload(inset) ;
   DSET_write(outset) ;
   WROTE_DSET(outset) ;
   exit(0) ;
}
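
A rough 1D sketch of the idea behind 3dBlurInMask: smooth by repeated small averaging passes, but average each voxel only with neighbors that are also inside the mask, so values never leak across the mask boundary. The weights, mask, and iteration count below are arbitrary illustration values (the help's niter ~ (F*F)/(d*d) note gives the real scaling); the actual 3D kernel and FWHM bookkeeping live in mri_blur3D_addfwhm().

#include <stdio.h>

#define N 16

/* one smoothing pass restricted to the mask: in-mask voxels are replaced by a
   weighted average of themselves and any in-mask neighbors (weights renormalized
   at mask edges); out-of-mask voxels are left untouched */
static void blur_pass_1d( float *v , const unsigned char *mask )
{
   float w[N] ; int i ;
   for( i=0 ; i < N ; i++ ){
     if( !mask[i] ){ w[i] = v[i] ; continue ; }
     float sum = 0.6f * v[i] , wsum = 0.6f ;
     if( i > 0   && mask[i-1] ){ sum += 0.2f * v[i-1] ; wsum += 0.2f ; }
     if( i < N-1 && mask[i+1] ){ sum += 0.2f * v[i+1] ; wsum += 0.2f ; }
     w[i] = sum / wsum ;
   }
   for( i=0 ; i < N ; i++ ) v[i] = w[i] ;
}

int main( void )
{
   float         val [N] = {0.0f} ;
   unsigned char mask[N] = {0} ;
   int i , niter = 10 ;                      /* cf. the niter ~ (F*F)/(d*d) note */

   for( i=4 ; i < 12 ; i++ ) mask[i] = 1 ;   /* small 1D "mask" */
   val[8] = 100.0f ;                         /* a spike to be smoothed in-mask */

   for( i=0 ; i < niter ; i++ ) blur_pass_1d( val , mask ) ;

   for( i=0 ; i < N ; i++ ) printf("%6.2f%s", val[i], mask[i] ? "" : "*") ;
   printf("\n") ;                            /* '*' marks out-of-mask voxels (still zero) */
   return 0 ;
}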
Exemplo n.º 8
0
int main( int argc , char *argv[] )
{
   THD_3dim_dataset *dset , *oset=NULL , *tset=NULL ;
   int nvals , iv , nxyz , ii,jj,kk , iarg , kz,kzold ;
   float cut1=2.5,cut2=4.0 , sq2p,sfac , fq ;
   MRI_IMAGE *flim ;
   char *prefix="despike" , *tprefix=NULL ;

   int corder=-1 , nref , ignore=0 , polort=2 , nuse , nomask=0 ;
   int nspike, nbig, nproc ;
   float **ref ;
   float  c21,ic21 , pspike,pbig ;
   short  *sar , *qar ;
   byte   *tar , *mask=NULL ;
   float  *zar , *yar ;
   int     datum ;
   int     localedit=0 ;  /* 04 Apr 2007 */
   int     verb=1 ;

   int     do_NEW = 0 ;   /* 29 Nov 2013 */
   MRI_IMAGE *NEW_psinv=NULL ;
   int     dilate = 4 ;   /* 04 Dec 2013 */
   int     ctim   = 0 ;

   /*----- Read command line -----*/

   AFNI_SETUP_OMP(0) ;  /* 24 Jun 2013 */

   if( argc < 2 || strcmp(argv[1],"-help") == 0 ){
      printf("Usage: 3dDespike [options] dataset\n"
             "Removes 'spikes' from the 3D+time input dataset and writes\n"
             "a new dataset with the spike values replaced by something\n"
             "more pleasing to the eye.\n"
             "\n"
             "Method:\n"
             " * L1 fit a smooth-ish curve to each voxel time series\n"
             "    [see -corder option for description of the curve]\n"
             "    [see -NEW option for a different & faster fitting method]\n"
             " * Compute the MAD of the difference between the curve and\n"
             "    the data time series (the residuals).\n"
             " * Estimate the standard deviation 'sigma' of the residuals\n"
             "    as sqrt(PI/2)*MAD.\n"
             " * For each voxel value, define s = (value-curve)/sigma.\n"
             " * Values with s > c1 are replaced with a value that yields\n"
             "    a modified s' = c1+(c2-c1)*tanh((s-c1)/(c2-c1)).\n"
             " * c1 is the threshold value of s for a 'spike' [default c1=2.5].\n"
             " * c2 is the upper range of the allowed deviation from the curve:\n"
             "    s=[c1..infinity) is mapped to s'=[c1..c2)   [default c2=4].\n"
             "\n"
             "Options:\n"
             " -ignore I  = Ignore the first I points in the time series:\n"
             "               these values will just be copied to the\n"
             "               output dataset [default I=0].\n"
             " -corder L  = Set the curve fit order to L:\n"
             "               the curve that is fit to voxel data v(t) is\n"
             "\n"
             "                       k=L [        (2*PI*k*t)          (2*PI*k*t) ]\n"
             " f(t) = a+b*t+c*t*t + SUM  [ d * sin(--------) + e * cos(--------) ]\n"
             "                       k=1 [  k     (    T   )    k     (    T   ) ]\n"
             "\n"
             "               where T = duration of time series;\n"
             "               the a,b,c,d,e parameters are chosen to minimize\n"
             "               the sum over t of |v(t)-f(t)| (L1 regression);\n"
             "               this type of fitting is is insensitive to large\n"
             "               spikes in the data.  The default value of L is\n"
             "               NT/30, where NT = number of time points.\n"
             "\n"
             " -cut c1 c2 = Alter default values for the spike cut values\n"
             "               [default c1=2.5, c2=4.0].\n"
             " -prefix pp = Save de-spiked dataset with prefix 'pp'\n"
             "               [default pp='despike']\n"
             " -ssave ttt = Save 'spikiness' measure s for each voxel into a\n"
             "               3D+time dataset with prefix 'ttt' [default=no save]\n"
             " -nomask    = Process all voxels\n"
             "               [default=use a mask of high-intensity voxels, ]\n"
             "               [as created via '3dAutomask -dilate 4 dataset'].\n"
             " -dilate nd = Dilate 'nd' times (as in 3dAutomask).  The default\n"
             "               value of 'nd' is 4.\n"
             " -q[uiet]   = Don't print '++' informational messages.\n"
             "\n"
             " -localedit = Change the editing process to the following:\n"
             "                If a voxel |s| value is >= c2, then replace\n"
             "                the voxel value with the average of the two\n"
             "                nearest non-spike (|s| < c2) values; the first\n"
             "                one previous and the first one after.\n"
             "                Note that the c1 cut value is not used here.\n"
             "\n"
             " -NEW       = Use the 'new' method for computing the fit, which\n"
             "              should be faster than the L1 method for long time\n"
             "              series (200+ time points); however, the results\n"
             "              are similar but NOT identical. [29 Nov 2013]\n"
             "              * You can also make the program use the 'new'\n"
             "                method by setting the environment variable\n"
             "                  AFNI_3dDespike_NEW\n"
             "                to the value YES; as in\n"
             "                  setenv AFNI_3dDespike_NEW YES  (csh)\n"
             "                  export AFNI_3dDespike_NEW=YES  (bash)\n"
             "              * If this variable is set to YES, you can turn off\n"
             "                the '-NEW' processing by using the '-OLD' option.\n"
             "          -->>* For time series more than 500 points long, the\n"
             "                '-OLD' algorithm is tremendously slow.  You should\n"
             "                use the '-NEW' algorith in such cases.\n"
             "             ** At some indeterminate point in the future, the '-NEW'\n"
             "                method will become the default!\n"
             "          -->>* As of 29 Sep 2016, '-NEW' is the default if there\n"
             "                is more than 500 points in the time series dataset.\n"
             "\n"
             " -NEW25     = A slightly more aggressive despiking approach than\n"
             "              the '-NEW' method.\n"
             "\n"
             "Caveats:\n"
             "* Despiking may interfere with image registration, since head\n"
             "   movement may produce 'spikes' at the edge of the brain, and\n"
             "   this information would be used in the registration process.\n"
             "   This possibility has not been explored or calibrated.\n"
             "* [LATER] Actually, it seems like the registration problem\n"
             "   does NOT happen, and in fact, despiking seems to help!\n"
             "* Check your data visually before and after despiking and\n"
             "   registration!\n"
             "   [Hint: open 2 AFNI controllers, and turn Time Lock on.]\n"
            ) ;

      PRINT_AFNI_OMP_USAGE("3dDespike",NULL) ;
      PRINT_COMPILE_DATE ; exit(0) ;
   }
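
   /*-- Illustrative sketch only (not part of the original 3dDespike code):
        the 'smash' edit described in the help above maps a spikiness value
        s > c1 to  s' = c1 + (c2-c1)*tanh( (s-c1)/(c2-c1) ) , so s' always
        stays in [c1..c2).  Kept inside '#if 0' so program behavior is
        unchanged; the variables below are local to this demo, and tanhf()
        is just the standard C function. --*/
#if 0
   { float c1=2.5f , c2=4.0f , s , sp ;
     for( s=3.0f ; s <= 9.0f ; s += 3.0f ){
       sp = c1 + (c2-c1)*tanhf( (s-c1)/(c2-c1) ) ;  /* e.g. s=9 --> s' ~= 4.00 */
       fprintf(stderr,"s = %.1f  -->  s' = %.3f\n",s,sp) ;
     }
   }
#endif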

   /** AFNI package setup and logging **/

   mainENTRY("3dDespike main"); machdep(); AFNI_logger("3dDespike",argc,argv);
   PRINT_VERSION("3dDespike") ; AUTHOR("RW Cox") ;

   /** parse options **/

   if( AFNI_yesenv("AFNI_3dDespike_NEW") ) do_NEW = 1 ;  /* 29 Nov 2013 */

   iarg = 1 ;
   while( iarg < argc && argv[iarg][0] == '-' ){

      if( strncmp(argv[iarg],"-q",2) == 0 ){       /* 04 Apr 2007 */
        verb = 0 ; iarg++ ; continue ;
      }
      if( strncmp(argv[iarg],"-v",2) == 0 ){
        verb++ ; iarg++ ; continue ;
      }

      if( strcmp(argv[iarg],"-NEW") == 0 ){       /* 29 Nov 2013 */
        do_NEW = 1 ; iarg++ ; continue ;
      }
      if( strcmp(argv[iarg],"-NEW25") == 0 ){     /* 29 Sep 2016 */
        do_NEW = 1 ; use_des25 = 1 ; cut1 = 2.5f ; cut2 = 3.2f ; iarg++ ; continue ;
      }
      if( strcmp(argv[iarg],"-OLD") == 0 ){
        do_NEW = 0 ; iarg++ ; continue ;
      }

      /** -localedit **/

      if( strcmp(argv[iarg],"-localedit") == 0 ){  /* 04 Apr 2007 */
        localedit = 1 ; iarg++ ; continue ;
      }

      /** don't use masking **/

      if( strcmp(argv[iarg],"-nomask") == 0 ){
        nomask = 1 ; iarg++ ; continue ;
      }

      /** dilation count [04 Dec 2013] **/

      if( strcmp(argv[iarg],"-dilate") == 0 ){
        dilate = (int)strtod(argv[++iarg],NULL) ;
             if( dilate <=  0 ) dilate = 1 ;
        else if( dilate >  99 ) dilate = 99 ;
        iarg++ ; continue ;
      }

      /** output dataset prefix **/

      if( strcmp(argv[iarg],"-prefix") == 0 ){
        prefix = argv[++iarg] ;
        if( !THD_filename_ok(prefix) ) ERROR_exit("-prefix is not good");
        iarg++ ; continue ;
      }

      /** ratio dataset prefix **/

      if( strcmp(argv[iarg],"-ssave") == 0 ){
        tprefix = argv[++iarg] ;
        if( !THD_filename_ok(tprefix) ) ERROR_exit("-ssave prefix is not good");
        iarg++ ; continue ;
      }

      /** trigonometric polynomial order **/

      if( strcmp(argv[iarg],"-corder") == 0 ){
        corder = strtol( argv[++iarg] , NULL , 10 ) ;
        if( corder < 0 ) ERROR_exit("Illegal value of -corder");
        iarg++ ; continue ;
      }

      /** how much to ignore at start **/

      if( strcmp(argv[iarg],"-ignore") == 0 ){
        ignore = strtol( argv[++iarg] , NULL , 10 ) ;
        if( ignore < 0 ) ERROR_exit("Illegal value of -ignore");
        iarg++ ; continue ;
      }

      /** thresholds for s ratio **/

      if( strcmp(argv[iarg],"-cut") == 0 ){
        cut1 = strtod( argv[++iarg] , NULL ) ;
        cut2 = strtod( argv[++iarg] , NULL ) ;
        if( cut1 < 1.0 || cut2 < cut1+0.5 )
          ERROR_exit("Illegal values after -cut");
        iarg++ ; continue ;
      }

      ERROR_exit("Unknown option: %s",argv[iarg]) ;
   }

   c21 = cut2-cut1 ; ic21 = 1.0/c21 ;

   /*----- read input dataset -----*/

   if( iarg >= argc ) ERROR_exit("No input dataset!!??");

   dset = THD_open_dataset( argv[iarg] ) ;
   CHECK_OPEN_ERROR(dset,argv[iarg]) ;
   datum = DSET_BRICK_TYPE(dset,0) ;
   if( (datum != MRI_short && datum != MRI_float) || !DSET_datum_constant(dset) )
     ERROR_exit("Can't process non-short, non-float dataset!") ;

   if( verb ) INFO_message("Input data type = %s\n",MRI_TYPE_name[datum]) ;
   nvals = DSET_NUM_TIMES(dset) ; nuse = nvals - ignore ;
   if( nuse < 15 )
     ERROR_exit("Can't use dataset with < 15 time points per voxel!") ;

   if( nuse > 500 && !do_NEW ){
     INFO_message("Switching to '-NEW' method since number of time points = %d > 500",nuse) ;
     do_NEW = 1 ;
   }
   if( use_des25 && nuse < 99 ) use_des25 = 0 ;

   if( verb ) INFO_message("ignoring first %d time points, using last %d",ignore,nuse);
   if( corder > 0 && 4*corder+2 > nuse ){
     ERROR_exit("-corder %d is too big for NT=%d",corder,nvals) ;
   } else if( corder < 0 ){
     corder = rint(nuse/30.0) ; if( corder > 50 && !do_NEW ) corder = 50 ;
     if( verb ) INFO_message("using %d time points => -corder %d",nuse,corder) ;
   } else {
     if( verb ) INFO_message("-corder %d set from command line",corder) ;
   }
   nxyz = DSET_NVOX(dset) ;
   if( verb ) INFO_message("Loading dataset %s",argv[iarg]) ;
   DSET_load(dset) ; CHECK_LOAD_ERROR(dset) ;

   /*-- create automask --*/

   if( !nomask ){
     mask = THD_automask( dset ) ;
     if( verb ){
       ii = THD_countmask( DSET_NVOX(dset) , mask ) ;
       INFO_message("%d voxels in the automask [out of %d in dataset]",ii,DSET_NVOX(dset)) ;
     }
     for( ii=0 ; ii < dilate ; ii++ )
       THD_mask_dilate( DSET_NX(dset), DSET_NY(dset), DSET_NZ(dset), mask, 3 ) ;
     if( verb ){
       ii = THD_countmask( DSET_NVOX(dset) , mask ) ;
       INFO_message("%d voxels in the dilated automask [out of %d in dataset]",ii,DSET_NVOX(dset)) ;
     }
   } else {
     if( verb ) INFO_message("processing all %d voxels in dataset",DSET_NVOX(dset)) ;
   }

   /*-- create empty despiked dataset --*/

   oset = EDIT_empty_copy( dset ) ;
   EDIT_dset_items( oset ,
                      ADN_prefix    , prefix ,
                      ADN_brick_fac , NULL ,
                      ADN_datum_all , datum ,
                    ADN_none ) ;

   if( THD_deathcon() && THD_is_file(DSET_HEADNAME(oset)) )
     ERROR_exit("output dataset already exists: %s",DSET_HEADNAME(oset));

   tross_Copy_History( oset , dset ) ;
   tross_Make_History( "3dDespike" , argc , argv , oset ) ;

   /* create bricks (will be filled with zeros) */

   for( iv=0 ; iv < nvals ; iv++ )
     EDIT_substitute_brick( oset , iv , datum , NULL ) ;

   /* copy the ignored bricks */

   switch( datum ){
     case MRI_short:
       for( iv=0 ; iv < ignore ; iv++ ){
         sar = DSET_ARRAY(oset,iv) ;
         qar = DSET_ARRAY(dset,iv) ;
         memcpy( sar , qar , DSET_BRICK_BYTES(dset,iv) ) ;
         DSET_unload_one(dset,iv) ;
       }
     break ;
     case MRI_float:
       for( iv=0 ; iv < ignore ; iv++ ){
         zar = DSET_ARRAY(oset,iv) ;
         yar = DSET_ARRAY(dset,iv) ;
         memcpy( zar , yar , DSET_BRICK_BYTES(dset,iv) ) ;
         DSET_unload_one(dset,iv) ;
       }
     break ;
   }

   /*-- setup to save a threshold statistic dataset, if desired --*/

   if( tprefix != NULL ){
     float *fac ;
     tset = EDIT_empty_copy( dset ) ;
     fac  = (float *) malloc( sizeof(float) * nvals ) ;
     for( ii=0 ; ii < nvals ; ii++ ) fac[ii] = TFAC ;
     EDIT_dset_items( tset ,
                        ADN_prefix    , tprefix ,
                        ADN_brick_fac , fac ,
                        ADN_datum_all , MRI_byte ,
                        ADN_func_type , FUNC_FIM_TYPE ,
                      ADN_none ) ;
     free(fac) ;

     tross_Copy_History( tset , dset ) ;
     tross_Make_History( "3dDespike" , argc , argv , tset ) ;

#if 0
     if( THD_is_file(DSET_HEADNAME(tset)) )
       ERROR_exit("-ssave dataset already exists");
#endif

     tross_Copy_History( tset , dset ) ;
     tross_Make_History( "3dDespike" , argc , argv , tset ) ;

     for( iv=0 ; iv < nvals ; iv++ )
       EDIT_substitute_brick( tset , iv , MRI_byte , NULL ) ;
   }

   /*-- setup to find spikes --*/

   sq2p  = sqrt(0.5*PI) ;
   sfac  = sq2p / 1.4826f ;

   /* make ref functions */

   nref = 2*corder+3 ;
   ref  = (float **) malloc( sizeof(float *) * nref ) ;
   for( jj=0 ; jj < nref ; jj++ )
     ref[jj] = (float *) malloc( sizeof(float) * nuse ) ;

   /* r(t) = 1 */

   for( iv=0 ; iv < nuse ; iv++ ) ref[0][iv] = 1.0 ;
   jj = 1 ;

   /* r(t) = t - tmid */

   { float tm = 0.5 * (nuse-1.0) ; float fac = 2.0 / nuse ;
     for( iv=0 ; iv < nuse ; iv++ ) ref[1][iv] = (iv-tm)*fac ;
     jj = 2 ;

     /* r(t) = (t-tmid)**jj */

     for( ; jj <= polort ; jj++ )
       for( iv=0 ; iv < nuse ; iv++ )
         ref[jj][iv] = pow( (iv-tm)*fac , (double)jj ) ;
   }

   for( kk=1 ; kk <= corder ; kk++ ){
     fq = (2.0*PI*kk)/nuse ;

     /* r(t) = sin(2*PI*k*t/N) */

     for( iv=0 ; iv < nuse ; iv++ )
       ref[jj][iv] = sin(fq*iv) ;
     jj++ ;

     /* r(t) = cos(2*PI*k*t/N) */

     for( iv=0 ; iv < nuse ; iv++ )
       ref[jj][iv] = cos(fq*iv) ;
     jj++ ;
   }

   /****** setup for the NEW solution method [29 Nov 2013] ******/

   if( do_NEW ){
     NEW_psinv = DES_get_psinv(nuse,nref,ref) ;
     INFO_message("Procesing time series with NEW model fit algorithm") ;
   } else {
     INFO_message("Procesing time series with OLD model fit algorithm") ;
   }

   /*--- loop over voxels and do work ---*/

#define Laplace_t2p(val) ( 1.0 - nifti_stat2cdf( (val), 15, 0.0, 1.4427 , 0.0 ) )

   if( verb ){
    if( !localedit ){
      INFO_message("smash edit thresholds: %.1f .. %.1f MADs",cut1*sq2p,cut2*sq2p) ;
      ININFO_message("  [ %.3f%% .. %.3f%% of normal distribution]",
                     200.0*qg(cut1*sfac) , 200.0*qg(cut2*sfac) ) ;
      ININFO_message("  [ %.3f%% .. %.3f%% of Laplace distribution]" ,
                   100.0*Laplace_t2p(cut1) , 100.0*Laplace_t2p(cut2) ) ;
    } else {
      INFO_message("local edit threshold:  %.1f MADS",cut2*sq2p) ;
      ININFO_message("  [ %.3f%% of normal distribution]",
                    200.0*qg(cut2*sfac) ) ;
      ININFO_message("  [ %.3f%% of Laplace distribution]",
                   100.0*Laplace_t2p(cut1) ) ;
    }
    INFO_message("%d slices to process",DSET_NZ(dset)) ;
   }
   kzold  = -1 ;
   nspike =  0 ; nbig = 0 ; nproc = 0 ; ctim = NI_clock_time() ;

 AFNI_OMP_START ;
#pragma omp parallel if( nxyz > 6666 )
 { int ii , iv , iu , id , jj ;
   float *far , *dar , *var , *fitar , *ssp , *fit , *zar ;
   short *sar , *qar ; byte *tar ;
   float fsig , fq , cls , snew , val ;
   float *NEW_wks=NULL ;

#pragma omp critical (DESPIKE_malloc)
  { far   = (float *) malloc( sizeof(float) * nvals ) ;
    dar   = (float *) malloc( sizeof(float) * nvals ) ;
    var   = (float *) malloc( sizeof(float) * nvals ) ;
    fitar = (float *) malloc( sizeof(float) * nvals ) ;
    ssp   = (float *) malloc( sizeof(float) * nvals ) ;
    fit   = (float *) malloc( sizeof(float) * nref  ) ;
    if( do_NEW ) NEW_wks = (float *)malloc(sizeof(float)*DES_workspace_size(nuse,nref)) ;
  }

#ifdef USE_OMP
   INFO_message("start OpenMP thread #%d",omp_get_thread_num()) ;
#endif

#pragma omp for
   for( ii=0 ; ii < nxyz ; ii++ ){   /* ii = voxel index */

      if( mask != NULL && mask[ii] == 0 ) continue ;   /* skip this voxel */

#ifndef USE_OMP
      kz = DSET_index_to_kz(dset,ii) ;       /* starting a new slice */
      if( kz != kzold ){
        if( verb ){
          fprintf(stderr, "++ start slice %2d",kz ) ;
          if( nproc > 0 ){
            pspike = (100.0*nspike)/nproc ;
            pbig   = (100.0*nbig  )/nproc ;
            fprintf(stderr,
                    "; so far %d data points, %d edits [%.3f%%], %d big edits [%.3f%%]",
                    nproc,nspike,pspike,nbig,pbig ) ;
          }
          fprintf(stderr,"\n") ;
        }
        kzold = kz ;
      }
#else
      if( verb && ii % 2345 == 1234 ) fprintf(stderr,".") ;
#endif

      /*** extract ii-th time series into far[] ***/

      switch( datum ){
        case MRI_short:
          for( iv=0 ; iv < nuse ; iv++ ){
            qar = DSET_ARRAY(dset,iv+ignore) ;   /* skip ignored data */
            far[iv] = (float)qar[ii] ;
          }
        break ;
        case MRI_float:
          for( iv=0 ; iv < nuse ; iv++ ){
            zar = DSET_ARRAY(dset,iv+ignore) ;
            far[iv] = zar[ii] ;
          }
        break ;
      }

      AAmemcpy(dar,far,sizeof(float)*nuse) ;   /* copy time series into dar[] */

      /*** solve for L1 fit ***/
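      /* The OLD path (cl1_solve) computes a true L1 (least absolute
         deviations) fit of far[] onto the nref reference columns, which is
         robust to spikes but slow.  The NEW path (DES_solve) reuses the
         pseudo-inverse computed once above by DES_get_psinv() -- presumably
         an ordinary least-squares fit -- as a much faster approximation. */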

      if( do_NEW )
        cls = DES_solve( NEW_psinv , far , fit , NEW_wks ) ; /* 29 Nov 2013 */
      else
        cls = cl1_solve( nuse , nref , far , ref , fit,0 ) ; /* the slow part */

      if( cls < 0.0f ){                      /* fit failed! */
#if 0
        fprintf(stderr,"curve fit fails at voxel %d %d %d\n",
                DSET_index_to_ix(dset,ii) ,
                DSET_index_to_jy(dset,ii) ,
                DSET_index_to_kz(dset,ii)  ) ;
#endif
        continue ;                           /* skip this voxel */
      }
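      /* ref[0] is presumably the constant column, so fit[0] enters the
         fitted value directly; ref[1] and ref[2] are the linear and
         quadratic trend columns, and columns 3..nref-1 are the sin/cos
         pairs built earlier. */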

      for( iv=0 ; iv < nuse ; iv++ ){        /* detrend */
        val =  fit[0]
             + fit[1]*ref[1][iv]             /* quadratic part of curve fit */
             + fit[2]*ref[2][iv] ;
        for( jj=3 ; jj < nref ; jj++ )       /* rest of curve fit */
          val += fit[jj] * ref[jj][iv] ;

        fitar[iv] = val ;                    /* save curve fit value */
        var[iv]   = dar[iv]-val ;            /* remove fitted value = resid */
        far[iv]   = fabsf(var[iv]) ;         /* abs value of resid */
      }

      /*** compute estimated standard deviation of the detrended data ***/

      fsig = sq2p * qmed_float(nuse,far) ;   /* also mangles far array */
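      /* sq2p is a constant set earlier in the program (presumably about
         sqrt(pi/2) ~= 1.2533); scaling the median absolute residual by it
         turns that robust statistic into a stand-in for the standard
         deviation of the detrended data. */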

      /*** process time series for spikes, editing data in dar[] ***/

      if( fsig > 0.0f ){                     /* data wasn't fit perfectly */

        /* find spikiness for each point in time */

        fq = 1.0f / fsig ;
        for( iv=0 ; iv < nuse ; iv++ ){
          ssp[iv] = fq * var[iv] ;           /* spikiness s = how many sigma out */
        }

        /* save spikiness in -ssave dataset */

        if( tset != NULL ){
          for( iv=0 ; iv < nuse ; iv++ ){
            tar     = DSET_ARRAY(tset,iv+ignore) ;
            snew    = ITFAC*fabsf(ssp[iv]) ;  /* scale for byte storage */
            tar[ii] = BYTEIZE(snew) ;         /* cf. mrilib.h */
          }
        }

        /* process values of |s| > cut1, editing dar[] */
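        /* In the classic 'smash' edit, a point whose spikiness exceeds cut1
           is pulled back toward the curve fit: the tanh() transform maps s
           in (cut1,infinity) smoothly into (cut1,cut2), so edited points
           never end up more than cut2 'sigmas' from the fit.  c21 and ic21
           are presumably cut2-cut1 and its reciprocal, set earlier. */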

        for( iv=0 ; iv < nuse ; iv++ ){ /* loop over time points */
          if( !localedit ){             /** classic 'smash' edit **/
            if( ssp[iv] > cut1 ){
              snew = cut1 + c21*mytanh((ssp[iv]-cut1)*ic21) ;   /* edit s down */
              dar[iv] = fitar[iv] + snew*fsig ;
#pragma omp critical (DESPIKE_counter)
              { nspike++ ; if( ssp[iv] > cut2 ) nbig++ ; }
            } else if( ssp[iv] < -cut1 ){
              snew = -cut1 + c21*mytanh((ssp[iv]+cut1)*ic21) ;  /* edit s up */
              dar[iv] = fitar[iv] + snew*fsig ;
#pragma omp critical (DESPIKE_counter)
              { nspike++ ; if( ssp[iv] < -cut2 ) nbig++ ; }
            }
          } else {                      /** local edit: 04 Apr 2007 **/
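            /* Local edit: a point at least cut2 'sigmas' from the fit is
               replaced by the average of its nearest non-spike neighbors in
               time (one neighbor's value if only one side has a usable
               point, or the curve-fit value itself if neither side does). */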
            if( ssp[iv] >= cut2 || ssp[iv] <= -cut2 ){
              for( iu=iv+1 ; iu < nuse ; iu++ )  /* find non-spike above */
                if( ssp[iu] < cut2 && ssp[iu] > -cut2 ) break ;
              for( id=iv-1 ; id >= 0   ; id-- )  /* find non-spike below */
                if( ssp[id] < cut2 && ssp[id] > -cut2 ) break ;
              switch( (id>=0) + 2*(iu<nuse) ){   /* compute replacement val */
                case 3: val = 0.5*(dar[iu]+dar[id]); break; /* iu and id OK */
                case 2: val =      dar[iu]         ; break; /* only iu OK   */
                case 1: val =              dar[id] ; break; /* only id OK   */
               default: val = fitar[iv]            ; break; /* shouldn't happen */
              }
              dar[iv] = val ;
#pragma omp critical (DESPIKE_counter)
              { nspike++ ; nbig++ ; }
            }
          }
        } /* end of loop over time points */
#pragma omp atomic
        nproc += nuse ;  /* number of data points processed */

      } /* end of processing time series when fsig is positive */

      /* put dar[] time series (possibly edited above) into output bricks */

      switch( datum ){
        case MRI_short:
          for( iv=0 ; iv < nuse ; iv++ ){
            sar = DSET_ARRAY(oset,iv+ignore) ; /* output brick */
            sar[ii] = (short)dar[iv] ;         /* original or mutated data */
          }
        break ;
        case MRI_float:
          for( iv=0 ; iv < nuse ; iv++ ){
            zar = DSET_ARRAY(oset,iv+ignore) ; /* output brick */
            zar[ii] = dar[iv] ;                /* original or mutated data */
          }
        break ;
      }

   } /* end of loop over voxels #ii */

#pragma omp critical (DESPIKE_malloc)
   { free(fit); free(ssp); free(fitar); free(var); free(dar); free(far);
     if( do_NEW ) free(NEW_wks) ; }

 } /* end OpenMP */
 AFNI_OMP_END ;

#ifdef USE_OMP
   if( verb ) fprintf(stderr,"\n") ;
#endif
   ctim = NI_clock_time() - ctim ;
   INFO_message( "Elapsed despike time = %s" , nice_time_string(ctim) ) ;
   if( ctim > 345678 && !do_NEW )
     ININFO_message("That was SLOW -- try the '-NEW' option for a speedup") ;

#ifdef USE_OMP
   if( verb ) fprintf(stderr,"\n") ;
#endif

   /*--- finish up ---*/

   if( do_NEW ) mri_free(NEW_psinv) ;

   DSET_delete(dset) ; /* delete input dataset */

   if( verb ){
     if( nproc > 0 ){
       pspike = (100.0*nspike)/nproc ;
       pbig   = (100.0*nbig  )/nproc ;
       INFO_message("FINAL: %d data points, %d edits [%.3f%%], %d big edits [%.3f%%]",
               nproc,nspike,pspike,nbig,pbig ) ;
     } else {
       INFO_message("FINAL: no good voxels found to process!!??") ;
     }
   }

   /* write results */

   DSET_write(oset) ;
   if( verb ) WROTE_DSET(oset) ;
   DSET_delete(oset) ;

   if( tset != NULL ){
     DSET_write(tset) ;
     if( verb ) WROTE_DSET(tset) ;
     DSET_delete(tset) ;
   }

   exit( THD_get_write_error_count() ) ;
}