Example #1
BOOL DrawScale(HDC hdc, RECT rect)
{
    // Font height

    enum
    {FONT_HEIGHT = 16};

    static HBITMAP bitmap;
    static HFONT font;
    static HDC hbdc;

    // Plain vanilla font

    static LOGFONT lf =
	{0, 0, 0, 0,
	 FW_NORMAL,
	 FALSE, FALSE, FALSE,
	 DEFAULT_CHARSET,
	 OUT_DEFAULT_PRECIS,
	 CLIP_DEFAULT_PRECIS,
	 DEFAULT_QUALITY,
	 DEFAULT_PITCH | FF_DONTCARE,
	 ""};

    // Draw nice etched edge

    DrawEdge(hdc, &rect , EDGE_SUNKEN, BF_ADJUST | BF_RECT);

    // Calculate bitmap dimensions

    int width = rect.right - rect.left;
    int height = rect.bottom - rect.top;

    // Create bitmap

    if (bitmap == NULL)
    {
	bitmap = CreateCompatibleBitmap(hdc, width, height);

	// Create DC

	hbdc = CreateCompatibleDC(hdc);
	SelectObject(hbdc, bitmap);
	SelectObject(hbdc, GetStockObject(DC_PEN));

	// Create font

	lf.lfHeight = FONT_HEIGHT;
	font = CreateFontIndirect(&lf);
	SelectObject(hbdc, font);
	SetTextAlign(hbdc, TA_CENTER | TA_BOTTOM);
    }

    // Erase background

    RECT brct =
	{0, 0, width, height};
    FillRect(hbdc, &brct, GetStockObject(WHITE_BRUSH));

    // Translate viewport

    SetViewportOrgEx(hbdc, width / 2, height / 2, NULL);

    // Draw scale

    for (int i = 1; i < 11; i++)
    {
	int x = round(FREQ_SCALE * log10(i)) - scale.v;

	for (int j = 0; j < 4; j++)
	{
	    MoveToEx(hbdc, x, 0, NULL);
	    LineTo(hbdc, x, height / 2);
	    x += FREQ_SCALE;
	}
    }

    for (int i = 3; i < 20; i += 2)
    {
	int x = round(FREQ_SCALE * log10(i / 2.0)) - scale.v;

	for (int j = 0; j < 4; j++)
	{
	    MoveToEx(hbdc, x, 6, NULL);
	    LineTo(hbdc, x, height / 2);
	    x += FREQ_SCALE;
	}
    }

    int a[] = {1, 2, 3, 4, 5, 6, 7, 8};
    for (int i = 0; i < LENGTH(a); i++)
    {
    	int x = round(FREQ_SCALE * log10(a[i])) - scale.v;

    	for (int j = 0; j < 2; j++)
    	{
	    static char s[8];

	    sprintf(s, "%d", a[i]);
    	    TextOut(hbdc, x, 0, s, strlen(s)); 

	    sprintf(s, "%d", a[i] * 10);
    	    TextOut(hbdc, x + FREQ_SCALE, 0, s, strlen(s));

    	    x += 2 * FREQ_SCALE;
    	}
    }

    // Move the origin back

    SetViewportOrgEx(hbdc, 0, 0, NULL);

    // Draw centre line

    MoveToEx(hbdc, width / 2, 0, NULL);
    LineTo(hbdc, width / 2, height);

    // Copy the bitmap

    BitBlt(hdc, rect.left, rect.top, width, height,
    	   hbdc, 0, 0, SRCCOPY);

    return TRUE;
}
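
A minimal sketch of how a scale-drawing routine like this might be called; the OnPaint helper and hwnd are assumptions for illustration (invoked from the owning window's WM_PAINT handling), not part of the original source:

#include <windows.h>

// Hypothetical paint handler that hands the whole client area to DrawScale()
void OnPaint(HWND hwnd)
{
    PAINTSTRUCT ps;
    HDC hdc = BeginPaint(hwnd, &ps);

    RECT rect;
    GetClientRect(hwnd, &rect);

    DrawScale(hdc, rect);    // draws the cached scale bitmap into the window

    EndPaint(hwnd, &ps);
}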
Example #2
pulsesequence()
{

/* DECLARE AND LOAD VARIABLES */

char        f1180[MAXSTR],   		      /* Flag to start t1 @ halfdwell */
            f2180[MAXSTR],    		      /* Flag to start t2 @ halfdwell */
            mag_flg[MAXSTR];      /* magic-angle coherence transfer gradients */
 
int         icosel,          			  /* used to get n and p type */
            t1_counter,  		        /* used for states tppi in t1 */
            t2_counter,  	 	        /* used for states tppi in t2 */
	    ni2 = getval("ni2");

double      tau1,         				         /*  t1 delay */
            tau2,        				         /*  t2 delay */
            timeTN = getval("timeTN"),     /* constant time for 15N evolution */
	    kappa = 5.4e-3,
	    lambda = 2.4e-3,
            
	pwClvl = getval("pwClvl"), 	        /* coarse power for C13 pulse */
        pwC = getval("pwC"),          /* C13 90 degree pulse length at pwClvl */
	rf0,            	  /* maximum fine power when using pwC pulses */

/* the following pulse lengths for SLP pulses are automatically calculated    */
/* by the macro "biocal".  SLP pulse shapes, "offC3" etc are called       */
/* directly from your shapelib.                    			      */
   pwC3 = getval("pwC3"),  /*180 degree pulse at Ca(56ppm) null at CO(174ppm) */
   pwC3a,                      /* pwC3a=pwC3, but not set to zero when pwC3=0 */
   phshift3,             /* phase shift induced on CO by pwC3 ("offC3") pulse */
   pwZ,					   /* the largest of pwC3 and 2.0*pwN */
   pwC6,                      /* 90 degree selective sinc pulse on CO(174ppm) */
   pwC8,                     /* 180 degree selective sinc pulse on CO(174ppm) */
   rf3,	                           /* fine power for the pwC3 ("offC3") pulse */
   rf6,	                           /* fine power for the pwC6 ("offC6") pulse */
   rf8,	                           /* fine power for the pwC8 ("offC8") pulse */
   bw, ofs, ppm,  /* bandwidth, offset, ppm - temporary Pbox parameters */

   compH = getval("compH"),       /* adjustment for H1 amplifier compression */
   compC = getval("compC"),       /* adjustment for C13 amplifier compression */

   	pwHs = getval("pwHs"),	        /* H1 90 degree pulse length at tpwrs */
   	tpwrsf = getval("tpwrsf"),      /* fine power for pwHs pulse          */
   	tpwrs,	  	              /* power for the pwHs ("H2Osinc") pulse */

   	pwHd,	    		        /* H1 90 degree pulse length at tpwrd */
   	tpwrd,	  	                   /* rf for WALTZ decoupling */

        waltzB1 = getval("waltzB1"),  /* waltz16 field strength (in Hz)     */
	pwNlvl = getval("pwNlvl"),	              /* power for N15 pulses */
        pwN = getval("pwN"),          /* N15 90 degree pulse length at pwNlvl */

	sw1 = getval("sw1"),
	sw2 = getval("sw2"),

	gt1 = getval("gt1"),  		       /* coherence pathway gradients */
        gzcal  = getval("gzcal"),            /* g/cm to DAC conversion factor */
	gzlvl1 = getval("gzlvl1"),
	gzlvl2 = getval("gzlvl2"),

	gt0 = getval("gt0"),				   /* other gradients */
	gt3 = getval("gt3"),
	gt4 = getval("gt4"),
	gt5 = getval("gt5"),
	gzlvl0 = getval("gzlvl0"),
	gzlvl3 = getval("gzlvl3"),
	gzlvl4 = getval("gzlvl4"),
	gzlvl5 = getval("gzlvl5"),
	gzlvl6 = getval("gzlvl6");

    getstr("f1180",f1180);
    getstr("f2180",f2180);
    getstr("mag_flg",mag_flg);



/*   LOAD PHASE TABLE    */

	settable(t3,2,phi3);
	settable(t4,1,phx);
	settable(t5,4,phi5);
       {settable(t8,1,phx);
	settable(t9,8,phi9);
	settable(t10,1,phx);
	settable(t11,1,phy);
	settable(t12,4,rec);}

/*   INITIALIZE VARIABLES   */

    if( dpwrf < 4095 )
	{ printf("reset dpwrf=4095 and recalibrate C13 90 degree pulse");
	  psg_abort(1); }

    /* maximum fine power for pwC pulses */
	rf0 = 4095.0;


      setautocal();                      /* activate auto-calibration */   

      if (autocal[0] == 'n') 
      {
    /* offC3 - 180 degree pulse on Ca, null at CO 118ppm away */
          pwC3a = getval("pwC3a");    
          rf3 = (compC*4095.0*pwC*2.0)/pwC3a;
	  rf3 = (int) (rf3 + 0.5);  
	
    /* 90 degree one-lobe sinc pulse on CO, null at Ca 118ppm away */	
          pwC6 = getval("pwC6");    
	  rf6 = (compC*4095.0*pwC*1.69)/pwC6;	/* needs 1.69 times more     */
	  rf6 = (int) (rf6 + 0.5);		/* power than a square pulse */

    /* 180 degree one-lobe sinc pulse on CO, null at Ca 118ppm away */
          pwC8 = getval("pwC8");
	  rf8 = (compC*4095.0*pwC*2.0*1.65)/pwC8;  /* needs 1.65 times more     */
	  rf8 = (int) (rf8 + 0.5);		   /* power than a square pulse */

    /* selective H2O one-lobe sinc pulse */
          tpwrs = tpwr - 20.0*log10(pwHs/(compH*pw*1.69)); /* needs 1.69 times more */
          tpwrs = (int) (tpwrs);                       /* power than a square pulse */

    /* power level and pulse time for WALTZ 1H decoupling */
	  pwHd = 1/(4.0 * waltzB1) ;                              /* 7.5 kHz rf   */
	  tpwrd = tpwr - 20.0*log10(pwHd/(compH*pw));
	  tpwrd = (int) (tpwrd + 0.5);
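
	  /* Illustrative numbers only (assumed values, not taken from this
	     parameter set): with pw = 7.0 us, compH = 1.0 and pwHs = 1.7 ms,
	     20.0*log10(pwHs/(compH*pw*1.69)) is about 43 dB, so tpwrs ends up
	     roughly 43 dB below tpwr; tpwrd follows from the same formula
	     without the 1.69 sinc-pulse factor. */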
      }
      else      /* if autocal = 'y'(yes), 'q'(quiet), 'r'(read) or 's'(semi) */
      {
        if(FIRST_FID)                                         /* make shapes */
        {
          ppm = getval("dfrq"); 
          bw = 118.0*ppm; ofs = -bw; 
          offC3 = pbox_make("offC3", "square180n", bw, ofs, compC*pwC, pwClvl);
          offC6 = pbox_make("offC6", "sinc90n", bw, 0.0, compC*pwC, pwClvl);
          offC8 = pbox_make("offC8", "sinc180n", bw, 0.0, compC*pwC, pwClvl);
          H2Osinc = pbox_Rsh("H2Osinc", "sinc90", pwHs, 0.0, compH*pw, tpwr);
          bw = 2.8*7500.0;
          wz16 = pbox_Dcal("WALTZ16", 2.8*waltzB1, 0.0, compH*pw, tpwr);

          ofs_check(H1ofs, C13ofs, N15ofs, H2ofs);
        }
        pwC3a = offC3.pw; rf3 = offC3.pwrf;             /* set up parameters */
        pwC6 = offC6.pw; rf6 = offC6.pwrf; 
        pwC8 = offC8.pw; rf8 = offC8.pwrf;
        pwHs = H2Osinc.pw; tpwrs = H2Osinc.pwr-1.0;  /* 1dB correction applied */
        tpwrd = wz16.pwr; pwHd = 1.0/wz16.dmf;  
      }

      if (tpwrsf < 4095.0) tpwrs = tpwrs + 6.0;

    /* the pwC3 pulse at the middle of t1  */
       if (pwC3a > 2.0*pwN) pwZ = pwC3a; else pwZ = 2.0*pwN;
       phshift3=0.0;
       if(pwC3 > 0) phshift3 = 48.0;

/* CHECK VALIDITY OF PARAMETER RANGES */

    if ( 0.5*ni2*1/(sw2) > timeTN - pwC3a - WFG3_START_DELAY)
       { printf(" ni2 is too big. Make ni2 equal to %d or less.\n", 
  	 ((int)((timeTN - WFG3_START_DELAY)*2.0*sw2))); psg_abort(1);}

    if ( dm[A] == 'y' || dm[B] == 'y' || dm[C] == 'y' )
       { printf("incorrect dec1 decoupler flags! Should be 'nnn' "); psg_abort(1);}

    if ( dm2[A] == 'y' || dm2[B] == 'y' )
       { printf("incorrect dec2 decoupler flags! Should be 'nny' "); psg_abort(1);}

    if ( dpwr2 > 50 )
       { printf("dpwr2 too large! recheck value  "); psg_abort(1);}

    if ( pw > 50.0e-6 )
       { printf(" pw too long ! recheck value "); psg_abort(1);} 
  
    if ( (pwN > 100.0e-6) && (ni>1 || ni2>1))
       { printf(" pwN too long! recheck value "); psg_abort(1);} 
 

/* PHASES AND INCREMENTED TIMES */

/*  Phase incrementation for hypercomplex 2D data, States-Haberkorn element */

    if (phase1 == 2)   tsadd(t3,1,4);  
    {  
       if (phase2 == 2)  {tsadd(t10,2,4); icosel = +1;}
       else 			       icosel = -1;    
    }


/*  Set up f1180  */
   
    tau1 = d2;
    if(f1180[A] == 'y') 
	{ tau1 += ( 1.0 / (2.0*sw1) ); if(tau1 < 0.2e-6) tau1 = 0.0; }
    tau1 = tau1/2.0;


/*  Set up f2180  */

    tau2 = d3;
    if(f2180[A] == 'y') 
	{ tau2 += ( 1.0 / (2.0*sw2) ); if(tau2 < 0.2e-6) tau2 = 0.0; }
    tau2 = tau2/2.0;
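
    /* Illustrative numbers only (assumed, not read from the parameter set):
       with sw1 = 2000 Hz the half dwell added for f1180='y' is
       1.0/(2.0*sw1) = 250 us, so the first increment runs with
       tau1 = (d2 + 250 us)/2; the f2180/sw2 case in t2 works the same way. */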


/* Calculate modifications to phases for States-TPPI acquisition          */

   if( ix == 1) d2_init = d2;
   t1_counter = (int) ( (d2-d2_init)*sw1 + 0.5 );
   if(t1_counter % 2) 
	{ tsadd(t3,2,4); tsadd(t12,2,4); }

   if( ix == 1) d3_init = d3;
   t2_counter = (int) ( (d3-d3_init)*sw2 + 0.5 );
   if(t2_counter % 2) 
	{ tsadd(t8,2,4); tsadd(t12,2,4); }



/* BEGIN PULSE SEQUENCE */

status(A);
    delay(d1);
    rcvroff();
    obspower(tpwr);
    decpower(pwClvl);
 	dec2power(pwNlvl);
	decpwrf(rf0);
	obsoffset(tof);
	txphase(zero);
   	delay(1.0e-5);

	dec2rgpulse(pwN, zero, 0.0, 0.0);  /*destroy N15 and C13 magnetization*/
	decrgpulse(pwC, zero, 0.0, 0.0);
	zgradpulse(gzlvl0, 0.5e-3);
	delay(1.0e-4);
	dec2rgpulse(pwN, one, 0.0, 0.0);
	decrgpulse(pwC, zero, 0.0, 0.0);
	zgradpulse(0.7*gzlvl0, 0.5e-3);
	delay(5.0e-4);

   	rgpulse(pw,zero,0.0,0.0);                      /* 1H pulse excitation */

   	dec2phase(zero);
	zgradpulse(gzlvl0, gt0);
	delay(lambda - gt0);

   	sim3pulse(2.0*pw, 0.0, 2.0*pwN, zero, zero, zero, 0.0, 0.0);

   	txphase(one);
	zgradpulse(gzlvl0, gt0);
	delay(lambda - gt0);

 	rgpulse(pw, one, 0.0, 0.0);

    if (tpwrsf < 4095.0) obspwrf(tpwrsf);
    obspower(tpwrs);
    txphase(two);
    shaped_pulse("H2Osinc", pwHs, two, 2.0e-6, 0.0);
    obspower(tpwrd);
    if (tpwrsf < 4095.0) obspwrf(4095.0);
    zgradpulse(gzlvl3, gt3);
    delay(2.0e-4);
    dec2rgpulse(pwN, zero, 0.0, 0.0);

    txphase(one);
    delay(kappa - pwHd - 2.0e-6 - PRG_START_DELAY);

    rgpulse(pwHd,one,0.0,0.0);
    txphase(zero);
    delay(2.0e-6);
    obsprgon("waltz16", pwHd, 90.0);	          /* PRG_START_DELAY */
    xmtron();
    decphase(zero);
    dec2phase(zero);
    decpwrf(rf8);
    delay(timeTN - kappa - WFG3_START_DELAY);
   
							  /* WFG3_START_DELAY */
	sim3shaped_pulse("", "offC8", "", 0.0, pwC8, 2.0*pwN, zero, zero, zero, 
								     0.0, 0.0);
	decphase(t3);
	decpwrf(rf6);
	delay(timeTN);

	dec2rgpulse(pwN, zero, 0.0, 0.0);
   xmtroff();
   obsprgoff();
   rgpulse(pwHd,three,2.0e-6,0.0);
   zgradpulse(gzlvl3, gt3);
   delay(2.0e-4);
    rgpulse(pwHd,one,0.0,0.0);
    txphase(zero);
    delay(2.0e-6);
    obsprgon("waltz16", pwHd, 90.0);	          /* PRG_START_DELAY */
    xmtron();
/*   xxxxxxxxxxxxxxxxxxxxxx       13CO EVOLUTION        xxxxxxxxxxxxxxxxxx    */

   decshaped_pulse("offC6", pwC6, t3, 1.0e-6, 0.0);
   decphase(zero);


   if((tau1 - 2.0*pwC6/3.14 - WFG3_START_DELAY - 0.5*pwZ - POWER_DELAY) > SAPS_DELAY)
   {
      decpwrf(rf3);
      delay(tau1 - 2.0*pwC6/3.14 - WFG3_START_DELAY - 0.5*pwZ - POWER_DELAY);
      sim3shaped_pulse("", "offC3", "", 0.0, pwC3a, 2.0*pwN, zero, zero, zero,0.0,0.0);
      initval(phshift3, v3);
      decstepsize(1.0);
      dcplrphase(v3);  				        /* SAPS_DELAY */
      decpwrf(rf6);
      decphase(t5);
      delay(tau1 - 2.0*pwC6/3.14 - SAPS_DELAY - 0.5*pwZ- WFG3_START_DELAY - POWER_DELAY);
   }
   else
   {
       decpwrf(rf8);
       decshaped_pulse("offC8", pwC8, zero, 2.0e-6, 0.0);
       decpwrf(rf6);
       decphase(t5);
       delay(2.0e-6);
   }

   decshaped_pulse("offC6", pwC6, t5, 0.5e-6, 1.0e-6);
   xmtroff();
   obsprgoff();
   rgpulse(pwHd,three,2.0e-6,0.0);

/*  xxxxxxxxxxxxxxxxxx    N15 EVOLUTION    xxxxxxxxxxxxxxxxxxxxx  */

   dec2phase(t8);
   zgradpulse(gzlvl4, gt4);
   dcplrphase(zero);
   obspower(tpwr);
   delay(2.0e-4);

   dec2rgpulse(pwN, t8, 0.0, 0.0);
   decpwrf(rf3);
   decphase(zero);
   delay((timeTN - tau2 - pwC3a)/2.0);
   decshaped_pulse("offC3", pwC3a, zero, 0.0, 0.0);

   dec2phase(t9);
   decpwrf(rf8);
   delay((timeTN - tau2 - pwC3a)/2.0);
							 /* WFG3_START_DELAY  */
/*
   sim3shaped_pulse("", "offC8", "", 2.0*pw, pwC8, 2.0*pwN, zero, zero, t9, 
								   0.0, 0.0);
*/
   sim3shaped_pulse("", "offC8", "",0.0,pwC8,2.0*pwN,zero,zero,t9,0.0,0.0);
   dec2phase(t10);
   decpwrf(rf3);

   delay((timeTN + tau2 - pwC3a)/2.0);
   decshaped_pulse("offC3", pwC3a, zero, 0.0, 0.0);
   delay((timeTN + tau2 - pwC3a)/2.0 - 2.75e-3 - 2.0*pw);
   rgpulse(2.0*pw,zero, 0.0, 0.0);
   if (mag_flg[A]=='y')   
   {
        magradpulse(gzcal*gzlvl1, gt1);
   }
   else
   {
        zgradpulse(gzlvl1, gt1);  
        delay(4.0*GRADIENT_DELAY);
   }
   txphase(t4);
   delay(2.75e-3 - gt1 - 6.0*GRADIENT_DELAY);     

/*  xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx  */
   sim3pulse(pw, 0.0, pwN, t4, zero, t10, 0.0, 0.0);

   txphase(zero);
   dec2phase(zero);
   zgradpulse(gzlvl5, gt5);
   delay(lambda - 1.3*pwN - gt5);

   sim3pulse(2.0*pw, 0.0, 2.0*pwN, zero, zero, zero, 0.0, 0.0);

   zgradpulse(gzlvl5, gt5);
   txphase(one);
   dec2phase(t11);
   delay(lambda - 1.3*pwN - gt5);

   sim3pulse(pw, 0.0, pwN, one, zero, t11, 0.0, 0.0);

   txphase(zero);
   dec2phase(zero);
   zgradpulse(gzlvl6, gt5);
   delay(lambda - 1.3*pwN - gt5);

   sim3pulse(2.0*pw, 0.0, 2.0*pwN, zero, zero, zero, 0.0, 0.0);

   zgradpulse(gzlvl6, gt5);
   delay(lambda - 0.65*pwN - gt5);

   rgpulse(pw, zero, 0.0, 0.0); 

   delay((gt1/10.0) + 1.0e-4 - 0.5*pw + 2.0*GRADIENT_DELAY + POWER_DELAY);

   rgpulse(2.0*pw, zero, 0.0,0.0);
   dec2power(dpwr2);				       /* POWER_DELAY */
   if (mag_flg[A] == 'y')    magradpulse(icosel*gzcal*gzlvl2, gt1/10.0);
   else   zgradpulse(icosel*gzlvl2, gt1/10.0);            /* 2.0*GRADIENT_DELAY */
   rcvron();
statusdelay(C,1.0e-4);

	setreceiver(t12);
}		 
Example #3
void WaveInData(WPARAM wParam, LPARAM lParam)
{
    // Create buffers for processing the audio data

    static double buffer[SAMPLES];
    static complex x[STEP];
    static double xa[RANGE];

    static double K = 2.0 * M_PI / (double)SAMPLE_RATE;

    // Initialise data structs

    if (spectrum.data == NULL)
    {
	spectrum.data = xa;
	spectrum.length = RANGE;
    }

    // Copy the input data

    memmove(buffer, buffer + STEP, (SAMPLES - STEP) * sizeof(double));

    short *data = (short *)((WAVEHDR *)lParam)->lpData;

    for (int i = 0; i < STEP; i++)
	buffer[SAMPLES - STEP + i] = (double)data[i];

    // Give the buffer back

    waveInAddBuffer(audio.hwi, (WAVEHDR *)lParam, sizeof(WAVEHDR));

    // Maximum data value

    static double dmax;

    if (dmax < 4096.0)
	dmax = 4096.0;

    // Calculate normalising value

    double norm = dmax;
    dmax = 0.0;

    // Copy data to FFT input arrays

    for (int i = 0; i < STEP; i++)
    {
	// Find the magnitude

	if (dmax < fabs(buffer[i]))
	    dmax = fabs(buffer[i]);

	// Calculate the window

	double window =
	    0.5 - 0.5 * cos(2.0 * M_PI *
			    i / STEP);

	// Normalise and window the input data

	x[i].r = (double)buffer[i] / norm * window;
    }

    // do FFT

    fftr(x, STEP);

    // Process FFT output

    for (int i = 1; i < RANGE; i++)
    {
	double real = x[i].r;
	double imag = x[i].i;

	xa[i] = hypot(real, imag);
    }

    // Do cross correlation

    double imag = 0.0;
    double real = 0.0;

    for (int i = 0; i < SAMPLES; i++)
    {
	double window =
	    (0.5 - 0.5 * cos(2.0 * M_PI * i / SAMPLES));

	imag += (buffer[i] / 32768.0) * window * sin(i * display.f * K);

	real += (buffer[i] / 32768.0) * window * cos(i * display.f * K);
    }

    double level = hypot(real, imag);

    level = level / (SAMPLES / (4.0 * sqrt(2.0)));

    meter.l = level / pow(10.0, 0.15);

    double dB = log10(level) * 20.0;

    if (dB < -80.0)
	dB = -80.0;

    static long timer;

    // Update display

    if ((timer % 4) == 0)
	InvalidateRgn(spectrum.hwnd, NULL, TRUE);

    if ((timer % 16) == 0)
    {
	    display.l = dB;
	    InvalidateRgn(display.hwnd, NULL, TRUE);
    }

    timer++;
}
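
The processing above applies a Hann window before the FFT and the usual 20*log10 decibel conversion with a -80 dB floor; a self-contained sketch of just those two formulas (the sample values are made up):

#include <math.h>
#include <stdio.h>

int main(void)
{
    enum {N = 8};
    double window[N];

    // Hann window, as applied to each FFT input sample above
    for (int i = 0; i < N; i++)
	window[i] = 0.5 - 0.5 * cos(2.0 * M_PI * i / N);

    // Linear level to dB, clamped at -80 dB like the display code
    double level = 0.001;
    double dB = 20.0 * log10(level);
    if (dB < -80.0)
	dB = -80.0;

    printf("window[1] = %f, %f -> %.1f dB\n", window[1], level, dB);
    return 0;
}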
Example #4
local void get_UBVRI_of_star(hdyn *bi, vec pos,
	real &Us, real &Bs, real &Vs, real &Rs, real &Is) {


  // To solar radii
  //  vec pos = bi->get_pos() - dc_pos;
  pos[0] = bi->get_starbase()->conv_r_dyn_to_star(pos[0]);
  pos[1] = bi->get_starbase()->conv_r_dyn_to_star(pos[1]);
  pos[2] = bi->get_starbase()->conv_r_dyn_to_star(pos[2]);

  real time = bi->get_starbase()->conv_t_dyn_to_star(bi->get_system_time());

  // And now to parsec
  real Rsun_per_parsec = cnsts.parameters(solar_radius)
                       / cnsts.parameters(parsec);
  pos[0] *= Rsun_per_parsec;
  pos[1] *= Rsun_per_parsec;
  pos[2] *= Rsun_per_parsec;
  

     star_type_spec tpe_class = NAC;
     spectral_class star_class;
     stellar_type stype = NAS;
     stellar_type_summary sstype = ZAMS;
     real t_cur, m_rel, m_env, m_core, mco_core, T_eff, L_eff, p_rot, b_fld;
     real t_rel=0, R_eff=0;
     real M_tot;
     if (bi->get_use_sstar()) {
       	stype = bi->get_starbase()->get_element_type();
	M_tot  = bi->get_starbase()->conv_m_dyn_to_star(bi->get_mass());
        t_cur = bi->get_starbase()->get_current_time();
        t_rel = bi->get_starbase()->get_relative_age();
        T_eff = bi->get_starbase()->temperature();
        L_eff = bi->get_starbase()->get_luminosity();
        star_class = get_spectral_class(T_eff);
	R_eff = bi->get_starbase()->get_effective_radius();
	ltm_to_ubvri(log10(L_eff), log10(T_eff), M_tot,
		     Us, Bs, Vs, Rs, Is);

     }
     else if (bi->get_star_story()) {

       extract_story_chapter(stype, t_cur, t_rel, 
			     m_rel, m_env, m_core, mco_core,
			     T_eff, L_eff, p_rot, b_fld,
			     *bi->get_star_story());

       M_tot = m_env + m_core;
       sstype = summarize_stellar_type(stype);
       star_class = get_spectral_class(T_eff);
       
       ltm_to_ubvri(log10(L_eff), log10(T_eff), M_tot,
		     Us, Bs, Vs, Rs, Is);
       
       if (find_qmatch(bi->get_star_story(), "Class"))
	 tpe_class = extract_stellar_spec_summary_string(
             getsq(bi->get_star_story(), "Class"));
       if (L_eff>0)
          R_eff = sqrt(L_eff)
	        / pow(T_eff/cnsts.parameters(solar_temperature), 2);
     }
     else {
       cout << "    No stellar information found: " << endl;
       return;
     }
}
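
The R_eff fallback above follows the Stefan-Boltzmann relation L = 4*pi*R^2*sigma*T^4, i.e. R/Rsun = sqrt(L/Lsun)/(T/Tsun)^2 in solar units; a standalone illustration with assumed values:

#include <cmath>
#include <iostream>

int main()
{
    const double T_sun = 5780.0;     // approximate solar effective temperature [K]

    double L = 100.0;                // luminosity in solar units (assumed)
    double T = 10000.0;              // effective temperature in K (assumed)

    // R/Rsun = sqrt(L/Lsun) / (T/Tsun)^2
    double R = std::sqrt(L) / std::pow(T / T_sun, 2);

    std::cout << "R = " << R << " Rsun" << std::endl;    // about 3.3 Rsun
    return 0;
}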
Example #5
local void print_star(hdyn *bi, bool bound, vec pos, vec vel,
		      real &Up, real &Bp, real &Vp, real &Rp, real &Ip, 
		      bool verbose) { 


  int id = bi->get_index();

  //  vec cod_vel = 0;
  //  bool bound = star_is_bound(bi, cod_vel);

  // To solar radii
  //  vec pos = bi->get_pos() - dc_pos;
  pos[0] = bi->get_starbase()->conv_r_dyn_to_star(pos[0]);
  pos[1] = bi->get_starbase()->conv_r_dyn_to_star(pos[1]);
  pos[2] = bi->get_starbase()->conv_r_dyn_to_star(pos[2]);

  real time = bi->get_starbase()->conv_t_dyn_to_star(bi->get_system_time());

  // And now to parsec
  real Rsun_per_parsec = cnsts.parameters(solar_radius)
                       / cnsts.parameters(parsec);
  pos[0] *= Rsun_per_parsec;
  pos[1] *= Rsun_per_parsec;
  pos[2] *= Rsun_per_parsec;

//      real to_Rsun_Myr = cnsts.physics(km_per_s) * cnsts.physics(Myear)
//	               / cnsts.parameters(solar_radius);
//                       
//      real to_dyn      = bi->get_starbase()->conv_r_star_to_dyn(1)
//                       / bi->get_starbase()->conv_t_star_to_dyn(1);
//      vel = vel/(to_Rsun_Myr * to_dyn);
                       
  real to_km_per_second = cnsts.parameters(solar_radius)
                        / (cnsts.physics(km_per_s) * cnsts.physics(Myear));
  real to_star      = bi->get_starbase()->conv_r_dyn_to_star(1)/
	              bi->get_starbase()->conv_t_dyn_to_star(1);
  to_km_per_second = to_star*to_km_per_second;

//	PRC(to_star);PRL(to_km_per_second);
  vel[0] *= to_km_per_second;
  vel[1] *= to_km_per_second;
  vel[2] *= to_km_per_second;

     star_type_spec tpe_class = NAC;
     spectral_class star_class;
     stellar_type stype = NAS;
     stellar_type_summary sstype = ZAMS;
     real t_cur, m_rel, m_env, m_core, mco_core, T_eff, L_eff, p_rot, b_fld;
     real t_rel=0, R_eff=0;
     real M_tot, Us, Bs, Vs, Rs, Is;	
     if (bi->get_use_sstar()) {
       	stype = bi->get_starbase()->get_element_type();
	M_tot  = bi->get_starbase()->conv_m_dyn_to_star(bi->get_mass());
        t_cur = bi->get_starbase()->get_current_time();
        t_rel = bi->get_starbase()->get_relative_age();
        T_eff = bi->get_starbase()->temperature();
        L_eff = bi->get_starbase()->get_luminosity();
        star_class = get_spectral_class(T_eff);
	R_eff = bi->get_starbase()->get_effective_radius();
	ltm_to_ubvri(log10(L_eff), log10(T_eff), M_tot,
		     Us, Bs, Vs, Rs, Is);

     }
     else if (bi->get_star_story()) {

       extract_story_chapter(stype, t_cur, t_rel, 
			     m_rel, m_env, m_core, mco_core,
			     T_eff, L_eff, p_rot, b_fld,
			     *bi->get_star_story());

       M_tot = m_env + m_core;
       sstype = summarize_stellar_type(stype);
       star_class = get_spectral_class(T_eff);
       
       ltm_to_ubvri(log10(L_eff), log10(T_eff), M_tot,
		     Us, Bs, Vs, Rs, Is);
       
       if (find_qmatch(bi->get_star_story(), "Class"))
	 tpe_class = extract_stellar_spec_summary_string(
             getsq(bi->get_star_story(), "Class"));
       if (L_eff>0)
          R_eff = pow(T_eff/cnsts.parameters(solar_temperature), 2)
	        * sqrt(L_eff);
     }
     else {
       cout << "    No stellar information found for: ";
       bi->pretty_print_node(cout);
       return;
     }

     real U, B, V, R, I;
     combine_ubvri(Up, Bp, Vp, Rp, Ip,
 	           Us, Bs, Vs, Rs, Is,
                   U, B, V, R, I);

     if(verbose)
       cout << " Time= " << time << " id= " << id << " b= " << bound 
	    << " type= " << stype << " m= " << M_tot << " R= " << R_eff
	    << " L= " << L_eff 
	    << " T_eff= " << T_eff 
	    << " r= "  << pos[0] << " " << pos[1] << " " << pos[2] 
	    << " v= "  << vel[0] << " " << vel[1] << " " << vel[2] 
	    << " ubvri= "<<  U << " " << B << " " << V << " " << R << " "
	    << I	<< " :: ";
     else {
       cout << time <<" "<< id << bound <<" "
	    <<" "<< stype <<" "
	    << M_tot <<" "<< R_eff <<" "<< L_eff  <<" "<< T_eff 
	    <<" "<< pos[0] << " " << pos[1] << " " << pos[2] 
	    <<" "<< vel[0] << " " << vel[1] << " " << vel[2] 
	    <<" "<< U << " " << B << " " << V << " " << R << " "
	    << I	<< " ";
       //       PRC(id);PRC(pos[0]);PRC(pos[1]);PRL(pos[2]);
     }

     Up=U;
     Bp=B;
     Vp=V;
     Ip=I;
}
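
combine_ubvri() presumably sums the component fluxes band by band; the standard magnitude arithmetic for that is m = -2.5*log10(10^(-0.4*m1) + 10^(-0.4*m2)). A small sketch of that rule (combine_mag is an illustrative name, not the Starlab routine):

#include <cmath>
#include <iostream>

// Combine two magnitudes by adding their fluxes
static double combine_mag(double m1, double m2)
{
    double flux = std::pow(10.0, -0.4 * m1) + std::pow(10.0, -0.4 * m2);
    return -2.5 * std::log10(flux);
}

int main()
{
    // Two equal V = 10 stars combine to V of roughly 9.25
    std::cout << combine_mag(10.0, 10.0) << std::endl;
    return 0;
}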
Example #6
local void makeking(dyn * b, int n, real w0, bool n_flag, bool u_flag, int test)

// Create a King model, and optionally initialize an N-body system
// with total mass = 1, core radius = 1.

{
    int i, iz, j, jcore, jhalf;
    real dz, z, rho0;
    real rhalf, zmcore;

    int nprof;
    real v20;

    if (w0 > 16) err_exit("makeking: must specify w0 < 16");

    initialize_global();

    // Compute the cluster density/velocity/potential profile
    
    poisson(rr, NM, w0, nprof, v20);

    if (test == 1)
	dump_model_and_exit(nprof);

    // Determine statistics and characteristic scales of the King model.

    rho0 = 1 / zm[nprof];		 // Central density for total mass = 1

    // Unit of velocity = sig, where rc^2 = 9 sig^2 / (4 pi G rho0)

    real sig = sqrt(four3pi * rho0 / 3); // This 3 was v20 in the f77 version...
					 // v20 is central vel. disp. / sig^2

    // Scale the zm array to unit total mass.

    for (i = 0; i <= nprof; i++)
	zm[i] = zm[i] / zm[nprof];

    // Index the mass distribution, and determine the core mass and
    // the half-mass radius.

    // By construction, rr[indx[j]] and rr[indx[j+1]] bracket the
    // radius containing a fraction j / NINDX of the total mass.

    indx[0] = 0;
    indx[NINDX] = nprof;

    dz = 1.0/NINDX;
    z = dz;
    iz = 1;
    for (j = 1; j <= nprof - 1; j++) {
	if (rr[j] < 1) jcore = j;
	if (zm[j] < 0.5) jhalf = j; 
	if (zm[j] > z) {
	    indx[iz] = j - 1;
	    z = z + dz;
	    iz = iz + 1;
	}
    }

    zmcore = zm[jcore] + (zm[jcore+1] - zm[jcore]) * (1 - rr[jcore])
		/ (rr[jcore+1] - rr[jcore]);

    rhalf = rr[jhalf] + (rr[jhalf+1] - rr[jhalf])
		* (0.5 - zm[jhalf])
		    / (zm[jhalf+1] - zm[jhalf]);

    // Compute the kinetic and potential energies, and determine the
    // virial radius and ratio.

    real kin = 0, pot =0;

    for (i = 1; i <= nprof; i++) {
	kin += (zm[i] - zm[i-1]) * (v2[i-1] + v2[i]);
	pot -= (zm[i] - zm[i-1]) * (zm[i] + zm[i-1]) / (rr[i-1] + rr[i]);
    }
    kin *= 0.25*sig*sig*v20;

    real rvirial = -0.5/pot;

    cerr << endl << "King model";
    cerr << "\n    w0 = " << w0 << "  beta = " << beta << "  nprof =" << nprof
	 <<        "  V20/sig2 = " << v20
	 <<        "  Mc/M = " << zmcore << endl
         <<   "    Rt/Rc = " << rr[nprof] << " (c = " << log10(rr[nprof])
         <<        ")  Rh/Rc = " << rhalf
         <<        "  Rvir/Rc = " << rvirial // << "  -T/U = " << -kin/pot
	 << endl
         <<   "    Rc/Rvir = " << 1/rvirial
         <<        "  Rh/Rvir = " << rhalf/rvirial
         <<        "  Rt/Rvir = " << rr[nprof]/rvirial
	 << "\n\n";

    if (test == 2) {

	// Scaling factors are for Mtotal = 1, Rvir = 1:

	dump_model_and_exit(nprof, rho0, 1/rvirial);
    }

    if (b == NULL || n < 1) return;

    // Initialize the N-body system.

    sprintf(tmp_string,
    "         King model, w0 = %.2f, Rt/Rc = %.3f, Rh/Rc = %.3f, Mc/M = %.3f",
	      w0, rr[nprof], rhalf, zmcore);
    b->log_comment(tmp_string);

    // Write essential model information to root dyn story.

    putrq(b->get_log_story(), "initial_mass", 1.0);

    // putrq(b->get_log_story(), "initial_rvirial", 0.25/kin); // assumes a lot!
							       // -- too much...

    putrq(b->get_log_story(), "initial_rtidal_over_rvirial",
	  rr[nprof] / (0.25/kin));

    // Assign positions and velocities. Note that it may actually
    // be preferable to do this in layers instead.

    for_all_daughters(dyn, b, bi) {

	if (test == 3) {

	    // Test: For a pure King model, getvel should generate a
	    //       distribution of velocities with maximum speed
	    //       sqrt(-2*p) and <v^2> = v2*v20, where p and v2
	    //       are the scaled potential and mean-square
	    //       velocity at any given radius.

	    real nsum = 10000;

	    for (int zone = 0; zone < 0.95*nprof; zone += nprof/15) {
		real v2sum = 0;
		real v2max = 0;
		for (int jj = 0; jj < nsum; jj++) {
		    setvel(bi, psi[zone]);
		    real vsq = bi->get_vel()*bi->get_vel();
		    v2sum += vsq;
		    v2max = Starlab::max(v2max, vsq);
		}

		cerr << "zone " << zone << "  r = " << rr[zone]
		     << "  v2max = " << v2max<< "  ?= "  << -2*psi[zone]
		     << "  v2mean = " << v2sum/nsum << "  ?= " << v2[zone]*v20
		     << endl;
	    }

	    exit(0);

	}

	if (n_flag)
	    bi->set_mass(1.0/n);

	real pot;
	setpos(bi, pot);
	setvel(bi, pot);

	// Unit of length = rc.
	// Unit of velocity = sig.

	bi->scale_vel(sig);
    }

    // System is in virial equilibrium in a consistent set of units
    // with G, core radius, and total mass = 1.

    // Convenient to have the "unscaled" system (-u on the command line)
    // be as close to standard units as possible, so rescale here to force
    // the virial radius to 1.  (Steve, 9/04)

    real xfac = 1/rvirial;
    real vfac = 1/sqrt(xfac);

    for_all_daughters(dyn, b, bi) {
	bi->set_pos(xfac*bi->get_pos());
	bi->set_vel(vfac*bi->get_vel());
    }
}
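
The final rescaling preserves virial equilibrium: kinetic energy scales as vfac^2 and potential energy as 1/xfac, so vfac = 1/sqrt(xfac) leaves -T/U untouched. A toy check of that invariance (the energy and radius values are assumptions, not taken from the model above):

#include <cmath>
#include <iostream>

int main()
{
    double T = 0.25, U = -0.5;          // virial equilibrium: -T/U = 0.5 (assumed)
    double rvirial = 0.8;               // virial radius in core-radius units (assumed)

    double xfac = 1.0 / rvirial;
    double vfac = 1.0 / std::sqrt(xfac);

    double T2 = T * vfac * vfac;        // v -> vfac*v  =>  T -> vfac^2 * T
    double U2 = U / xfac;               // r -> xfac*r  =>  U -> U / xfac

    std::cout << "-T/U before: " << -T / U
	      << "  after: " << -T2 / U2 << std::endl;   // both 0.5
    return 0;
}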
Example #7
void poisson(real x[], int nmax, real w0, int& nprof, real& v20)

//       Self-contained 1-D (spherical) Poisson's equation solver.
//       Currently knows about normal and lowered King models.
//
//        Input:  nmax is the maximum number of points allowed
//                w0 is the dimensionless central potential
//                iout allows messages if nonzero

//        Output: x   is scaled radius (r/rc)
//                d   is scaled density (1 at center)
//                v2  is scaled velocity dispersion (1 at center)
//                psi is scaled potential (-W0 at center)
//                zm  is cumulative mass (scaling from x, d scalings)
//                nprof is the actual number of points generated
//                v20 is the central 3-D velocity dispersion (unit = sig^2)

{

  int i, iflag2; 
  real psi0, xn, xo, fac;
  real y[2];

  psi0 = - abs(w0);

  // Initialize at center of cluster.

  xn = 0;
  y[0] = 0;
  y[1] = psi0;
  x[0] = 0;
  psi[0] = psi0;
  v2[0] = 1;
  zm[0] = 0;

  // Establish density scaling factor.

  get_dens_and_vel(psi0, d[0], v2[0]);
  dc_inverse = 1./d[0];

  fac = pow(10, (log10(RMAX/RLIN) / (nmax-NLIN)));

// 	Poisson's equation is:
//
// 		(1/r^2) d/dr (r^2 dphi/dr)  =  4 pi G rho,
//
// 	where r is radius, phi is potential, and rho is density, given
// 	(for equal-mass stars) by
//
// 		rho	=  {integral (v < ve)} 4 pi v^2 f(v) dv,
//
// 	where ve is the escape velocity,
//
// 		ve^2	=  -2 phi.
//
// 	The (3-D) velocity distribution is
//
// 		f(v)	=  A (exp(-v^2 / 2 sig^2)
// 					 - exp(-ve^2 / 2 sig^2)),
//
// 	where sig^2 is a 1-D velocity dispersion (not quite the
// 	central velocity dispersion, except in the limit of infinite
// 	central potential).  In King's (1966) paper, he uses
// 	j^2 = 1 / (2 sig^2).
//
// 	Following King, we define the core radius rc by
//
// 		rc^2	=  9 sig^2 / (4 pi G rho0)
//
// 	and the dimensionless depth as
//
// 		W0	=  -phi0 / sig^2,
//
// 	where rho0 and phi0 are the central density and potential,
// 	respectively.
//
// 	We then scale as follows:
//
// 		x	=  r / rc
//
// 		d	=  rho / rho0
//
// 		psi	=  phi / sig^2,
//
// 	to obtain
//
// 		(x psi)''  =  9 x d,
//
// 	where ' = d/dx.
//
// 	We integrate this ODE from the cluster center (x = 0, d = 1,
// 	psi = -W0) to the tidal radius (d = 0) by defining
//
//		y(0)	=  (x psi)
//		y(1)	=  y(0)'
//
//	We cover the first RLIN core radii linearly with NLIN points;
//	the remaining coverage is logarithmic, out to RMAX core radii,
//	if necessary.  We stop when d <= 0.

  iflag2 = 0;

  for (i = 1; i <= nmax; i++) {

      xo = xn;
      if (i <= NLIN)
          xn = (RLIN * i) / NLIN;
      else
	  xn = fac * xo;

      real dx = 0.051*(xn-xo);

      rk4(xo, xn, y, 2, dx);

      //  N.B. Remember that y[0] is x*psi and xo is updated by the rk4 step.

      xn = xo;

      x[i] = xn;
      psi[i] = y[0] / xn;

      v2[i] = 1;
      get_dens_and_vel(psi[i], d[i], v2[i]);

      if (d[i] < 0) {

 	// Density is negative, calculation is over.
 	// Interpolate to the tidal radius.

 	x[i] = x[i-1] + (x[i] - x[i-1]) / (1.0 - d[i]/d[i-1]);
 	d[i] = 0;
 	v2[i] = 0;

      }

      zm[i] = x[i] * y[1] - y[0];

      if (d[i] > 0) {

	  // Strange syntax because d = NaN (because of earlier error)
	  // will test FALSE in "if (d < 0)".

      } else {

          iflag2 = 1;
          break;

      }
  }

  if (iflag2 == 0) i = nmax;

  nprof = i;

  // Scale d and v2 to their central values.  Save v20 (unit = sig^2).

  v20 = v2[0];
  for (i = nprof; i >= 0; i--) {
      d[i] = d[i] / d[0];
      v2[i] = v2[i] / v2[0];
      zm[i] = (fourpi/9) * zm[i];
  }

}
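
poisson() hands the integration to an external rk4() routine defined elsewhere in the library; the classical fourth-order Runge-Kutta step it relies on looks roughly like this (a generic sketch, not the actual Starlab interface):

#include <cmath>
#include <iostream>

// One classical RK4 step for y' = f(x, y), advancing y from x to x + h
static double rk4_step(double (*f)(double, double), double x, double y, double h)
{
    double k1 = f(x, y);
    double k2 = f(x + 0.5 * h, y + 0.5 * h * k1);
    double k3 = f(x + 0.5 * h, y + 0.5 * h * k2);
    double k4 = f(x + h, y + h * k3);
    return y + (h / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4);
}

int main()
{
    // Check on y' = y, y(0) = 1: one step of h = 0.1 should land close to exp(0.1)
    double y = rk4_step([](double, double yy) { return yy; }, 0.0, 1.0, 0.1);
    std::cout << y << " vs " << std::exp(0.1) << std::endl;
    return 0;
}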
Example #8
//      How to do a correlate-style CLASSIFY on some text.
//
int crm_expr_correlate_classify(CSL_CELL *csl, ARGPARSE_BLOCK *apb,
        VHT_CELL **vht,
        CSL_CELL *tdw,
        char *txtptr, int txtstart, int txtlen)
{
    //      classify the sparse spectrum of this input window
    //      as belonging to a particular type.
    //
    //       This code should look very familiar- it's cribbed from
    //       the code for LEARN
    //
    int i, j, k;
    char ptext[MAX_PATTERN]; //  the regex pattern
    int plen;
    //  the hash file names
    char htext[MAX_PATTERN + MAX_CLASSIFIERS * MAX_FILE_NAME_LEN];
    int htext_maxlen = MAX_PATTERN + MAX_CLASSIFIERS * MAX_FILE_NAME_LEN;
    int hlen;
    //  the match statistics variable
    char stext[MAX_PATTERN + MAX_CLASSIFIERS * (MAX_FILE_NAME_LEN + 100)];
    int stext_maxlen = MAX_PATTERN + MAX_CLASSIFIERS * (MAX_FILE_NAME_LEN + 100);
    int slen;
    char svrbl[MAX_PATTERN]; //  the match statistics text buffer
    int svlen;
    int fnameoffset;
    char fname[MAX_FILE_NAME_LEN];
    int eflags;
    int cflags;

    struct stat statbuf;    //  for statting the hash file
    //regex_t regcb;

    unsigned int fcounts[MAX_CLASSIFIERS]; // total counts for feature normalize

    double cpcorr[MAX_CLASSIFIERS];         // corpus correction factors
    int64_t linear_hits[MAX_CLASSIFIERS];   // actual hits per classifier
    int64_t square_hits[MAX_CLASSIFIERS];   // square of runlengths of match
    int64_t cube_hits[MAX_CLASSIFIERS];     // cube of runlength matches
    int64_t quad_hits[MAX_CLASSIFIERS];     // quad of runlength matches
    int incr_hits[MAX_CLASSIFIERS];         // 1+2+3... hits per classifier

    int64_t total_linear_hits; // actual total linear hits for all classifiers
    int64_t total_square_hits; // actual total square hits for all classifiers
    int64_t total_cube_hits;   // actual total cube hits for all classifiers
    int64_t total_quad_hits;   // actual total quad hits for all classifiers
    int64_t total_features;    // total number of characters in the system

    hitcount_t totalhits[MAX_CLASSIFIERS];
    double tprob;       //  total probability in the "success" domain.

    int textlen;  //  text length  - roughly corresponds to
    //  information content of the text to classify

    double ptc[MAX_CLASSIFIERS]; // current running probability of this class
    double renorm = 0.0;

    char *hashes[MAX_CLASSIFIERS];
    int hashlens[MAX_CLASSIFIERS];
    char *hashname[MAX_CLASSIFIERS];
    int succhash;
    int vbar_seen;     // did we see '|' in classify's args?
    int maxhash;
    int fnstart, fnlen;
    int fn_start_here;
    int textoffset;
    int bestseen;
    int thistotal;

    if (internal_trace)
        fprintf(stderr, "executing a CLASSIFY\n");

    //          we use the main line txtptr, txtstart, and txtlen now,
    //          so we don't need to extract anything from the b1start stuff.

    //           extract the hash file names
    hlen = crm_get_pgm_arg(htext, htext_maxlen, apb->p1start, apb->p1len);
    hlen = crm_nexpandvar(htext, hlen, htext_maxlen, vht, tdw);

    //           extract the "this is a word" regex
    //
    plen = crm_get_pgm_arg(ptext, MAX_PATTERN, apb->s1start, apb->s1len);
    plen = crm_nexpandvar(ptext, plen, MAX_PATTERN, vht, tdw);

    //            extract the optional "match statistics" variable
    //
    svlen = crm_get_pgm_arg(svrbl, MAX_PATTERN, apb->p2start, apb->p2len);
    svlen = crm_nexpandvar(svrbl, svlen, MAX_PATTERN, vht, tdw);
    {
        int vstart, vlen;
        if (crm_nextword(svrbl, svlen, 0, &vstart, &vlen))
        {
            crm_memmove(svrbl, &svrbl[vstart], vlen);
            svlen = vlen;
            svrbl[vlen] = 0;
        }
        else
        {
            svlen = 0;
            svrbl[0] = 0;
        }
    }

    //     status variable's text (used for output stats)
    //
    stext[0] = 0;
    slen = 0;

    //            set our flags, if needed.  The defaults are
    //            "case"
    cflags = REG_EXTENDED;
    eflags = 0;

    if (apb->sflags & CRM_NOCASE)
    {
        if (user_trace)
            fprintf(stderr, " setting NOCASE for tokenization\n");
        cflags += REG_ICASE;
        eflags = 1;
    }


    //       Now, the loop to open the files.
    bestseen = 0;
    thistotal = 0;

    //      initialize our arrays for N .css files
    for (i = 0; i < MAX_CLASSIFIERS; i++)
    {
        fcounts[i] = 0;       // check later to prevent a divide-by-zero
                              // error on empty .css file
        cpcorr[i] = 0.0;      // corpus correction factors
        linear_hits[i] = 0;   // linear hits
        square_hits[i] = 0;   // square of the runlength
        cube_hits[i] = 0;     // cube of the runlength
        quad_hits[i] = 0;     // quad of the runlength
        incr_hits[i] = 0;     // 1+2+3... hits
        totalhits[i] = 0;     // absolute hit counts
        ptc[i] = 0.5;         // a priori probability
    }

    //

    vbar_seen = 0;
    maxhash = 0;
    succhash = 0;
    fnameoffset = 0;

    //    now, get the file names and mmap each file
    //     get the file name (grody and non-8-bit-safe, but doesn't matter
    //     because the result is used for open() and nothing else).
    //   GROT GROT GROT  this isn't NULL-clean on filenames.  But then
    //    again, stdio.h itself isn't NULL-clean on filenames.
    if (user_trace)
        fprintf(stderr, "Classify list: -%.*s-\n", hlen, htext);
    fn_start_here = 0;
    fnlen = 1;
    while (fnlen > 0 && ((maxhash < MAX_CLASSIFIERS - 1)))
    {
        if (crm_nextword(htext,
                         hlen, fn_start_here,
                         &fnstart, &fnlen)
           && fnlen > 0)
        {
            strncpy(fname, &htext[fnstart], fnlen);
            fname[fnlen] = 0;
            //      fprintf(stderr, "fname is '%s' len %d\n", fname, fnlen);
            fn_start_here = fnstart + fnlen + 1;
            if (user_trace)
            {
                fprintf(stderr, "Classifying with file -%s- succhash=%d, maxhash=%d\n",
                        fname, succhash, maxhash);
            }
            if (fname[0] == '|' && fname[1] == 0)
            {
                if (vbar_seen)
                {
                    nonfatalerror("Only one '|' allowed in a CLASSIFY.\n",
                                  "We'll ignore it for now.");
                }
                else
                {
                    succhash = maxhash;
                }
                vbar_seen++;
            }
            else
            {
                //  be sure the file exists
                //             stat the file to get its length
                k = stat(fname, &statbuf);
                //             quick check- does the file even exist?
                if (k != 0)
                {
                    nonfatalerror("Nonexistent Classify table named: ",
                                  fname);
                }
                else
                {
                    // [i_a] check hashes[] range BEFORE adding another one!
                    if (maxhash >= MAX_CLASSIFIERS)
                    {
                        nonfatalerror("Too many classifier files.",
                                      "Some may have been disregarded");
                    }
                    else
                    {
                        //  file exists - do the mmap
                        //
                        hashlens[maxhash] = statbuf.st_size;
                        // [i_a] hashlens[maxhash] must be fixed for the header size!
                        hashes[maxhash] = crm_mmap_file(fname,
                                                        0,
                                                        hashlens[maxhash],
                                                        PROT_READ,
                                                        MAP_SHARED,
                                                        CRM_MADV_RANDOM,
                                                        &hashlens[maxhash]);
                        if (hashes[maxhash] == MAP_FAILED)
                        {
                            nonfatalerror("Couldn't memory-map the table file",
                                          fname);
                        }
                        else
                        {
                            //
                            //     Check to see if this file is the right version
                            //
                            //     FIXME : for now, there's no version number
                            //     associated with a .correllation file
                            // int fev;
                            // if (0)
                            //(hashes[maxhash][0].hash != 1 ||
                            //  hashes[maxhash][0].key  != 0)
                            //{
                            //  fev = fatalerror ("The .css file is the wrong version!  Filename is: ",
                            //                   fname);
                            //  return (fev);
                            //}

                            //
                            //     save the name for later...
                            //
                            hashname[maxhash] = (char *)calloc((fnlen + 10), sizeof(hashname[maxhash][0]));
                            if (!hashname[maxhash])
                            {
                                untrappableerror(
                                    "Couldn't alloc hashname[maxhash]\n", "We need that part later, so we're stuck.  Sorry.");
                            }
                            else
                            {
                                strncpy(hashname[maxhash], fname, fnlen);
                                hashname[maxhash][fnlen] = 0;
                            }
                            maxhash++;
                        }
                    }
                }
            }
        }
    }

    //
    //    If there is no '|', then all files are "success" files.
    if (succhash == 0)
        succhash = maxhash;

    //    a CLASSIFY with no arguments is always a "success".
    if (maxhash == 0)
        return 0;

    if (user_trace)
    {
        fprintf(stderr, "Running with %d files for success out of %d files\n",
                succhash, maxhash);
    }

    // sanity checks...  Uncomment for super-strict CLASSIFY.
    //
    //    do we have at least 1 valid .css files?
    if (maxhash == 0)
    {
        return nonfatalerror("Couldn't open at least 1 .css file for classify().", "");
    }

#if 0
    //    do we have at least 1 valid .css file at both sides of '|'?
    if (!vbar_seen || succhash <= 0 || (maxhash <= succhash))
    {
        return nonfatalerror("Couldn't open at least 1 .css file per SUCC | FAIL category "
                             "for classify().\n", "Hope you know what are you doing.");
    }
#endif

    //
    //   now all of the files are mmapped into memory,
    //   and we can do the correlations and add up matches.
    i = 0;
    j = 0;
    k = 0;
    thistotal = 0;

    //     put in the ptr/start/len values we got from the outside caller
    textoffset = txtstart;
    textlen = txtlen;

    //
    //    We keep track of the hits in these categories
    //  linear_hits[MAX_CLASSIFIERS];  // actual hits per classifier
    //  square_hits[MAX_CLASSIFIERS];  // square of runlengths of match
    //  incr_hits[MAX_CLASSIFIERS];  // 1+2+3... hits per classifier
    //

    //   Now we do the actual correlation.
    //   for each file...
    //    slide the incoming text (mdw->filetext[textofset])
    //     across the corpus text (hashes[] from 0 to hashlens[])
    //      and count the bytes that are the same, the runlengths,
    //       etc.

    for (k = 0; k < maxhash; k++)
    {
        int it;  // it is the start index into the tested text
        int ik;  // ik is the start index into the known corpus text
        int ilm; // ilm is the "local" matches (N in a row)

        //    for each possible displacement of the known  (ik) text...
        for (ik = 0;
             ik < hashlens[k];
             ik++)
        {
            int itmax;

            ilm = 0;
            itmax = textlen;
            if (ik + itmax > hashlens[k])
                itmax = hashlens[k] - ik;
            // for each position in the test (it) text...
            for (it = 0;
                 it < itmax;
                 it++)
            {
                //   do the characters in this position match?
                if (hashes[k][ik + it] == txtptr[textoffset + it])
                {
                    // yes they matched
                    linear_hits[k]++;
                    ilm++;
                    square_hits[k] = square_hits[k] + (ilm * ilm);
                    cube_hits[k] = cube_hits[k] + (ilm * ilm * ilm);
                    quad_hits[k] = quad_hits[k] + (ilm * ilm * ilm * ilm);
                }
                else
                {
                    //   nope, they didn't match.
                    //   So, we do the end-of-runlength stuff:
                    ilm = 0;
                }
                if (0)
                    fprintf(stderr, "ik: %d  it: %d  chars %c %c lin: %lld  sqr: %lld cube: %lld quad: %lld\n",
                            ik, it,
                            hashes[k][ik + it],
                            txtptr[textoffset + it],
                            (long long int)linear_hits[k],
                            (long long int)square_hits[k],
                            (long long int)cube_hits[k],
                            (long long int)quad_hits[k]);
            }
        }
    }


    //   Now we have the total hits for each text corpus.  We can then
    //  turn that into a vague probability measure, and then renormalize
    //  that to get probabilities.
    //
    //   But first, let's reflect on what we've got here.  We have our test
    //   text, and we have a corpus which is "nominally correlated",
    //   and another corpus that is nominally uncorrelated.
    //
    //   The uncorrelated text will have an average match rate of 1/256th
    //   in the linear domain (well, for random bytes; English text will match
    //   a lot more often, due to the fact that ASCII only uses the low 7
    //   bits, most text is written in lower case, Zipf's law, etc.).
    //
    //   We can calculate a predicted total on a per-character basis for all
    //   of the corpora, then use that as an average expectation.
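    //
    //   (For scale: with purely random bytes, a 200-character test text slid
    //   across a corpus is expected to match about 200/256, i.e. a bit under
    //   one character, per alignment in the linear count; these numbers are
    //   illustrative, not measured from any corpus.)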

    //    Calculate total hits
    total_linear_hits = 0;
    total_square_hits = 0;
    total_cube_hits = 0;
    total_quad_hits = 0;
    total_features = 0;
    for (k = 0; k < maxhash; k++)
    {
        total_linear_hits += linear_hits[k];
        total_square_hits += square_hits[k];
        total_cube_hits += cube_hits[k];
        total_quad_hits += quad_hits[k];
        total_features += hashlens[k];
    }


    for (k = 0; k < maxhash; k++)
    {
        if (hashlens[k] > 0
           && total_features > 0)
        {
            //     Note that we don't normalize the probabilities yet- we do
            //     that down below.
            //
            //     .00397 is not a magic number - it's the random coincidence
            //     rate for 1 chance in 256, with run-length-squared boost.
            //     .00806 is the random coincidence rate for 7-bit characters.
            //
            //ptc[k] = ((0.0+square_hits[k] - (.00397 * hashlens[k] )));
            //      ptc[k] = ((0.0+square_hits[k] - (.00806 * hashlens[k] )))
            //        / hashlens[k];

            //      ptc[k] = (0.0+square_hits[k] ) / hashlens[k];
            //      ptc[k] = (0.0+ quad_hits[k] ) / hashlens[k];
            ptc[k] = (0.0 + quad_hits[k]) / linear_hits[k];

            if (ptc[k] < 0)
                ptc[k] = 10 * DBL_MIN;
        }
        else
        {
            ptc[k] = 0.5;
        }
    }


    //    ptc[k] = (sqrt (0.0 + square_hits[k])-linear_hits[k] ) / hashlens[k] ;
    //    ptc[k] =  (0.0 + square_hits[k] - linear_hits[k] ) ;
    //    ptc[k] =  ((0.0 + square_hits[k]) / hashlens[k]) ;
    //    ptc[k] = sqrt ((0.0 + square_hits[k]) / hashlens[k]) ;
    //    ptc[k] = ((0.0 + linear_hits[k]) / hashlens[k]) ;


    //   calculate renormalizer (the Bayesian formula's denominator)
    renorm = 0.0;

    //   now calculate the per-ptc numerators
    for (k = 0; k < maxhash; k++)
        renorm = renorm + (ptc[k]);

    //   check for a zero normalizer
    if (renorm == 0)
        renorm = 1.0;

    //  and renormalize
    for (k = 0; k < maxhash; k++)
        ptc[k] = ptc[k] / renorm;

    //   if we have underflow (any probability == 0.0 ) then
    //   bump the probability back up to 10^-308, or
    //   whatever a small multiple of the minimum double
    //   precision value is on the current platform.
    //
    for (k = 0; k < maxhash; k++)
    {
        if (ptc[k] < 10 * DBL_MIN)
            ptc[k] = 10 * DBL_MIN;
    }

    if (internal_trace)
    {
        for (k = 0; k < maxhash; k++)
        {
            fprintf(stderr,
                    " file: %d  linear: %lld  square: %lld  RMS: %6.4e  ptc[%d] = %6.4e\n",
                    k, (long long int)linear_hits[k], (long long int)square_hits[k],
                    sqrt(0.0 + square_hits[k]), k, ptc[k]);
        }
    }

    //  end of repeat-the-regex loop


    //  cleanup time!
    //  remember to let go of the fd's and mmaps
    for (k = 0; k < maxhash; k++)
    {
        crm_munmap_file(hashes[k]);
    }

    if (user_trace)
    {
        for (k = 0; k < maxhash; k++)
            fprintf(stderr, "Probability of match for file %d: %f\n", k, ptc[k]);
    }
    //
    tprob = 0.0;
    for (k = 0; k < succhash; k++)
        tprob = tprob + ptc[k];
    //
    //      Do the calculations and format some output, which we may or may
    //      not use... but we need the calculated result anyway.
    //
    if (1 /* svlen > 0 */)
    {
        char buf[1024];
        double accumulator;
        double remainder;
        double overall_pR;
        int m;
        buf[0] = 0;
        accumulator = 10 * DBL_MIN;
        for (m = 0; m < succhash; m++)
        {
            accumulator += ptc[m];
        }
        remainder = 10 * DBL_MIN;
        for (m = succhash; m < maxhash; m++)
        {
            remainder += ptc[m];
        }
        overall_pR = log10(accumulator) - log10(remainder);

        //   note also that strcat _accumulates_ in stext.
        //  There would be a possible buffer overflow except that _we_ control
        //   what gets written here.  So it's no biggie.

        if (tprob > 0.5)
        {
            sprintf(buf, "CLASSIFY succeeds; (correlate) success probability: %6.4f  pR: %6.4f\n", tprob, overall_pR);
        }
        else
        {
            sprintf(buf, "CLASSIFY fails; (correlate) success probability: %6.4f  pR: %6.4f\n", tprob, overall_pR);
        }
        if (strlen(stext) + strlen(buf) <= stext_maxlen)
            strcat(stext, buf);

        //   find best single matching file
        //
        bestseen = 0;
        for (k = 0; k < maxhash; k++)
        {
            if (ptc[k] > ptc[bestseen])
            {
                bestseen = k;
            }
        }
        remainder = 10 * DBL_MIN;
        for (m = 0; m < maxhash; m++)
        {
            if (bestseen != m)
            {
                remainder += ptc[m];
            }
        }

        //   ... and format some output of best single matching file
        //
        snprintf(buf, WIDTHOF(buf), "Best match to file #%d (%s) "
                                    "prob: %6.4f  pR: %6.4f\n",
                 bestseen,
                 hashname[bestseen],
                 ptc[bestseen],
                 (log10(ptc[bestseen]) - log10(remainder)));
        buf[WIDTHOF(buf) - 1] = 0;
        if (strlen(stext) + strlen(buf) <= stext_maxlen)
            strcat(stext, buf);
        sprintf(buf, "Total features in input file: %d\n", hashlens[bestseen]);
        if (strlen(stext) + strlen(buf) <= stext_maxlen)
            strcat(stext, buf);

        //     Now do the per-file breakdowns:
        //
        for (k = 0; k < maxhash; k++)
        {
            int m;
            remainder = 10 * DBL_MIN;
            for (m = 0; m < maxhash; m++)
            {
                if (k != m)
                {
                    remainder += ptc[m];
                }
            }
            snprintf(buf, WIDTHOF(buf),
                     "#%d (%s):"
                     " features: %d, L1: %lld L2: %lld L3: %lld, L4: %lld prob: %3.2e, pR: %6.2f\n",
                     k,
                     hashname[k],
                     hashlens[k],
                     (long long int)linear_hits[k],
                     (long long int)square_hits[k],
                     (long long int)cube_hits[k],
                     (long long int)quad_hits[k],
                     ptc[k],
                     (log10(ptc[k]) - log10(remainder)));
            buf[WIDTHOF(buf) - 1] = 0;
            // strcat (stext, buf);
            if (strlen(stext) + strlen(buf) <= stext_maxlen)
                strcat(stext, buf);
        }
        // check here if we got enough room in stext to stuff everything
        // perhaps we'd better raise a nonfatalerror, instead of just
        // whining on stderr
        if (strcmp(&(stext[strlen(stext) - strlen(buf)]), buf) != 0)
        {
            nonfatalerror("WARNING: not enough room in the buffer to create "
                          "the statistics text.  Perhaps you could try bigger "
                          "values for MAX_CLASSIFIERS or MAX_FILE_NAME_LEN?",
                          " ");
        }
        if (svlen > 0)
        {
            crm_destructive_alter_nvariable(svrbl, svlen,                    stext, (int)strlen(stext), csl->calldepth);
        }
    }

    //
    //  Free the hashnames, to avoid a memory leak.
    //
    for (i = 0; i < maxhash; i++)
        free(hashname[i]);
    if (tprob <= 0.5)
    {
        if (user_trace)
            fprintf(stderr, "CLASSIFY was a FAIL, skipping forward.\n");
        //    and do what we do for a FAIL here
        CRM_ASSERT(csl->cstmt >= 0);
        CRM_ASSERT(csl->cstmt <= csl->nstmts);
#if defined(TOLERATE_FAIL_AND_OTHER_CASCADES)
        csl->next_stmt_due_to_fail = csl->mct[csl->cstmt]->fail_index;
#else
        csl->cstmt = csl->mct[csl->cstmt]->fail_index - 1;
#endif
        if (internal_trace)
        {
            fprintf(stderr, "CLASSIFY.CORRELATE is jumping to statement line: %d/%d\n", csl->mct[csl->cstmt]->fail_index, csl->nstmts);
        }
        CRM_ASSERT(csl->cstmt >= 0);
        CRM_ASSERT(csl->cstmt <= csl->nstmts);
        csl->aliusstk[csl->mct[csl->cstmt]->nest_level] = -1;
        return 0;
    }


    //
    //   all done... if we got here, we should just continue execution
    if (user_trace)
        fprintf(stderr, "CLASSIFY was a SUCCESS, continuing execution.\n");
// regcomp_failed:
    return 0;
}
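
The pR value reported in the statistics text is just the base-10 log-odds of the success-side probability mass against everything else, floored at a small multiple of DBL_MIN; isolated for clarity (a standalone sketch, not CRM114 code):

#include <float.h>
#include <math.h>
#include <stdio.h>

// log10 odds of the "success" probability mass versus the remainder
static double overall_pR(const double *ptc, int succhash, int maxhash)
{
    double accumulator = 10 * DBL_MIN;
    double remainder = 10 * DBL_MIN;
    int m;

    for (m = 0; m < succhash; m++)
        accumulator += ptc[m];
    for (m = succhash; m < maxhash; m++)
        remainder += ptc[m];

    return log10(accumulator) - log10(remainder);
}

int main(void)
{
    double ptc[2] = {0.9, 0.1};     /* made-up renormalized probabilities */

    /* log10(0.9 / 0.1) is about 0.954 */
    printf("pR = %6.4f\n", overall_pR(ptc, 1, 2));
    return 0;
}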