int
ForcAdMedian(char *state, double *forecast)
{
	struct ad_median_state *s = (struct ad_median_state *)state;
	double forc;

	/*
	 * no data yet -- nothing to forecast
	 */
	if(s->M_count == 0)
	{
		return(0);
	}

	if(s->M_count < s->max)
	{
		/*
		 * buffer not full yet: take the median over all the data we have
		 */
		forc = FindMedian(s->M_array,
				  s->M_ts,
				  s->M_count,
				  s->artificial_time,
				  s->M_count);
	}
	else
	{
		/*
		 * buffer is full: use the current adaptive window size
		 */
		forc = FindMedian(s->M_array,
				  s->M_ts,
				  s->M_count,
				  s->artificial_time,
				  s->win);
	}

	*forecast = forc;

	return(1);
}
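ForcAdMedian above and UpdateAdMedian at the end of this page both cast the opaque state pointer to struct ad_median_state, whose definition is not part of these examples. A minimal sketch of the fields the two functions actually touch (the field types and the series handle are assumptions, not the original declaration):

struct ad_median_state {
	void *series;           /* underlying measurement series queried via F_COUNT() -- type assumed */
	double *M_array;        /* median buffer, kept sorted by MSort() */
	double *M_ts;           /* artificial time stamps parallel to M_array */
	int M_count;            /* number of entries currently in the median buffer */
	int max;                /* maximum window size / buffer capacity */
	int win;                /* current (adaptive) window size */
	int offset;             /* how much smaller/larger a window to try when adapting */
	double artificial_time; /* entry counter used in place of real time stamps */
};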
#include <stdio.h>
#include <stdlib.h>
#include <conio.h>

int main()
{
    int *dizi;
    int elemanSayisi;
    int i;

    printf("How many elements will your array have> ");
    scanf("%d", &elemanSayisi);

    dizi = (int *) malloc(elemanSayisi * sizeof(int));
    if (dizi == NULL)
        return 1;

    for(i = 0; i < elemanSayisi; i++){
        printf("Enter element %d> ", i + 1);
        scanf("%d", &dizi[i]);
    }

    /* quickselect-based median search over the whole array */
    FindMedian(dizi, 0, elemanSayisi - 1, elemanSayisi);

    for(i = 0; i < elemanSayisi; i++)
        printf("%d ", dizi[i]);

    getch();
    free(dizi);
    return 0;
}
//driver program
int main()
{
	struct node *head = NULL;
	int i;
	/* build the list by inserting 5,4,...,1 at the head, printing the
	 * list and its running median after each insertion */
	for (i = 5; i > 0; i--)
	{
		insert(&head, i);
		printList(head);
		FindMedian(head);
	}
	return 0;
}
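The driver relies on three helpers that are not shown here: insert, printList and the linked-list FindMedian itself. Because the driver inserts 5,4,...,1 at the head, the list is always sorted, so the median is simply the middle node. A minimal sketch under that assumption (slow/fast pointer walk; for an even length it prints the average of the two middle values) could be:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int data;
	struct node *next;
};

/* push a new node at the head of the list */
void insert(struct node **head, int data)
{
	struct node *n = (struct node *)malloc(sizeof(*n));
	n->data = data;
	n->next = *head;
	*head = n;
}

void printList(struct node *head)
{
	for (; head != NULL; head = head->next)
		printf("%d ", head->data);
	printf("\n");
}

/* slow/fast pointer walk: when fast reaches the end, slow sits on the middle node */
void FindMedian(struct node *head)
{
	struct node *slow = head, *fast = head, *prev = NULL;

	if (head == NULL)
		return;

	while (fast != NULL && fast->next != NULL) {
		prev = slow;
		slow = slow->next;
		fast = fast->next->next;
	}

	if (fast != NULL)	/* odd length: slow is the median */
		printf("Median: %d\n", slow->data);
	else			/* even length: average the two middle nodes */
		printf("Median: %.1f\n", (prev->data + slow->data) / 2.0);
}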
Example #4
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <vector>

void Test() {
    std::vector<int> vec;
    for (int i = 0; i < 50; i++)
        vec.push_back(rand() % 100);
    int median = FindMedian(vec);
    std::sort(vec.begin(), vec.end());
    std::cout << "Input: ";
    for (std::size_t i = 0; i < vec.size(); i++)
        std::cout << vec[i] << " ";
    std::cout << std::endl;
    // For 50 elements the lower median is the element at index 24 after sorting.
    std::cout << "Real Median is " << vec[24] << std::endl;
    std::cout << "Median I found is " << median << std::endl;
    assert(vec[24] == median);
}
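The Test above only checks that FindMedian returns the lower median (the element at index 24 of the 50 sorted values); the implementation itself is not part of the example. One minimal way to satisfy the assertion, assuming the vector may be taken by value so the caller's ordering is preserved, is std::nth_element:

#include <algorithm>
#include <cstddef>
#include <vector>

// Lower median: the element that ends up at index (n - 1) / 2 after sorting.
// This is only a sketch, not the implementation the Test above was written against.
int FindMedian(std::vector<int> vec)   // by value, so the caller's vector is untouched
{
    std::size_t mid = (vec.size() - 1) / 2;
    std::nth_element(vec.begin(), vec.begin() + mid, vec.end());
    return vec[mid];
}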
void FindMedian(int dizi[], int l, int r, int elemanSayisi){

    int pivotAddress;
    int i = l;
    int j = r;
    int medianIndex = (elemanSayisi - 1) / 2;   /* index of the (lower) median in the sorted array */

    if(l < r){
        pivotAddress = PivotChoose2(dizi, l, r);

        do{
            /* advance i over elements <= pivot, pull j back over elements > pivot */
            while((i < r) && (dizi[i] <= dizi[pivotAddress])) i++;
            while((j > l) && (dizi[j] > dizi[pivotAddress])) j--;
            Swap(&dizi[i], &dizi[j]);

        }while(i < j);

        /* the last swap happened after i and j crossed -- undo it,
         * then move the pivot into its final position j */
        Swap(&dizi[i], &dizi[j]);
        Swap(&dizi[pivotAddress], &dizi[j]);
        pivotAddress = j;

        if(pivotAddress == medianIndex){
            printf("\n\n");
            printf("Median: %d \n", dizi[pivotAddress]);
            printf("Median index: %d\n", pivotAddress);
        }
        else if(pivotAddress > medianIndex){
            /* median lies in the left partition */
            FindMedian(dizi, l, pivotAddress - 1, elemanSayisi);
        }
        else{
            /* median lies in the right partition */
            FindMedian(dizi, pivotAddress + 1, r, elemanSayisi);
        }

    }
    else if(l == r){
        /* range narrowed to a single element -- it is the median */
        printf("\n\nMedian: %d \nMedian index: %d\n", dizi[l], l);
    }

}
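This quickselect-style FindMedian leans on two helpers the example does not include, Swap and PivotChoose2. The stand-ins below are guesses: Swap is the obvious exchange, and this PivotChoose2 simply returns the leftmost index, whereas the original may well use median-of-three or a random pivot.

void Swap(int *a, int *b){
    int temp = *a;
    *a = *b;
    *b = temp;
}

/* stand-in pivot selector: pick the leftmost element of the range;
 * the real PivotChoose2 is not shown and may choose differently */
int PivotChoose2(int dizi[], int l, int r){
    (void)dizi;
    (void)r;
    return l;
}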
double SimpleAutofocus::SharpnessAtZ(const double z)
{
   MMThreadGuard g(busyLock_);
   busy_ = true;
   Z(z);
   min1_ = 1.e8;
   min2_ = 1.e8;
   max1_ = -1.e8;
   max2_ = -1.e8;
   // the crop factor, median filter and 3x3 high-pass process follows the java implementation from Pakpoom Subsoontorn & Hernan Garcia  -- KH
   int w0 = 0, h0 = 0, d0 = 0;
   double sharpness = 0;
   pCore_->GetImageDimensions(w0, h0, d0);
   int width =  (int)(cropFactor_*w0);
   int height = (int)(cropFactor_*h0);
   int ow = (int)(((1-cropFactor_)/2)*w0);
   int oh = (int)(((1-cropFactor_)/2)*h0);
   const unsigned long thisSize = sizeof(*pSmoothedIm_)*width*height;
   if( thisSize != sizeOfSmoothedIm_)
   {
      if(NULL!=pSmoothedIm_)
         free(pSmoothedIm_);
      // malloc is faster than new...
      pSmoothedIm_ = (float*)malloc(thisSize);
      if(NULL!=pSmoothedIm_)
      {
         sizeOfSmoothedIm_ = thisSize;
      }
      else // todo throw out of here...
      {
         busy_=false;
         return sharpness;
      }
   }
   // copy from MM image to the working buffer
   ImgBuffer image(w0,h0,d0);
   //snap an image
   const unsigned char* pI = reinterpret_cast<const unsigned char*>(pCore_->GetImage());
   const unsigned short* pSInput = reinterpret_cast<const unsigned short*>(pI);
   int iindex;
   bool legalFormat = false;
   // to keep it simple always copy to a short array
   if( 0 != pSInput)
   {
      switch( d0)
      {
      case 1:
         legalFormat = true;
         if( sizeOfTempShortBuffer_ != sizeof(unsigned short)*w0*h0)
         {
            if( NULL != pShort_)
               free(pShort_);
            // malloc is faster than new...
            pShort_ = (unsigned short*)malloc( sizeof(unsigned short)*w0*h0);
            if( NULL!=pShort_)
            {
               sizeOfTempShortBuffer_ = sizeof(unsigned short)*w0*h0;
            }
         }
         for(iindex = 0; iindex < w0*h0; ++iindex)
         {
            pShort_[iindex] = pI[iindex];
         }
         break;
      case 2:
         legalFormat = true;
         if( sizeOfTempShortBuffer_ != sizeof(unsigned short)*w0*h0)
         {
            if( NULL != pShort_)
               free(pShort_);
            pShort_ = (unsigned short*)malloc( sizeof(unsigned short)*w0*h0);
            if( NULL!=pShort_)
            {
               sizeOfTempShortBuffer_ = sizeof(unsigned short)*w0*h0;
            }
         }
         for(iindex = 0; iindex < w0*h0; ++iindex)
         {
            pShort_[iindex] = pSInput[iindex];
         }
         break;
      default:
         break;
      }
   }
   if(legalFormat)
   {
      // calculate the standard deviation & mean
      long nPts = 0;
      mean_ = 0;
      double M2 = 0;
      double delta;
      // one-pass algorithm for mean and std from Welford / Knuth  - KH
      for (int i=0; i<width; i++)
      {
         for (int j=0; j<height; j++)
         {
            ++nPts;
            long value = pShort_[ow+i+ w0*(oh+j)];   // stride of the full image buffer is w0
            delta = value - mean_;
            mean_ = mean_ + delta/nPts;
            M2 = M2 + delta*(value - mean_); // note: this uses the updated value of mean_
         }
      }
      //double variance_n = M2/nPts;
      double variance = M2/(nPts - 1);
      standardDeviationOverMean_ = 0.;
      double meanScaling = 1.;
      if( 0. != mean_)
      {
         standardDeviationOverMean_ = pow(variance,0.5)/mean_;
         meanScaling = 1./mean_;
      }
      LogMessage("N " + boost::lexical_cast<std::string,long>(nPts) + " mean " +  boost::lexical_cast<std::string,float>((float)mean_) + " nrmlzd std " +  boost::lexical_cast<std::string,float>((float)standardDeviationOverMean_) );
      // TODO -- eliminate the copy above.
      int x[9];
      int y[9];
      /*Apply 3x3 median filter to reduce shot noise*/
      for (int i=0; i<width; i++) {
         for (int j=0; j<height; j++) {
            float theValue;
            x[0]=ow+i-1;
            y[0]= (oh+j-1);
            x[1]=ow+i;
            y[1]= (oh+j-1);
            x[2]=ow+i+1;
            y[2]= (oh+j-1);
            x[3]=ow+i-1;
            y[3]=(oh+j);
            x[4]=ow+i;
            y[4]=(oh+j);
            x[5]=ow+i+1;
            y[5]=(oh+j);
            x[6]=ow+i-1;
            y[6]=(oh+j+1);
            x[7]=ow+i;
            y[7]=(oh+j+1);
            x[8]=ow+i+1;
            y[8]=(oh+j+1);
            // truncate the median filter window  -- duplicate edge points
            // this could be more efficient, we could fill in the interior image [1,w0-1]x[1,h0-1] then explicitly fill in the edge pixels.
            for(int ij =0; ij < 9; ++ij)
            {
               if( x[ij] < 0)
                  x[ij] = 0;
               else if( w0-1 < x[ij])
                  x[ij] = w0-1;
               if( y[ij] < 0)
                  y[ij] = 0;
               else if( h0-1 < y[ij])
                  y[ij] = h0-1;
            }
            std::vector<unsigned short> windo;
            for(int ij = 0; ij < 9; ++ij)
            {
               windo.push_back(pShort_[ x[ij] + w0*y[ij]]);
            }
            // N.B. the original window filler ported from Java (commented out below) needs a guaranteed pad around the cropped image! KH
            //windo[0] = pShort_[ow+i-1 + width*(oh+j-1)];
            //windo[1] = pShort_[ow+i+ width*(oh+j-1)];
            //windo[2] = pShort_[ow+i+1+ width*(oh+j-1)];
            //windo[3] = pShort_[ow+i-1+ width*(oh+j)];
            //windo[4] = pShort_[ow+i+ width*(oh+j)];
            //windo[5] = pShort_[ow+i+1+ width*(oh+j)];
            //windo[6] = pShort_[ow+i-1+ width*(oh+j+1)];
            //windo[7] = pShort_[ow+i+ width*(oh+j+1)];
            //windo[8] = pShort_[ow+i+1+ width*(oh+j+1)];
            // to reduce the effect of bleaching on the high-pass sharpness measurement, I use the image normalized by the mean - KH.
            theValue = (float)((double)FindMedian(windo)*meanScaling);
            pSmoothedIm_[i + j*width] = theValue;
            // the dynamic range of the normalized image is a very strong function of the image sharpness, also  - KH
            // here I'm using dynamic range of the median-filter image
            // a faster measure could skip the median filter, but use the sum of the 5 - 10 highest and 5 - 10 lowest normalized pixels
            // average over a couple of points to lessen effect of fluctuations & noise
            // todo - make the active measure of image sharpness user-selectable
            // save the  max points and the min points
            if( theValue < min1_ )
            {
               min2_ = min1_;
               min1_ = theValue;
            }
            else if (theValue < min2_)
            {
               min2_=theValue;
            }
            if( max1_ < theValue)
            {
               max2_ = max1_;
               max1_ = theValue;
            }
            else if (max2_ < theValue )
            {
               max2_=theValue;
            }
         }
      }
      /*Edge detection using a 3x3 filter: [-2 -1 0; -1 0 1; 0 1 2]. Then sum all pixel values. Ideally, the sum is large if most edges are sharp*/
      for (int k=1; k<width-1; k++) {
         for (int l=1; l<height-1; l++)
         {
            double convolvedValue = -2.0*pSmoothedIm_[k-1 + width*(l-1)] - pSmoothedIm_[k+ width*(l-1)]-pSmoothedIm_[k-1 + width*l]+pSmoothedIm_[k+1 + width*l]+pSmoothedIm_[k+ width*(l+1)]+2.0*pSmoothedIm_[k+1+ width*(l+1)];
            sharpness = sharpness + convolvedValue*convolvedValue;
         }
      }
      //free(pShort);
   }
   busy_ = false;
   latestSharpness_ = sharpness;
   pPoints_->InsertPoint(acquisitionSequenceNumber_++,(float)z,(float)mean_,(float)standardDeviationOverMean_,latestSharpness_,(float)( 0.5*((max1_+max2_)-(min1_+min2_))));
   return sharpness;
}
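SharpnessAtZ leaves FindMedian undefined in this excerpt; all it needs is the median of a 9-pixel window. A sketch consistent with that call, taking the vector by value so the window buffer is not reordered for the caller, could be:

#include <algorithm>
#include <cstddef>
#include <vector>

// Median of a small window of pixel values; for the 9-element windows built
// above this returns the 5th-smallest value. Sketch only -- the overload in
// the original source may differ.
unsigned short FindMedian(std::vector<unsigned short> values)
{
   std::size_t mid = values.size() / 2;
   std::nth_element(values.begin(), values.begin() + mid, values.end());
   return values[mid];
}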
Example #7
double GetScore(unsigned short* img, int w0, int h0, double cropFactor)
{
   unsigned short windo[9];
   int width =  (int)(cropFactor * w0);
   int height = (int)(cropFactor * h0);
   int ow = (int)(((1-cropFactor)/2)*w0);
   int oh = (int)(((1-cropFactor)/2)*h0);
   unsigned short* smoothedImage = new unsigned short[width*height];
   // calculate the standard deviation & mean
   long nPts = 0;
   double mean = 0;
   double M2 = 0;
   double delta;
   // one-pass algorithm for mean and std from Welford / Knuth  - KH
   for (int i=0; i<width; i++)
   {
      for (int j=0; j<height; j++)
      {
         ++nPts;
         long value = img[ow+i+ w0*(oh+j)];   // stride of the full image buffer is w0
         delta = value - mean;
         mean = mean + delta/nPts;
         M2 = M2 + delta*(value - mean); // note: this uses the updated value of mean
      }
   }
   //double variance_n = M2/nPts;
   double variance = M2/(nPts - 1);
   double stdOverMean = 0.;
   double meanScaling = 1.;
   if( 0. != mean)
   {
      stdOverMean = pow(variance,0.5)/mean;
      meanScaling = 1./mean;
   }
   //LogMessage("N " + boost::lexical_cast<std::string,long>(nPts) + " mean " +  boost::lexical_cast<std::string,float>((float)mean_) + " nrmlzd std " +  boost::lexical_cast<std::string,float>((float)standardDeviationOverMean_) );
   // TODO -- eliminate the copy above.
   int x[9];
   int y[9];
   // extremes of the normalized, median-filtered image, tracked across the whole
   // crop (computed below but not used in the returned sharpness score)
   double min1 = 1.e8;
   double min2 = 1.e8;
   double max1 = -1.e8;
   double max2 = -1.e8;
   /*Apply 3x3 median filter to reduce shot noise*/
   for (int i=0; i<width; i++)
   {
      for (int j=0; j<height; j++)
      {
         float theValue;
         x[0]=ow+i-1;
         y[0]= (oh+j-1);
         x[1]=ow+i;
         y[1]= (oh+j-1);
         x[2]=ow+i+1;
         y[2]= (oh+j-1);
         x[3]=ow+i-1;
         y[3]=(oh+j);
         x[4]=ow+i;
         y[4]=(oh+j);
         x[5]=ow+i+1;
         y[5]=(oh+j);
         x[6]=ow+i-1;
         y[6]=(oh+j+1);
         x[7]=ow+i;
         y[7]=(oh+j+1);
         x[8]=ow+i+1;
         y[8]=(oh+j+1);
         // truncate the median filter window  -- duplicate edge points
         // this could be more efficient, we could fill in the interior image [1,w0-1]x[1,h0-1] then explicitly fill in the edge pixels.
         for(int ij =0; ij < 9; ++ij)
         {
            if( x[ij] < 0)
               x[ij] = 0;
            else if( w0-1 < x[ij])
               x[ij] = w0-1;
            if( y[ij] < 0)
               y[ij] = 0;
            else if( h0-1 < y[ij])
               y[ij] = h0-1;
         }
         for(int ij = 0; ij < 9; ++ij)
         {
            windo[ij] = img[x[ij] + w0*y[ij]];
         }
         // N.B. the original window filler ported from Java (commented out below) needs a guaranteed pad around the cropped image! KH
         //windo[0] = pShort_[ow+i-1 + width*(oh+j-1)];
         //windo[1] = pShort_[ow+i+ width*(oh+j-1)];
         //windo[2] = pShort_[ow+i+1+ width*(oh+j-1)];
         //windo[3] = pShort_[ow+i-1+ width*(oh+j)];
         //windo[4] = pShort_[ow+i+ width*(oh+j)];
         //windo[5] = pShort_[ow+i+1+ width*(oh+j)];
         //windo[6] = pShort_[ow+i-1+ width*(oh+j+1)];
         //windo[7] = pShort_[ow+i+ width*(oh+j+1)];
         //windo[8] = pShort_[ow+i+1+ width*(oh+j+1)];
         // to reduce the effect of bleaching on the high-pass sharpness measurement, I use the image normalized by the mean - KH.
         theValue = (float)((double)FindMedian(windo,8)*meanScaling);
         smoothedImage[i + j*width] = (unsigned short)theValue;
         // the dynamic range of the normalized image is a very strong function of the image sharpness, also  - KH
         // here I'm using dynamic range of the median-filter image
         // a faster measure could skip the median filter, but use the sum of the 5 - 10 highest and 5 - 10 lowest normalized pixels
         // average over a couple of points to lessen effect of fluctuations & noise
         // todo - make the active measure of image sharpness user-selectable
         // save the max points and the min points
         if( theValue < min1 )
         {
            min2 = min1;
            min1 = theValue;
         }
         else if (theValue < min2)
         {
            min2 = theValue;
         }
         if( max1 < theValue)
         {
            max2 = max1;
            max1 = theValue;
         }
         else if (max2 < theValue )
         {
            max2 = theValue;
         }
      }
   }
   /*Edge detection using a 3x3 filter: [-2 -1 0; -1 0 1; 0 1 2]. Then sum all pixel values. Ideally, the sum is large if most edges are sharp*/
   double sharpness(0.0);
   for (int k=1; k<width-1; k++) {
      for (int l=1; l<height-1; l++)
      {
         double convolvedValue = -2.0*smoothedImage[k-1 + width*(l-1)] - smoothedImage[k+ width*(l-1)]-smoothedImage[k-1 + width*l]+smoothedImage[k+1 + width*l]+smoothedImage[k+ width*(l+1)]+2.0*smoothedImage[k+1+ width*(l+1)];
         sharpness = sharpness + convolvedValue*convolvedValue;
      }
   }
   delete[] smoothedImage;
   return sharpness;
}
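GetScore is self-contained apart from FindMedian(windo,8). A hypothetical caller, sketched here only to show how the score might be used (the frame-stack container and the SharpestFrame name are invented for illustration), could pick the sharpest image of a focus series like this:

#include <cstddef>
#include <vector>

// Hypothetical helper: return the index of the sharpest frame in a stack of
// equally sized 16-bit images. 'frames' and its layout are assumptions made
// for this sketch, not part of the original code.
int SharpestFrame(const std::vector<unsigned short*>& frames, int w0, int h0, double cropFactor)
{
   int best = -1;
   double bestScore = -1.0;
   for (std::size_t k = 0; k < frames.size(); ++k)
   {
      double score = GetScore(frames[k], w0, h0, cropFactor);
      if (score > bestScore)
      {
         bestScore = score;
         best = (int)k;
      }
   }
   return best;
}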
void
UpdateAdMedian(char *state, double ts, double value) 
{
	struct ad_median_state *s = (struct ad_median_state *)state;
	int curr_size;
	int win;
	double less_val;
	double eq_val;
	double more_val;
	double less_err;
	double eq_err;
	double more_err;
	int lo_offset;
	int hi_offset;
	

	curr_size = F_COUNT(s->series);

	/*
	 * s->max is the maximum window size, and M_count is the
	 * current amount of data in the median buffer
	 */
	
	if(curr_size > s->max)
	{
		s->M_count = curr_size = s->max;
	}
	else
	{
		s->M_count = curr_size;
	}

	/*
	 * update the sorted list
	 */
	
	/*
	 * increment the artificial time stamp
	 */
	s->artificial_time = s->artificial_time + 1;

	/*
	 * use artificial time stamp instead of real one to
	 * keep things in terms of entries instead of seconds
	 */
	MSort(s->M_array,s->M_ts,value,s->artificial_time,curr_size);
	
	
	/*
	 * calculate the window based on how much data there is
	 */
	if(curr_size > s->win)
	{
		win = s->win;
	}
	else
	{
		win = curr_size;
	}
	/*
	 * find the median using the current
	 * window size
	 */
	eq_val = FindMedian(s->M_array,
			    s->M_ts,
			    s->M_count,
			    s->artificial_time,
			    win);
	
	/*
	 * we want to wait until there is enough data before we start
	 * to adapt.  We don't start to adjust s->win until there is
	 * enough data to get out to the max window size
	 */
	if(curr_size < s->max)
	{
		return;
	}
	
	if((win - s->offset) < 0)
	{
		lo_offset = win - 1;
	}
	else
	{
		lo_offset = s->offset;
	}
	
	if((win + s->offset) > s->M_count)
	{
		hi_offset = s->M_count - win - 1;
	}
	else
	{
		hi_offset = s->offset;
	}
	
	/*
	 * find the median for a smaller window -- offset
	 * controls how much smaller or bigger the window should be
	 * that we consider
	 */
	less_val = FindMedian(s->M_array,
			      s->M_ts,
			      s->M_count,
			      s->artificial_time,
			      lo_offset);

	/*
	 * find the median for a bigger window -- offset
	 * controls how much smaller or bigger the window should be
	 * that we consider
	 */
	more_val = FindMedian(s->M_array,
			      s->M_ts,
			      s->M_count,
			      s->artificial_time,
			      hi_offset);
	
	/*
	 * now, calculate the errors
	 */
	less_err = (value - less_val) * (value - less_val);
	more_err = (value - more_val) * (value - more_val);
	eq_err = (value - eq_val) * (value - eq_val);
	
	/*
	 * adapt the window according to the direction giving us the
	 * smallest error
	 */
	if(less_err < eq_err)
	{
		if(less_err < more_err)
		{
			win = win - 1;
		}
		else if(more_err < eq_err)
		{
			win = win + 1;
		}
	}
	else if(more_err < eq_err)
	{
		if(more_err < less_err)
		{
			win = win + 1;
		}
		else if(less_err < eq_err)
		{
			win = win - 1;
		}
	}

	s->win = win;

	return;
}
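UpdateAdMedian and ForcAdMedian form an update/predict pair over the opaque state buffer. How that buffer is allocated and initialized is not shown in these examples, so the sketch below assumes an already-initialized state pointer and simply feeds measurements through the two functions above:

#include <stdio.h>

/* hypothetical driver: push each new measurement in, then ask for a forecast */
void RunAdMedian(char *state, const double *ts, const double *vals, int n)
{
	int i;
	double forecast;

	for (i = 0; i < n; i++)
	{
		UpdateAdMedian(state, ts[i], vals[i]);
		if (ForcAdMedian(state, &forecast))
		{
			printf("t=%.3f value=%.3f forecast=%.3f\n", ts[i], vals[i], forecast);
		}
	}
}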