Example #1
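// Assumed setup for this example (not shown in the source): the Global image
// library and ProfTimer come from project headers, and the parser dimensions
// below are hypothetical placeholders.
#include <cstdio>      // printf
#include <cstring>     // memcpy
#include "Global.h"    // Global::Image, Global::DataLoad, ... (assumed project header)
#include "ProfTimer.h" // ProfTimer (assumed project header; see the sketch after this example)

const int nWidthParser  = 1024; // hypothetical width
const int nheightParser = 768;  // hypothetical height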
int main()
{
	Global::Image *pImage = Global::DataLoad("../../../Resource/polygon2.bmp");
	Global::Image mImageParser(nWidthParser, nheightParser, Global::TYPE::BYTE, 1);
	Global::Image mImageParser_Temp(nWidthParser, nheightParser, Global::TYPE::BYTE, 1);
	Global::Image mImageParser2(nheightParser, nWidthParser, Global::TYPE::BYTE, 1); // transposed (H x W) destination

	Global::ReSize(pImage, &mImageParser);
	delete pImage; // assuming Global::DataLoad() allocates with new

	Global::Show("Test ..", &mImageParser);
	ProfTimer mProfTimer;
	while (1)
	{
		mProfTimer.Start();

		//Global::Reflection(&mImageParser, &mImageParser_Temp, Global::REFLECTION::VERTICAL);

		// Transpose the W x H source into the H x W destination: walk the
		// source column-major while writing the destination sequentially.
		// (The original experimented with 2-byte memcpy steps and undeclared
		// pData/pDataCopy pointers, which would overrun a 1-channel buffer;
		// this is the plain 1-byte-per-pixel transpose.)
		unsigned char *data = mImageParser2.data.b;  // destination buffer
		unsigned char *data2 = mImageParser.data.b;  // source buffer
		for (long x = 0; x < nWidthParser; x++)
		{
			for (long y = 0; y < nheightParser; y++)
			{
				long lIndex(y * nWidthParser + x);
				*data++ = data2[lIndex]; // one byte per pixel (1-channel BYTE image)
			}
		}
		
		mProfTimer.Stop();
		
		printf("mProfTimer : %f \n", mProfTimer.GetDurationInSecs());
		Global::Show("Test2 ..", &mImageParser2);
	}
	return 0;
}
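
All three examples time their hot loop with a ProfTimer that is never defined here. A minimal stand-in matching the Start/Stop/GetDurationInSecs calls, assuming C++11 <chrono> is available (the original was likely a platform timer wrapper; this is only a sketch):

#include <chrono>

class ProfTimer
{
public:
	void Start() { m_start = std::chrono::steady_clock::now(); }
	void Stop()  { m_stop = std::chrono::steady_clock::now(); }
	double GetDurationInSecs() const
	{
		// duration<double> counts seconds as a double
		return std::chrono::duration<double>(m_stop - m_start).count();
	}
private:
	std::chrono::steady_clock::time_point m_start, m_stop;
};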
Example #2
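// Assumed setup (not shown in the source): the same Global/ProfTimer headers
// as Example #1, plus project helpers DataOpen(), TextData(), Line(),
// DeletePoint(), ReAgainst(), the Sample struct, the RGB8 macro, and the
// nWidthParser/nheightParser/img_wid/img_hig dimensions.
#include <cstdio>   // printf
#include <cstring>  // memcpy, memset
#include <cstdlib>  // std::rand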
int main()
{
	ProfTimer mProfTimer;
	unsigned char *sTextData = TextData();


	float mArrVecParser_CSV[nWidthParser];
	float mArrVecParserCopy[nWidthParser];
	float mArrVecParserCopy2[nWidthParser];

	DataOpen("1909175498_image_profile.csv", mArrVecParser_CSV);
	//DataOpen("-1916029922_image_profile.csv", mArrVecParser_CSV);

	while (1)
	{

		memcpy(mArrVecParserCopy, mArrVecParser_CSV, sizeof(float)* nWidthParser);

		Global::Image mImageParser(nWidthParser, nheightParser, Global::TYPE::BYTE, 3);
		Global::Image mImageParser2(nWidthParser, nheightParser, Global::TYPE::BYTE, 3);
		Global::Image mImageParserReSize(img_wid, img_hig, Global::TYPE::BYTE, 3);

		for (int i = 0; i < nWidthParser; i++)
		{
			Global::Circle(&mImageParser, i, mArrVecParserCopy[i], 1, RGB8(0, 0, 255));
		}

#if 0
		int nXTest0 = 800;
		int nXTest1 = 900;
		int nTest;
		Sample sSample;
		sSample.nX0 = nXTest0;
		sSample.nX1 = nXTest1;
		Line(&mImageParser, mArrVecParserCopy, sSample,true);
		nXTest0 = 400;
		nXTest1 = 810;
		//Line(&mImageParser, nXTest0, nXTest1, mArrVecParserCopy, nTest, true);


		//DeletePoint(nWidthParser, nheightParser, 400, 700, mArrVecParserCopy);

#endif
#if 1
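
		// RANSAC-style search: repeatedly pick two random x positions on the
		// profile, let Line() fit a line through them and count inliers into
		// nSn, and keep the hypothesis with the most inliers.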
		int nLineCount(1);
		const int nSample = 1000;
		Sample sSample[nSample];
		while (nLineCount)
		{
			mProfTimer.Start();
			nLineCount--;

			int nArrNumderSize(0);
			int nSampleCount(0);
			memset(sSample, 0, sizeof(Sample)* nSample);

			int nMaxSample = 0;
			int nMaxSampleArrNum = 0;
			while (nSample - nArrNumderSize)
			{
				float fCutLine = nheightParser / 1.2;
				int nRandX0 = (std::rand() % nWidthParser); // duplicate picks: handled later
				int nRandX1 = (std::rand() % nWidthParser); // duplicate picks: handled later


				if (nRandX0 == nRandX1 || mArrVecParserCopy[nRandX0] < fCutLine || mArrVecParserCopy[nRandX1] < fCutLine)
				{
					continue;
				}

				sSample[nSampleCount].nX0 = nRandX0;
				sSample[nSampleCount].nX1 = nRandX1;
				if (Line(&mImageParser, mArrVecParserCopy, sSample[nSampleCount]))
				{
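					// a new best needs strictly more inliers than the current
					// best, and more than 10 overall; note nSampleCount only
					// advances on a new best, so losing samples are overwritten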
					if (sSample[nSampleCount].nSn > nMaxSample && sSample[nSampleCount].nSn > 10)
					{
						nMaxSample = sSample[nSampleCount].nSn;
						nMaxSampleArrNum = nSampleCount;
						nSampleCount++;
						//printf(" nMaxSampleArrNum : %d \n", nMaxSampleArrNum);
					}

					nArrNumderSize++;
					
				}
			}
			if (nSampleCount > 0)
			{
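				// Line() with a null image presumably evaluates the best sample
				// without drawing; ReAgainst() then rebuilds a fresh profile
				// copy against that line (project helpers, semantics assumed)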
				Line(0, mArrVecParserCopy, sSample[nMaxSampleArrNum], true);
				memcpy(mArrVecParserCopy2, mArrVecParser_CSV, sizeof(float)* nWidthParser);
				ReAgainst(nWidthParser, nheightParser, sSample[nMaxSampleArrNum], mArrVecParserCopy2);
			}
			mProfTimer.Stop();
			printf("mProfTimer : %f \n", mProfTimer.GetDurationInSecs());


			if (nSampleCount > 0)
			{
				Line(&mImageParser, mArrVecParserCopy, sSample[nMaxSampleArrNum], true);
				DeletePoint(nWidthParser, nheightParser, sSample[nMaxSampleArrNum], mArrVecParserCopy);


				for (int i = 0; i < nWidthParser; i++)
				{
					Global::Circle(&mImageParser2, i, mArrVecParserCopy2[i], 2, RGB8(0, 0, 255));
				}
			}
			//break;
		}
#endif
#if 0
		Global::Show("Parser ", &mImageParser);
		Global::Show("Test ", &mImageParser2);
#else	
		Global::Filter2D(&mImageParser2, &mImageParser2, Global::FILTER::GAUSSIAN, 1);
		Global::ReSize(&mImageParser2, &mImageParserReSize);
		Global::Show("Test ", &mImageParserReSize);

		Global::Filter2D(&mImageParser, &mImageParser, Global::FILTER::GAUSSIAN, 1);
		Global::ReSize(&mImageParser, &mImageParserReSize);
		Global::Show("Parser ", &mImageParserReSize);
#endif
		//Sleep(1);
	}

	delete[] sTextData; // assuming TextData() allocates the buffer with new[]
	return 0;
}
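
The example above leans on a project-specific Sample struct and Line() helper that are not shown. A minimal sketch inferred from the call sites (only nX0, nX1 and nSn are actually touched by the example; everything else, including the exact Line() signature, is an assumption):

struct Sample
{
	int nX0; // first sampled x position on the profile
	int nX1; // second sampled x position on the profile
	int nSn; // inlier count filled in by Line()
};

// Fits a line through the profile at nX0/nX1, stores the inlier count in
// sSample.nSn, and draws it when pImage is non-null. The return value signals
// a usable fit. (Inferred signature, not the original.)
bool Line(Global::Image *pImage, float *pProfile, Sample &sSample, bool bDraw = false);

Example #3

// Assumed setup (not shown in the source): OpenCV 1.x plus the project's
// utils, Opticalflow, Kmeans, Frame, local, mrf, ComponentFeature and
// valueToScalar() declarations.
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <list>
#include <vector>
#include <map>
#include <string>
using namespace std;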
/*
void CompareHistogram(Frame &pre,Frame &cur,Frame &next)
{
	vector<local*>::iterator pre_iter=cur.sa.begin();
    for (int i=0;i<cur.sa.size();i++)
    {
		cur.sa[i]->hist;
    }
	for (int i=0;i<4;i++)
	{
		for (int j=0;j<8;j++)
		{
			cur.sa[i*j+j]->hist;

		}
	}
	
}*/
int main(void)
{
	static long code=1000000;
	const static long start_frame=0;
	ofstream zhjg("zhjg.txt");
	ofstream logs("log.txt");
	list<vector<vector<ComponentFeature>>> video_Feature;
	CvCapture *input_video = cvCaptureFromFile("D:\\amit\\subway_exit_turnstiles.AVI");
	if (input_video == NULL)
	{
		
		fprintf(stderr, "Error: Can't open video.\n");
		return -1;
	}
	CvSize frame_size;
	frame_size.height =(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
	frame_size.width =(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );

	/* Determine the number of frames in the AVI. */
	cvQueryFrame( input_video);
	long number_of_frames;
	number_of_frames = (long) cvGetCaptureProperty(input_video, CV_CAP_PROP_FRAME_COUNT);
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, 0);
    //cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);
	long current_frame = start_frame;
	while(true)
	{
		static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL,*frame1out=NULL,*frame2out=NULL;
		vector<IplImage*> frame1_1C_subregions; // tiles of the first frame
		vector<IplImage*> frame2_1C_subregions; // tiles of the second frame
		int width_step = 0;
		// split each frame into frame_width x frame_height (8x8 = 64) subregions
		const int frame_width = 8;
		const int frame_height = 8;
		char framename[16]; // "1000000"+frame needs 8 bytes; the original char[7] overflowed
		sprintf(framename, "%ld", code + current_frame); // itoa() is non-standard; sprintf is portable
		//code++;
		/* Go to the frame we want.  Important if multiple frames are queried in
		 * the loop, which they of course are for optical flow.  Note that the very
		 * first call to this is actually not needed. (Because the correct position
		 * is set outside the for() loop.)
		 */
		cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, current_frame );

		// grab the first frame
		frame = cvQueryFrame( input_video );
		if (frame == NULL)
		{
			/* Why did we get a NULL frame?  We shouldn't be at the end. */
			fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
			return -1;
		}
		 //current_frame++;
		/* Allocate another image if not already allocated.
		 * Image has ONE channel of color (ie: monochrome) with 8-bit "color" depth.
		 * This is the image format OpenCV algorithms actually operate on (mostly).
		 */

		// frame1_1C: first frame for optical flow, single channel, 8-bit depth
		utils::allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
		/* Convert whatever the AVI image format is into OpenCV's preferred format.
		 * AND flip the image vertically.  Flip is a shameless hack.  OpenCV reads
		 * in AVIs upside-down by default.  (No comment :-))
		 */
		cvConvertImage(frame, frame1_1C,0);
	
		// frame1: color copy of the frame, used for drawing lines on
		utils::allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
		cvConvertImage(frame, frame1, 0);
		//cvShowImage("frame1",frame1);
		utils::allocateOnDemand( &frame1out, frame_size, IPL_DEPTH_8U, 3 );
		cvConvertImage(frame, frame1out, 0);
        
		/* grab the second frame: advance until the next frame index divisible by 25 */
		do 
		{
			frame = cvQueryFrame( input_video );
			if (frame == NULL)
			{
				fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
				return -1;
			}
			current_frame++;

		} while (current_frame%25!=0);
		
		//frame = cvQueryFrame( input_video );
		utils::allocateOnDemand( &frame2out, frame_size, IPL_DEPTH_8U, 3 );
		cvConvertImage(frame, frame2out, 0);

		// frame2_1C: second frame for optical flow, single channel, 8-bit depth
		utils::allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame2_1C, 0);
	//	cvShowImage("frame2_1C",frame2_1C);

		ProfTimer t;
		t.Start();
	    
		vector<vector<ComponentFeature>> frame_components; // per-region 9-D optical-flow feature vectors for this frame
		frame_components.reserve(frame_width*frame_height); // reserve up front to avoid vector reallocations
		frame1_1C_subregions.reserve(frame_width*frame_height);
		frame2_1C_subregions.reserve(frame_width*frame_height);
		// split the first frame into tiles and store them in a vector
		frame1_1C_subregions = GetSubregions(frame1_1C, cvSize(frame_size.width/frame_width, frame_size.height/frame_height), width_step);
		// split the second frame the same way
		frame2_1C_subregions = GetSubregions(frame2_1C, cvSize(frame_size.width/frame_width, frame_size.height/frame_height), width_step);
		// one iterator over each vector of tiles
		vector<IplImage*>::iterator iter_frame1_1C = frame1_1C_subregions.begin();
		vector<IplImage*>::iterator iter_frame2_1C = frame2_1C_subregions.begin();
		// run optical flow on each subregion
		Opticalflow of;
		int flag = 0;
		for (; iter_frame1_1C != frame1_1C_subregions.end(); iter_frame1_1C++, iter_frame2_1C++)
		{
			vector<ComponentFeature> components; // one local region (1/64 of the frame) holds 12 16x16 subregions
			components.reserve(12);
			//of.calOpticalFlowLK(frame1, *iter_frame1_1C, *iter_frame2_1C, flag, width_step, components);
			of.calOpticalFlowVar(frame1, *iter_frame1_1C, *iter_frame2_1C, flag, width_step, components);
			cvReleaseImage(&(*iter_frame1_1C));
			cvReleaseImage(&(*iter_frame2_1C));
			frame_components.push_back(components); // append this local region's features to the frame
			vector<ComponentFeature>().swap(components); // release components' capacity (swap-with-empty idiom)
			flag++;
		}
	  // cvShowImage("Optic",frame1);
	   //cvWaitKey(0);
	    
		video_Feature.push_back(frame_components);//将一帧加入视频流
		vector<vector<ComponentFeature>>().swap(frame_components);
	   
		t.Stop();
		cout<<"time=: "<<t.GetDurationInSecs ()*1000<<"(ms)"<<endl;
		
	//	current_frame++;
      //  cvWaitKey(1);
		
		// once 10 frames of features are buffered, cluster and run the MRF
		if (video_Feature.size() == 10)
		{
			vector<ComponentFeature> tran;
			tran.reserve(video_Feature.size()); // note: only a lower bound; the flatten below pushes far more elements
			// flatten the window: frames -> local regions -> component features
			for (list<vector<vector<ComponentFeature>>>::iterator first = video_Feature.begin(); first != video_Feature.end(); first++)
			{
				for (vector<vector<ComponentFeature>>::iterator second = (*first).begin(); second != (*first).end(); second++)
				{
					for (vector<ComponentFeature>::iterator third = (*second).begin(); third != (*second).end(); third++)
					{
						tran.push_back(*third);
					}
				}
			}
		
			Kmeans k(tran,9,10);
			k.start();
			Frame * f=new Frame[10];
		
			map<int, int>::iterator result_iter = k.result.begin();
			for (; result_iter != k.result.end(); result_iter++)
			{
				// valueToScalar() presumably unpacks the cluster-result key
				// into (frame, row, col) indices for the 10x24x32 sub grid
				CvScalar three = valueToScalar(result_iter->first - 1);
				f[static_cast<int>(three.val[0])].sub[static_cast<int>(three.val[1])][static_cast<int>(three.val[2])] = result_iter->second;
			}
#ifdef DEBUG
			ofstream out("frame.txt");
			for (int i = 0; i < 10; i++)
			{
				out << "################################frame" << i << endl;
				for (int j = 0; j < 24; j++)
				{
					for (int n = 0; n < 32; n++) // renamed from k, which shadowed the Kmeans object
					{
						out << f[i].sub[j][n] << "\t";
					}
					out << endl;
				}
			}
			out.close();
#endif

            for (int i=0;i<10;i++)
			{
				//f[i].height=frame_height;
				//f[i].width=frame_width;
				f[i].frame2locals(cvPoint(0,0));
			}
			//cout<<   f[9].label_result->data.fl[0];
			int label_result[frame_height][frame_width]={0};
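			// run a 2-region MRF (ICM) between each pair of consecutive
			// frames and accumulate the resulting labels per tile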
			for (int k=0;k<9;k++)
			{
				mrf mrf1;

				//mrf1.GetBlock(f,10);
				mrf1.pre=&f[k];
				mrf1.cur=&f[k+1];

				mrf1.width=frame_width;
				mrf1.height=frame_height;
				mrf1.SetNoRegions(2);
				mrf1.InitOutImage();
				//mrf1.Gibbs();
				//mrf1.Metropolis();
				mrf1.ICM();
				for (int i=0;i<frame_height;i++)
				{
					for (int j=0;j<frame_width;j++)
					{
						//int s=f[9].label_result->data.fl[i*frame_width+j];
						label_result[i][j]+=
							f[k+1].label_result->data.fl[i*frame_width+j];
					}
				}

			}
			zhjg<<"##############################"<<current_frame<<endl;
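			// tiles whose accumulated label stayed <= 3 across the 9 passes
			// are flagged as anomalous (border tiles excluded) and boxed in red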
		   
			for (int i=0;i<frame_height;i++)
			{   
				for (int j=0;j<frame_width;j++)
				{
                  zhjg<<label_result[i][j]<<"\t";
				  if ((label_result[i][j]<=3)&&i!=0&&i!=frame_height-1&&j!=0&&j!=frame_width-1)
				  {
					  logs<<current_frame<<endl;
					   int cvheight=frame_size.height/frame_height;
					   int cvwidth=frame_size.width/frame_width;
                       CvPoint startPoint=cvPoint(j*cvwidth,i*cvheight);
					   CvPoint endPoint =cvPoint((j+1)*cvwidth,(i+1)*cvheight);
					   cvRectangle(frame2out,startPoint,endPoint,CV_RGB(255,0,0),1,8,0);

				  }
				}
				zhjg <<endl;
			}
			zhjg<<endl;
          
			// release the heap-allocated local objects owned by each Frame
			for (int i = 0; i < 10; i++)
			{
				vector<local*>::iterator v_l_iter = f[i].sa.begin();
				for (; v_l_iter != f[i].sa.end(); v_l_iter++)
				{
					delete (*v_l_iter);
				}
			}
			delete[] f; // the Frame array itself was new[]-allocated above and leaked in the original

			//break;
			// drop the oldest frame from the 10-frame sliding window
			video_Feature.pop_front();
		}
		// write out the processed image
        string end=".jpg";
		
		string fold="result\\";
		string path=fold+framename+end;
        cvSaveImage(path.c_str(),frame2out);
		/*int key_pressed;
		key_pressed = cvWaitKey(1);
		if (key_pressed=='Q'||key_pressed=='q')
		{
			break;
		}*/
		if (current_frame < start_frame)						
			current_frame = start_frame;
		//if (current_frame >= number_of_frames - 1)	//current_frame = number_of_frames - 2;
		if (current_frame >= number_of_frames - 1)
			break;
	}
	cvReleaseCapture(&input_video); // release the capture handle
	zhjg.close();
	logs.close();
	return 0;
}
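
Example #3 relies on utils::allocateOnDemand(), a helper popularized by the classic OpenCV optical-flow tutorial code this file appears to follow. A sketch of the usual implementation (assumed, not taken from this source):

#include <cstdio>
#include <cstdlib>

namespace utils
{
	inline void allocateOnDemand(IplImage **img, CvSize size, int depth, int channels)
	{
		if (*img != NULL) return;                    // reuse the image across frames
		*img = cvCreateImage(size, depth, channels); // allocate on first use
		if (*img == NULL)
		{
			fprintf(stderr, "Error: Couldn't allocate image.  Out of memory?\n");
			exit(-1);
		}
	}
}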