Example #1
void VideoGeneration::on_generateVideoPushButton_clicked()
{
	//string camPath = "C:\\Users\\dehandecroos\\Desktop\\Videos\\PRG28.avi";
	QString profileId = ui.profileName_lineEdit->text();


	
	string cameraIds[] = { 
		"camera_node_6_log", 
		"camera_node_1_log", 
		"camera_node_28_log",
		"camera_node_23_log"
	};
	int cameraIdsSize = sizeof(cameraIds) / sizeof(*cameraIds);
	string finalJoinQuery = "";
	int i = 1;
	for (string cameraId : cameraIds)
	{
		finalJoinQuery += "select * from " + cameraId + " where profile_id='" + profileId.toStdString() + "'";
		if (i++ != cameraIdsSize) {
			finalJoinQuery += "union ";
		}
	}
	finalJoinQuery += "order by TimeStamp";
	
	struct CameraTimeStamp{
		string cameraId;
		double timestamp;
	};
	
	
	stmt->execute("USE camera");
	ResultSet *timeStampsForProfile = stmt->executeQuery(finalJoinQuery);
	vector<CameraTimeStamp> timeStamps;
	
	while (timeStampsForProfile->next())
	{
		CameraTimeStamp timeStamp;
		timeStamp.cameraId = timeStampsForProfile->getString("Video_ID"); // Video_ID holds the camera number
		timeStamp.timestamp = timeStampsForProfile->getDouble("TimeStamp");
		timeStamps.push_back(timeStamp);
	}
	delete timeStampsForProfile; // Connector/C++ result sets must be freed by the caller

	
	vector<Mat> video;
	for (CameraTimeStamp ts : timeStamps)
	{
		string camPath = "C:\\AdaptiveCameraNetworkPack\\Videos\\";
		string camId = ts.cameraId;
		camPath += "PRG" + camId + ".avi";
		VideoCapture cam;
		cam.open(camPath);
		if (!cam.isOpened())
			continue; // skip cameras whose video file is missing or unreadable
		int frameRate = (int)cam.get(CV_CAP_PROP_FPS); // get() returns double

		// The timestamp is packed as MM.SSmmm: the integer part holds minutes,
		// the first two fractional digits seconds, and the remainder milliseconds.
		int minutes = (int)ts.timestamp;
		int seconds = (int)((ts.timestamp - (double)minutes) * 100.0);
		int milis = (int)(((ts.timestamp - (double)minutes) * 100.0 - seconds) * 1000.0);
		
		int milliseconds = (minutes * 60 + seconds) * 1000 + milis;
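		// e.g. ts.timestamp == 1.50 decodes to 1 min, 50 s, 0 ms, so
		// milliseconds == (1*60 + 50)*1000 + 0 == 110000.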
		qDebug() << "Extracted Frames for time " + QString::number(ts.timestamp) + ", in camera " + QString::fromStdString(camId);
		cam.set(CV_CAP_PROP_POS_MSEC, milliseconds);
		
		
		for (int frameCount = 0; frameCount < frameRate; frameCount++) // about one second of video
		{
			Mat frame;
			cam >> frame;
			if (frame.empty())
			{
				break;
			}
			int fontFace = FONT_HERSHEY_SIMPLEX;
			double fontScale = 1;
			int thickness = 3;
			cv::Point textOrg1(10, 50);
			putText(frame, "CAM:" + ts.cameraId, textOrg1, fontFace, fontScale, Scalar::all(0),2);
			video.push_back(frame);
		}
		
		


	}

	if (video.empty()){
		QImage finalImage(ui.lblOutput->width(), ui.lblOutput->height(), QImage::Format_RGB888);
		finalImage.fill(Qt::black); // QImage pixels are uninitialized until filled
		QPainter qp(&finalImage);
		qp.setPen(Qt::red);
		qp.setFont(QFont("Times", 12, QFont::Bold));
		qp.drawText(finalImage.rect(), Qt::AlignCenter, "NO VIDEO FOR "+ profileId);
		ui.lblOutput->setPixmap(QPixmap::fromImage(finalImage));
	}
	else
	{
		for (Mat frameZ : video)
		{
			Mat frameForQimage;
			cvtColor(frameZ, frameForQimage, CV_BGR2RGB);
			QImage outImage((uchar*)frameForQimage.data, frameForQimage.cols, frameForQimage.rows, frameForQimage.step, QImage::Format_RGB888);
			ui.lblOutput->setPixmap(QPixmap::fromImage(outImage));
			imshow("Output", frameZ);
			waitKey(1);

		}
	}
}
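Note: because the loop above splices profileId straight into the SQL text, a bound parameter is safer. A minimal sketch using the MySQL Connector/C++ prepared-statement API, assuming the same sql::Connection *con that produced stmt is in scope (table names cannot be bound, so one statement per camera table):

#include <cppconn/prepared_statement.h>
#include <cppconn/resultset.h>
#include <memory>

std::unique_ptr<sql::PreparedStatement> pstmt(con->prepareStatement(
	"select * from camera_node_6_log where profile_id = ? order by TimeStamp"));
pstmt->setString(1, profileId.toStdString()); // value is bound, never concatenated
std::unique_ptr<sql::ResultSet> rs(pstmt->executeQuery());
while (rs->next()) { /* read Video_ID / TimeStamp as in the loop above */ }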
Example #2
void	PM_dem::detect( Mat &img, float score_thresh, bool show_hints, bool show_img, string save_img )
{
	if( score_thresh==DEFAULT_THRESH )
		model.threshing = model.thresh;
	else
		model.threshing = score_thresh;

	hints = show_hints;

	// 1. Feature pyramid <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
	prag_start = yuGetCurrentTime('M');
	if( hints ){
		printf("Calculating feature pyramid ...\n");
		start_clock = prag_start;
	}

	featpyramid2( img, model, pyra );	

	if( hints ){
		end_clock = yuGetCurrentTime('M');
		printf("Time for _featpyramid is %gs\n",(end_clock-start_clock)/1000.f);		
	}

	// 2. Compute PCA projection of the feature pyramid <<<<<<<<<<<<<<
	if( hints ){
		printf("Compute PCA projection of the feature pyramid ...\n");
		start_clock = end_clock;
	}
	
	/*Mat Ctest = model.C2(Rect(0,155,32,1));
	cout<<Ctest;*/

	//project_pyramid( model, pyra );

	
	if( hints ){
		end_clock = yuGetCurrentTime('M');
		printf("Time for _project_pyramid() is %gs\n",(end_clock-start_clock)/1000.f);
	}

	if (hints)
	{
		start_clock = end_clock;
		printf("Quantizing the feature pyramid ...\n");
	}

	qtpyra(model,pyra);
	
	if (hints)
	{
		end_clock = yuGetCurrentTime('M');
		printf("%gs\n",(end_clock-start_clock)/1000.f);
	}
	
	if( pyra.num_levels!=pyra.feat.size() ){
		printf("pyra.num_levels!=pyra.feat.size()\n");
		throw	runtime_error("pyra.num_levels != pyra.feat.size()");	
	}


	// 3. Precompute location/scale scores <<<<<<<<<<<<<<<<<<<<<<<
	Mat	loc_f = loc_feat( model, pyra.num_levels );
	pyra.loc_scores.resize( model.numcomponents );
	for( int c=0; c<model.numcomponents; c++ ){
		Mat	loc_w( 1, model.loc[c].w.size(), CV_32FC1, &(model.loc[c].w[0]) ); // loc_w = model.loc[c].w 
		pyra.loc_scores[c] = loc_w * loc_f; 
	}
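	// (For the product above to be defined, loc_f must be |w| x num_levels, so
	// each pyra.loc_scores[c] is a 1 x num_levels row: one location/scale score
	// per pyramid level, read back via .at<float>(i) in step 4.)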

	// 4. Gather PCA root filters for convolution <<<<<<<<<<<<<<<<<<<
	if( hints ){
		printf("Gathering PCA root filters for convolution ...\n");
		start_clock = end_clock;
	}

	if( rootscores.empty() || rootscores[0].size()!=(size_t)pyra.num_levels ){
		vector<Mat>	tmp_rootscores(pyra.num_levels);
		rootscores.assign(model.numcomponents,tmp_rootscores);
	}	

//	ofstream f1("pj.txt");

	int	numrootlocs = 0;
	int	s = 0; // will hold the amount of temp storage needed by cascade()
	for( int i=0; i<pyra.num_levels; i++ ){
		s += pyra.feat[i].rows * pyra.feat[i].cols;
		if( i<model.interval )
			continue;
		static vector<Mat>	scores; // static: buffers are reused across calls to avoid reallocation
		//fconv( pyra.projfeat[i], rootfilters, 0, numrootfilters, scores );
		fconv_root_qt(model, pyra.qtfeat[i], scores);
		for( int c=0; c<model.numcomponents; c++ ){
			int u = model.components[c].rootindex;
			int v = model.components[c].offsetindex;
			float	tmp = model.offsets[v].w + pyra.loc_scores[c].at<float>(i);
			rootscores[c][i] = scores[u] + Scalar(tmp);
			numrootlocs += scores[u].total();
		}
	}
	//cout<<numrootlocs<<endl; // debug output
	s = s * model.partfilters.size();

	if( hints ){
		end_clock = yuGetCurrentTime('M');
		printf("Time for gathering PCA root filters is %gs\n",(end_clock-start_clock)/1000.f);
	}

	// 5. Cascade detection in action <<<<<<<<<<<<<<<<<<<<<<<
	if( hints ){
		printf("Cascade detection in action ...\n");
		start_clock = end_clock;
	}

	//Mat coords = cascade(model, pyra, rootscores, numrootlocs, s);
	Mat coords = cascade_qt(model, pyra, rootscores, numrootlocs, s);
	//cout<<coords;
	//cout<<"??"<<endl;

	if( hints ){
		end_clock = yuGetCurrentTime('M');
		printf("Time for _cascade() is %gs\n",(end_clock-start_clock)/1000.f);
	}
	if( coords.empty() ){
		if( hints )
			printf("No Detection!\n");
		return;
	}

	// 6. Detection results <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
	Mat boxes = getboxes( model, img_color, coords );
	Mat x1 = boxes.col(0);
	Mat y1 = boxes.col(1);
	Mat x2 = boxes.col(2);
	Mat y2 = boxes.col(3);
	Mat Score = boxes.col( boxes.cols-1 );	
	detections.resize( x1.rows );
	for( int i=0; i<x1.rows; i++ ){
		detections[i][0] = x1.at<float>(i);
		detections[i][1] = y1.at<float>(i);
		detections[i][2] = x2.at<float>(i);
		detections[i][3] = y2.at<float>(i);
		detections[i][4] = Score.at<float>(i);
	}

	prag_end = yuGetCurrentTime('M'); // prag_end is also used by the time overlay below
	if( hints )
		printf("Total detection time is : %gs\n",(prag_end-prag_start)/1000.f);

	// 7. Draw and show <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<	
	if( show_img || !save_img.empty() ){
		//showboxes( img_color, boxes );

		//
		const int fontFace = CV_FONT_HERSHEY_PLAIN;
		const double fontScale = 1;		
		const Scalar drawColor = CV_RGB(255,0,0);
		const Scalar fontColor = CV_RGB(30,250,150);
		//
		for( int i=0; i!=detections.size(); i++ ){
			float		x1 = detections[i][0], y1 = detections[i][1], x2 = detections[i][2], y2 = detections[i][3];
			float		_score = detections[i][4];			
			//
			Point2f		UL( x1, y1 );
			Point2f		BR( x2, y2 );
			rectangle( img_color, UL, BR, drawColor, 2 );
			printf("----------------------------\n");
			printf("%g  %g  %g  %g  %g\n", x1, y1, x2, y2, _score );
			//
			x1 = int(x1*10+0.5) / 10.f; // keep only one decimal place
			y1 = int(y1*10+0.5) / 10.f;
			x2 = int(x2*10+0.5) / 10.f;
			y2 = int(y2*10+0.5) / 10.f;
			_score = int(_score*100+0.5) / 100.f;			
			//
			char	buf[50] = { 0 };
			sprintf_s( buf, 50, "%d", i );
			string   text = buf;
			int		  baseline = 0;
			Size	  textSize = getTextSize( text, fontFace, fontScale, 1, &baseline );
			Point2f   textOrg2( x1, y1+textSize.height+2 );
			putText( img_color, text, textOrg2, fontFace, fontScale, fontColor );
			//				
			sprintf_s( buf, 50, "%d %g %g %g %g %g", i, x1, y1, x2, y2, _score );
			text = buf;			
			textSize = getTextSize( text, fontFace, fontScale, 1, &baseline );
			Point2f	  textOrg(5,(i+1)*(textSize.height+3));
			putText( img_color, text, textOrg, fontFace, fontScale,	fontColor );
		}
		{
			char	buf[30] = { 0 };
			sprintf_s( buf, 30, "time : %gs", (prag_end-prag_start)/1000.f );
			string		time_text = buf;
			int			baseline = 0;
			Size		textSize = getTextSize( time_text, fontFace, fontScale, 1, &baseline );
			Point2f	time_orig(5,(detections.size()+1)*(textSize.height+3));
			putText( img_color, time_text, time_orig, fontFace, fontScale, fontColor );
		}
		if( show_img )
			imshow( "OK", img_color );		
		if( !save_img.empty() )
			imwrite( save_img, img_color );

	}
	
}
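The in-place rounding above, int(x*10+0.5)/10.f, is a scale-round-unscale idiom for keeping a fixed number of decimals. A standalone sketch of the same trick, correct for the non-negative coordinates and scores handled here (roundTo is a hypothetical helper, not part of PM_dem):

#include <cmath>

// Round v to `decimals` decimal places (half up); valid for non-negative v.
static inline float roundTo(float v, int decimals)
{
	const float s = std::pow(10.f, (float)decimals);
	return std::floor(v * s + 0.5f) / s;
}
// roundTo(12.345f, 1) -> 12.3f;  roundTo(0.678f, 2) -> 0.68f (up to float precision)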
void Objectness::PCANet_pred(Mat img, vector<Mat> &testImg, vector<Rect> &bboxes, PCA_Train_Result* result, PCANet pcaNet){
	
	// cout << "\n ====== PCANet Testing ======= \n" << endl;	
	int testSize = testImg.size();
	// cout << testSize << endl;
	Hashing_Result* hashing_r;
	PCA_Out_Result *out;
	PCA_Out_Result *res;

	int coreNum = omp_get_num_procs();
	
#pragma omp parallel for default(none) num_threads(coreNum) private(out, res, hashing_r) shared(mySVM, libModel, pcaNet, testSize, testImg, result, bboxes, img, color)
	for (int i = 0; i < testSize; i++){
		out = new PCA_Out_Result;		
		out->OutImgIdx.push_back(0);
		out->OutImg.push_back(testImg[i]);
		PCA_output(res,out->OutImg, out->OutImgIdx, pcaNet.PatchSize,
			pcaNet.NumFilters[0], result->Filters[0], 2);
		for (int j = 1; j < pcaNet.NumFilters[1]; j++)
			res->OutImgIdx.push_back(j);
		delete out;
		out = new PCA_Out_Result;
		PCA_output(out, res->OutImg, res->OutImgIdx, pcaNet.PatchSize,
			pcaNet.NumFilters[1], result->Filters[1], 2);
		HashingHist(hashing_r,&pcaNet, out->OutImgIdx, out->OutImg);
		hashing_r->Features.convertTo(hashing_r->Features, CV_32F);
		delete res;
		delete out;

		// OpenCV SVM prediction 
		float pred =  mySVM.predict(hashing_r->Features/*, true*/);

		/*========== liblinear prediction =================*/
		//struct feature_node *svmVec;
		//svmVec = (struct feature_node *) malloc((hashing_r->Features.cols + 1)*sizeof(struct feature_node));
		//double *predictions = new double[hashing_r->Features.rows];
		//float *dataPtr = hashing_r->Features.ptr<float>(); // Get data from OpenCV Mat
		//double prob_est[2];  // Probability estimation
		//int r, c;
		//for (r = 0; r < hashing_r->Features.rows; r++)
		//{
		//	for (c = 0; c < hashing_r->Features.cols; c++)
		//	{
		//		svmVec[c].index = c + 1;  // Index starts from 1; Pre-computed kernel starts from 0
		//		svmVec[c].value = dataPtr[r*hashing_r->Features.cols + c];
		//	}
		//	svmVec[c].index = -1;   // End of line
		//}

		//double pred = predict(libModel, svmVec);
		//free(svmVec);
		//printf("pred: %f\n", pred);

		delete hashing_r;
		

#pragma omp critical 		
		if (pred < 3){
			//cout << imgnames[i] << endl;
			Rect box = bboxes[i]; 

			
			//printf("predict: %f\n", pred);

			/*========== save to patch =============*/
			//cv::Mat croppedImage;
			//img(box).copyTo(croppedImage);
			////imshow("asd", croppedImage);
			//char* patch_name = new char[30];
			//sprintf(patch_name, "%s%d%s", "patch/", k + 1, ".jpg");
			//imwrite(patch_name, croppedImage);
			//k++;
			
			rectangle(img, box, color, 1); // the Rect overload replaces the old cvPoint pair

			// ========= add some text ============
			char	buf[50] = { 0 };
			if (pred == 1)
				sprintf_s(buf, 50, "right");
			else if (pred == 2)
				sprintf_s(buf, 50, "forward");
			else
				sprintf_s(buf, 50, "left");
			string   text = buf;
			int		  baseline = 0;
			Size	  textSize = getTextSize(text, fontFace, fontScale, 1, &baseline);
			Point2f   textOrg2(box.x, box.y + textSize.height + 2);
			putText(img, text, textOrg2, fontFace, fontScale, fontColor);
		}
		
	}
}
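The loop above follows a common OpenMP shape: the per-box PCANet features and SVM prediction run concurrently, while drawing into the shared img is serialized under #pragma omp critical. A minimal sketch of just that skeleton (classify and draw are hypothetical stand-ins):

#include <omp.h>

float classify(int i);          // hypothetical: per-item work, thread-safe
void  draw(int i, float pred);  // hypothetical: mutates shared state

void predict_and_draw(int n)
{
	#pragma omp parallel for
	for (int i = 0; i < n; i++) {
		float pred = classify(i);  // runs concurrently across threads
		#pragma omp critical       // one thread at a time touches shared state
		{
			if (pred < 3)
				draw(i, pred);
		}
	}
}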