// Update the scene images shown for the left and right cameras.
void MeasureMarkersWindow::updatePixmap(unsigned char* leftBuffer, unsigned char* rightBuffer)
{
    QImage li = convertToQImage(leftBuffer);
    QImage ri = convertToQImage(rightBuffer);

    mLastLeft = li;
    mLastRight = ri;

    mLeftCameraPixmap->setPixmap(QPixmap::fromImage(li));
    mRightCameraPixmap->setPixmap(QPixmap::fromImage(ri));

    // Repaint the widget with the new images.
    update();
}
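// The convertToQImage helper is not part of this excerpt. A minimal sketch of what such a
// conversion could look like, assuming the buffer is a tightly packed 8-bit BGR frame and
// that the dimensions are known (the parameter names below are hypothetical):
QImage convertToQImage(const unsigned char* buffer, int frameWidth, int frameHeight)
{
    // Wrap the raw buffer without copying, then deep-copy and swap B/R so the
    // returned QImage owns its pixels and displays correct colors.
    QImage view(buffer, frameWidth, frameHeight, frameWidth * 3, QImage::Format_RGB888);
    return view.rgbSwapped();
}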
void QOpenCvImageBox::putImage(IplImage *cvimage)
{
    if (isHsvAllImageConvertedEnabled()) {
        // HSV conversion
        hsvReduce(cvimage);
    }

    // Object detection
    if (isDetectionEnabled()) { // A cascade file is loaded
        if (detected_objs_array != NULL) {
            // Release the previous results
            delete [] detected_objs_array;
        }
        detected_objs_array = detect_objs(cvimage, storage, cascade, od_image_scale, objs_found, od_calc_time);
        draw_objs(cvimage, objs_found, detected_objs_array, CV_RGB(255,0,0), od_image_scale, 0, 0);

        // Sub-object detection
        if (isSubDetectionEnabled()) { // A sub cascade file is loaded
            for (int c = 0; c < objs_found; c++) {
                // Allocate a sub-image matching the c-th detected object
                IplImage *cvSubImage = cvCreateImage(
                    cvSize(detected_objs_array[c].width * od_image_scale,
                           detected_objs_array[c].height * od_image_scale),
                    IPL_DEPTH_8U, 3);

                // Adapt the rect to the scale
                CvRect scaledRect = cvRect(detected_objs_array[c].x * od_image_scale,
                                           detected_objs_array[c].y * od_image_scale,
                                           detected_objs_array[c].width * od_image_scale,
                                           detected_objs_array[c].height * od_image_scale);

                // Copy the region of interest into the sub-image
                cvSetImageROI(cvimage, scaledRect);
                cvCopyImage(cvimage, cvSubImage);
                cvResetImageROI(cvimage);

                if (detected_sub_objs_array != NULL) {
                    // Release the previous results
                    delete [] detected_sub_objs_array;
                }
                detected_sub_objs_array = detect_objs(cvSubImage, sub_storage, sub_cascade,
                                                      sub_od_image_scale, sub_objs_found, sub_od_calc_time);

                ui->imageLabel->setPixmap(QPixmap::fromImage(convertToQImage(cvSubImage)));

                draw_objs(cvimage, sub_objs_found, detected_sub_objs_array, CV_RGB(0,255,0),
                          sub_od_image_scale, scaledRect.x, scaledRect.y);

                // Release the temporary sub-image to avoid leaking one IplImage per object
                cvReleaseImage(&cvSubImage);
            }
        }
    }

    lucasKanadeOF(cvimage);

    // Image conversion
    ui->imageLabel->setPixmap(QPixmap::fromImage(convertToQImage(cvimage)));
}
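// detect_objs is assumed here to be a thin wrapper around the legacy cvHaarDetectObjects API
// that detects on a downscaled grayscale copy (which is why the caller multiplies the returned
// rects by od_image_scale). A minimal sketch under that assumption; the parameter names,
// thresholds, and the CvRect* return convention are inferred from the call site, not taken
// from the project:
CvRect* detect_objs(IplImage* img, CvMemStorage* storage, CvHaarClassifierCascade* cascade,
                    int image_scale, int& objs_found, double& calc_time)
{
    objs_found = 0;
    if (cascade == NULL || storage == NULL) return NULL;

    // Convert to grayscale and shrink by the detection scale factor.
    IplImage* gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvCvtColor(img, gray, CV_BGR2GRAY);
    IplImage* small_img = cvCreateImage(cvSize(img->width / image_scale, img->height / image_scale),
                                        IPL_DEPTH_8U, 1);
    cvResize(gray, small_img, CV_INTER_LINEAR);
    cvEqualizeHist(small_img, small_img);

    double t = (double)cvGetTickCount();
    cvClearMemStorage(storage);
    CvSeq* objs = cvHaarDetectObjects(small_img, cascade, storage,
                                      1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(30, 30));
    calc_time = ((double)cvGetTickCount() - t) / ((double)cvGetTickFrequency() * 1000.0); // ms

    // Copy the detections into a new[]-allocated array, matching the caller's delete[].
    objs_found = objs ? objs->total : 0;
    CvRect* result = new CvRect[objs_found > 0 ? objs_found : 1];
    for (int i = 0; i < objs_found; i++)
        result[i] = *(CvRect*)cvGetSeqElem(objs, i);

    cvReleaseImage(&small_img);
    cvReleaseImage(&gray);
    return result;
}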
// Capture one image pair.
void MeasureMarkersWindow::capture()
{
    // Stop the timer first
    mTimer->stop();

    static int COUNT = 0;
    COUNT++;
    QString name;
    name.sprintf("%d", COUNT);

    // Save the raw images first
    QString leftname = mMeasureFolder + "left_" + name;
    QString rightname = mMeasureFolder + "right_" + name;
    mLastLeft.save(leftname + ".jpg");
    mLastRight.save(rightname + ".jpg");

    mImgWidth = mLastLeft.width();
    mImgHeight = mLastLeft.height();

    // Read the images back and detect the marker points
    cv::Mat li = cv::imread(leftname.toStdString() + ".jpg", CV_LOAD_IMAGE_UNCHANGED);
    cv::Mat ri = cv::imread(rightname.toStdString() + ".jpg", CV_LOAD_IMAGE_UNCHANGED);

    cv::Mat liGrey, riGrey;
    cv::cvtColor(li, liGrey, cv::COLOR_BGR2GRAY);
    cv::cvtColor(ri, riGrey, cv::COLOR_BGR2GRAY);

    std::vector<cv::Point2f> left_markers = circleDetect(liGrey);
    std::vector<cv::Point2f> right_markers = circleDetect(riGrey);

    for (size_t i = 0; i < left_markers.size(); i++)
        cv::circle(li, left_markers[i], 4, cv::Scalar(0, 0, 255), 3, 8, 0);
    for (size_t i = 0; i < right_markers.size(); i++)
        cv::circle(ri, right_markers[i], 4, cv::Scalar(0, 0, 255), 3, 8, 0);

    // Save the images with the detected markers drawn on them
    cv::imwrite(leftname.toStdString() + ".bmp", li);
    cv::imwrite(rightname.toStdString() + ".bmp", ri);

    if (left_markers.size() == 2 && right_markers.size() == 2) {
        // Pause the camera preview first
        preview();

        // Record the current image index
        mImgs.push_back(COUNT);

        // Show the images with the drawn markers
        QImage lp = convertToQImage(li);
        QImage rp = convertToQImage(ri);
        mLeftCameraPixmap->setPixmap(QPixmap::fromImage(lp));
        mRightCameraPixmap->setPixmap(QPixmap::fromImage(rp));

        // Report that the markers were found
        mStatusBar->showMessage(QString::fromWCharArray(L"成功找到标记点"));

        int all = mImgs.size();
        QString ta;
        ta.sprintf(" %d/%d", all, all);
        mCaps->setText(ta);

        mSlider->setMaximum(mImgs.size());
        mSlider->setValue(mImgs.size());

        updateButtonState();
        return;
    } else {
        // Delete the saved images
        QFile::remove(leftname + ".jpg");
        QFile::remove(rightname + ".jpg");

        // Roll the counter back since this capture failed
        COUNT--;

        // Pop up a dialog to report that no markers were found
        mStatusBar->showMessage(QString::fromWCharArray(L"没有找到标记点"));
        QMessageBox::StandardButton btn2 = QMessageBox::warning(
            0,                                                              // parent window
            QString::fromWCharArray(L"没有找到标记点"),                       // title
            QString::fromWCharArray(L"在当前的图像中,没有找到对应的标记点"));  // message text

        // Restart the timer
        mTimer->start();
    }
}
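// circleDetect is not shown in this excerpt. One plausible implementation, sketched under the
// assumption that the markers are circular and can be found with cv::HoughCircles; every
// parameter value below is illustrative rather than the project's actual tuning:
std::vector<cv::Point2f> circleDetect(const cv::Mat &grey)
{
    std::vector<cv::Point2f> centers;

    // Smooth first so the Hough transform does not pick up noise as small circles.
    cv::Mat blurred;
    cv::GaussianBlur(grey, blurred, cv::Size(9, 9), 2.0);

    std::vector<cv::Vec3f> circles;
    cv::HoughCircles(blurred, circles, CV_HOUGH_GRADIENT,
                     1,              // accumulator resolution (same as the image)
                     grey.rows / 8,  // minimum distance between circle centers
                     100, 30,        // Canny high threshold / accumulator threshold
                     5, 80);         // min / max radius in pixels

    for (size_t i = 0; i < circles.size(); i++)
        centers.push_back(cv::Point2f(circles[i][0], circles[i][1]));

    return centers;
}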
void BlobTrackingNode::process(const cv::Mat &img_input, const cv::Mat &img_mask, cv::Mat &img_output)
{
    // Events detected in this frame are collected here and emitted at the end.
    QList<DetectedEvent> blobEvent;

    cv::Mat newInput;
    img_input.copyTo(newInput);

    if(img_input.empty() || img_mask.empty()){
        return;
    }

    loadConfig();
    if(firstTime){
        saveConfig();
    }

    IplImage* frame = new IplImage(img_input);
    cvConvertScale(frame, frame, 1, 0);

    IplImage* segmentated = new IplImage(img_mask);

    IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
    cvMorphologyEx(segmentated, segmentated, NULL, morphKernel, CV_MOP_OPEN, 1);

    cv::Mat temp = cv::Mat(segmentated);
    if(showBlobMask){
        //cv::imshow("test",temp);
        ownerPlugin->updateFrameViewer("Tracking Mask", convertToQImage(temp));
    }

    IplImage* labelImg = cvCreateImage(cvGetSize(frame), IPL_DEPTH_LABEL, 1);

    cvb::CvBlobs blobs;
    cvb::cvLabel(segmentated, labelImg, blobs);
    cvb::cvFilterByArea(blobs, minArea, maxArea);

    if(debugBlob){
        cvb::cvRenderBlobs(labelImg, blobs, frame, frame,
                           CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE|CV_BLOB_RENDER_TO_STD);
    }
    else{
        cvb::cvRenderBlobs(labelImg, blobs, frame, frame,
                           CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);
    }

    cvb::cvUpdateTracks(blobs, tracks, threshold_distance, threshold_inactive);

    // At this point every blob in the current frame has been assigned to a track, so we can
    // iterate over the active tracks and find the blobs that belong to each of them. The loop
    // below does that; it gives a more general view of how blobs relate to each other over time.
    for (cvb::CvTracks::const_iterator track = tracks.begin(); track != tracks.end(); ++track){
        if((*track).second->active != 0){
            for(std::map<cvb::CvLabel, cvb::CvBlob *>::iterator it = blobs.begin(); it != blobs.end(); it++){
                cvb::CvLabel label = (*it).first;
                cvb::CvBlob *blob = (*it).second;
                if((*track).second->label == label){
                    //qDebug()<< blob->minx <<","<<blob->miny;
                    // This is the smoothed, time-tracked blob label.
                    blobEvent.append(DetectedEvent("blob",
                        QString("%1,%2,%3,%4,%5,%6,%7,%8").arg(frameIndex).arg((*track).first)
                            .arg(blob->centroid.x).arg(blob->centroid.y)
                            .arg(blob->minx).arg(blob->miny).arg(blob->maxx).arg(blob->maxy),
                        1.0));
                }
            }
        }
    }

    if(debugTrack){
        cvb::cvRenderTracks(tracks, frame, frame,
                            CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_STD);
    }
    else{
        cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);
    }

    if(showFrameID){
        cv::Mat temp = cv::Mat(frame);
        cv::putText(temp, QString("%1").arg(frameIndex).toStdString(), cv::Point(40,120),
                    cv::FONT_HERSHEY_PLAIN, 2, cv::Scalar(0,255,0), 2);
    }

    if(showOutput){
        cv::Mat temp = cv::Mat(frame);
        ownerPlugin->updateFrameViewer("Tracking Output", convertToQImage(temp));
        //cvShowImage("Blob Tracking", frame);
    }

    cv::Mat img_result(frame);
    cv::putText(img_result, QString("%1").arg(QString::number(frameIndex)).toUtf8().constData(),
                cv::Point(10,30), CV_FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,255,255));
    img_result.copyTo(img_output);

    cvReleaseImage(&labelImg);
    delete frame;
    delete segmentated;
    cvReleaseBlobs(blobs);
    cvReleaseStructuringElement(&morphKernel);

    firstTime = false;
    frameIndex++;

    QImage input((uchar*)newInput.data, newInput.cols, newInput.rows, newInput.step1(), QImage::Format_RGB888);
    QImage output((uchar*)img_output.data, img_output.cols, img_output.rows, img_output.step1(), QImage::Format_RGB888);

    QList<QImage> images;
    images.append(input);
    images.append(output);

    emit generateEvent(blobEvent, images);
    //emit generateEvent(blobEvent);
}
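// The per-frame results leave BlobTrackingNode through the generateEvent signal. A minimal
// sketch of how a consumer could be wired to it, assuming old-style Qt signal/slot connections;
// blobTrackingNode, eventSink, and onTrackingEvent are hypothetical names, and the metatype
// registrations are only needed if the connection ends up queued across threads:
qRegisterMetaType< QList<DetectedEvent> >("QList<DetectedEvent>");
qRegisterMetaType< QList<QImage> >("QList<QImage>");

QObject::connect(blobTrackingNode,
                 SIGNAL(generateEvent(QList<DetectedEvent>,QList<QImage>)),
                 eventSink,
                 SLOT(onTrackingEvent(QList<DetectedEvent>,QList<QImage>)));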