// ------------------------------------------------------------------------------------------------
void processXmlDocument(TiXmlHandle& handler, std::vector<Image>* array)
{
    TiXmlHandle images = handler.FirstChild("response").FirstChild("data").FirstChild("images");
    TiXmlHandle item_1 = images.Child("image", 0);
    TiXmlHandle item_2 = images.Child("image", 1);
    TiXmlHandle item_3 = images.Child("image", 2);

    Image image_1 = processImage(item_1);
    Image image_2 = processImage(item_2);
    Image image_3 = processImage(item_3);

    array->push_back(image_1);
    array->push_back(image_2);
    array->push_back(image_3);
}
bool WICImageLoader::decodeImageData(ImageBlob blob, size_t size)
{
    bool bRet = false;
    HRESULT hr = E_FAIL;
    IWICStream* pWicStream = NULL;
    IWICImagingFactory* pWicFactory = getWICFactory();

    if (NULL != pWicFactory) {
        hr = pWicFactory->CreateStream(&pWicStream);
    }

    if (SUCCEEDED(hr)) {
        hr = pWicStream->InitializeFromMemory((BYTE*)blob, static_cast<DWORD>(size));
    }

    IWICBitmapDecoder* pDecoder = NULL;
    if (SUCCEEDED(hr)) {
        hr = pWicFactory->CreateDecoderFromStream(pWicStream, NULL, WICDecodeMetadataCacheOnLoad, &pDecoder);
    }

    // Only hand the decoder on if every step above succeeded; pDecoder is
    // still NULL otherwise.
    if (SUCCEEDED(hr)) {
        bRet = processImage(pDecoder);
    }

    SafeRelease(&pWicStream);
    SafeRelease(&pDecoder);
    return bRet;
}
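// The snippet above relies on a SafeRelease() helper whose definition is not
// part of this listing. A minimal sketch of the usual COM idiom, assuming
// that is what the project uses:
template <class T>
void SafeRelease(T** ppT)
{
    if (*ppT) {
        (*ppT)->Release();  // drop our reference
        *ppT = NULL;        // guard against double-release
    }
}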
void imageMsgConvert_rgb(const sensor_msgs::ImageConstPtr& msg)
{
    cv_bridge::CvImagePtr cv_ptr;
    try {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    } catch (cv_bridge::Exception& e) {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    rgb_img = cv::Mat(cv_ptr->image.size(), CV_8UC3);
    cv_ptr->image.copyTo(rgb_img);

    // Process the input image
    processImage(rgb_img);

    //image_sub_rgb.shutdown();
    // Output modified video stream
    //image_pub_rgb.publish(cv_ptr->toImageMsg());
    //cv::FileStorage file("some_name.ext", cv::FileStorage::WRITE);
}
void CodeReaderThread::run()
{
    m_mutex.lock();
    QImage processingImage = QImage(m_image);
    m_mutex.unlock();

    while (!m_quit) {
        // Image processing: searching for codes
        QString code = processImage(processingImage);

        m_mutex.lock();
        QImage image;
        if (!code.isEmpty())
            image = processingImage.copy();

        // Notify that the image processing is done
        emit processingFinished(image, code);

        // Sleep until the producer hands over the next frame
        m_condition.wait(&m_mutex);
        processingImage = QImage(m_image);
        m_mutex.unlock();
    }
}
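// The worker above blocks on m_condition until new input arrives. The
// producer side is not shown; a hypothetical setImage() feeding it might look
// like this (the method name is an assumption, not the project's code):
void CodeReaderThread::setImage(const QImage& img)
{
    QMutexLocker locker(&m_mutex);
    m_image = img;          // hand the new frame to the worker
    m_condition.wakeOne();  // wake the run() loop blocked in wait()
}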
tResult CurveDetector::OnPinEvent(IPin *source, tInt eventCore, tInt param1, tInt param2, IMediaSample *mediaSample)
{
    RETURN_IF_POINTER_NULL(source);
    RETURN_IF_POINTER_NULL(mediaSample);

    if (eventCore == IPinEventSink::PE_MediaSampleReceived) {
        if (source == &this->videoInput) {
            if (this->isFirstFrame) {
                // Read the video format once, from the first sample
                cObjectPtr<IMediaType> type;
                RETURN_IF_FAILED(this->videoInput.GetMediaType(&type));

                cObjectPtr<IMediaTypeVideo> typeVideo;
                RETURN_IF_FAILED(type->GetInterface(IID_ADTF_MEDIA_TYPE_VIDEO, (tVoid**)(&typeVideo)));

                const tBitmapFormat *format = typeVideo->GetFormat();
                RETURN_IF_POINTER_NULL(format);
                setBitmapFormat(format);
                this->isFirstFrame = false;
            } else {
                RETURN_IF_FAILED(processImage(mediaSample));
            }
        }
    }
    RETURN_NOERROR;
}
void ImageDialog::getNetPic()
{
    EnableSavePic(false);
    EnableSaveAsPic(false);

    simuNaoProvider->outSafeJointData(tjSafeJointData);
    // TODO: need to get sensor data from the Nao when connected to the Nao
    simuNaoProvider->outSafeSensorData(tjSafeSensorData);
    simuNaoProvider->getPic(tjImage);

    sendData2messageQueue(idSimSensorData, idImageDialog, idVisionThread, &tjSafeSensorData, sizeof(SafeSensorData));
    sendData2messageQueue(idSensoryJointData, idImageDialog, idVisionThread, &tjSafeJointData, sizeof(SafeJointData));
    sendData2messageQueue(idUncorrectedRaw, idImageDialog, idVisionThread, tjImage.unCorrectedRaw, tjImage.imageSize);

    GenUnclassifyImage(tjImage, unclassifiedImage);
    (*processUnclassifiedImage) = (*unclassifiedImage);
    if (lut_ != NULL) {
        GenClassifyImage(lut_, tjImage, classifiedImage);
        processImage();
        DrawVisualObjects(processUnclassifiedImage);
    }

    update();
    show();
    raise();
    activateWindow();

    EnableSavePic(true);
    EnableSaveAsPic(true);
}
MyImage HistogramEqualizationProcessor::preProcessImage(const MyImage& image) const
{
    QImage *resultImage = processImage(image.getImage());
    MyImage result(*resultImage, MyImage::Gray);
    delete resultImage;  // processImage() returns a heap-allocated QImage
    return result;
}
void ImageDialog::openPic()
{
    QString filename = QFileDialog::getOpenFileName(this, tr("open Pic"), lastFileName, tr("Images (*.raw)"));
    currentDir = QFileInfo(filename).absoluteDir();
    currentPath = QFileInfo(filename).absolutePath();
    currentList = currentDir.entryList(QStringList(tr("*.raw")), QDir::Files, QDir::Name);
    currentIndex = currentList.indexOf(QFileInfo(filename).fileName());

    if (!filename.isEmpty()) {
        lastFileName = filename;
        //rawString = filename.toStdWString();
        rawString = filename;
        LoadRawStream();

        GenUnclassifyImage(tjImage, unclassifiedImage);
        (*processUnclassifiedImage) = (*unclassifiedImage);
        if (lut_ != NULL) {
            GenClassifyImage(lut_, tjImage, classifiedImage);
            processImage();
            DrawVisualObjects(processUnclassifiedImage);
        }
    }

    update();
    show();
    raise();
    activateWindow();
}
void ImageDialog::nextImage()
{
    if (currentIndex + 1 < currentList.size()) {
        QString nextfile(currentPath);
        QString currentFile_onlyname = currentList.at(++currentIndex);
        nextfile.append(tr("/")).append(currentFile_onlyname);
        lastFileName = nextfile;
        //rawString = nextfile.toStdString();
        rawString = nextfile;
        LoadRawStream();

        GenUnclassifyImage(tjImage, unclassifiedImage);
        (*processUnclassifiedImage) = (*unclassifiedImage);
        if (lut_ != NULL) {
            GenClassifyImage(lut_, tjImage, classifiedImage);
            processImage();
            DrawVisualObjects(processUnclassifiedImage);
        }

        update();
        show();
        raise();
        activateWindow();
    } else {
        QMessageBox::information(this, tr("Warning"), tr("TO THE END"));
    }
}
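// getNetPic(), openPic(), and nextImage() repeat the same
// load/classify/process/draw sequence verbatim. A hypothetical shared helper
// (an assumption, not part of the source) would keep the three call sites in sync:
void ImageDialog::refreshClassifiedView()
{
    GenUnclassifyImage(tjImage, unclassifiedImage);
    (*processUnclassifiedImage) = (*unclassifiedImage);
    if (lut_ != NULL) {
        GenClassifyImage(lut_, tjImage, classifiedImage);
        processImage();
        DrawVisualObjects(processUnclassifiedImage);
    }
}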
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    currentStep1 = 0;
    currentStep2 = 0;
    imageSteps1 << 0;
    imageSteps2 << 0;
    // fp = 0;

    rubberBand1 = new QRubberBand(QRubberBand::Rectangle, ui->imageDisplay);
    rubberBand2 = new QRubberBand(QRubberBand::Rectangle, ui->imageDisplay_2);

    connect(ui->loadButton, SIGNAL(clicked()), this, SLOT(load()));
    connect(ui->analyzeButton, SIGNAL(clicked()), this, SLOT(processImage()));
    connect(ui->nextButton, SIGNAL(clicked()), this, SLOT(nextImage()));
    connect(ui->previousButton, SIGNAL(clicked()), this, SLOT(previousImage()));
    connect(ui->fitnessButton, SIGNAL(clicked()), this, SLOT(showPlot()));
    connect(ui->leftRadioButton, SIGNAL(clicked()), this, SLOT(selectImageLabel()));
    connect(ui->rightRadioButton, SIGNAL(clicked()), this, SLOT(selectImageLabel()));
    connect(ui->deleteStepButton, SIGNAL(clicked()), this, SLOT(removeStep()));
    connect(ui->compareButton, SIGNAL(clicked()), this, SLOT(compareSides()));

    ui->imageDisplay->setMouseTracking(true);
    ui->imageDisplay_2->setMouseTracking(true);
    ui->imageDisplay->installEventFilter(this);
    ui->imageDisplay_2->installEventFilter(this);
    ui->fitnessButton->setEnabled(false);

    selectImageLabel();
}
/* [subscriber callbacks] */
void RemoveSmallRegionsAlgNode::image_in_callback(const sensor_msgs::Image::ConstPtr& msg)
{
    cv_bridge::CvImagePtr cv_ptr;
    try {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    } catch (cv_bridge::Exception& e) {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    cv_ptr->image = processImage(cv_ptr->image);

    ROS_DEBUG("Remove small regions publishing image");
    this->image_out_publisher_.publish(cv_ptr->toImageMsg());

    // Use the appropriate mutex for shared variables if necessary
    //this->alg_.lock();
    //this->remove_border_in_mutex_.enter();
    //std::cout << msg->data << std::endl;

    // Unlock previously blocked shared variables
    //this->alg_.unlock();
    //this->remove_border_in_mutex_.exit();
}
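// processImage() itself is not shown. A plausible small-region filter, using
// cv::connectedComponentsWithStats on a thresholded copy, might look like the
// sketch below; the 100-pixel area cutoff is an assumption for illustration:
cv::Mat processImage(const cv::Mat& input)
{
    cv::Mat gray, mask, labels, stats, centroids;
    cv::cvtColor(input, gray, cv::COLOR_BGR2GRAY);
    cv::threshold(gray, mask, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

    int n = cv::connectedComponentsWithStats(mask, labels, stats, centroids);
    cv::Mat output = cv::Mat::zeros(mask.size(), CV_8UC1);
    for (int label = 1; label < n; ++label) {  // label 0 is the background
        if (stats.at<int>(label, cv::CC_STAT_AREA) >= 100)
            output.setTo(255, labels == label);  // keep regions above the cutoff
    }
    cv::cvtColor(output, output, cv::COLOR_GRAY2BGR);  // stay BGR8 for publishing
    return output;
}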
void ImageCapture::timerTick()
{
    cv::Mat frame;

    // Capture a frame from the camera
    cvCapture->read(frame);

    // Check whether frame data are available
    if (!frame.empty()) {
        QImage image(imageConvert(frame));

        // Check whether we are in video mode
        if (mode == 0) {
            // Maintain a counter so only every (threshold+1)-th frame is processed
            if (counter >= threshold) {
                counter = 0;
                emit processImage(frame);
            } else {
                counter++;
            }
        }
        emit imageCaptured(image);
    } else {
        qDebug() << "Failed to read a frame from cvCapture!";
    }
}
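// imageConvert() is assumed above but not shown. A common cv::Mat (BGR) to
// QImage conversion, sketched here as an assumption about its behavior:
QImage ImageCapture::imageConvert(const cv::Mat& mat)
{
    // Wrap the BGR data without copying, then rgbSwapped() both converts to
    // RGB and returns a new image, so the result does not alias mat's buffer.
    QImage wrapped(mat.data, mat.cols, mat.rows,
                   static_cast<int>(mat.step), QImage::Format_RGB888);
    return wrapped.rgbSwapped();
}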
void ArucoMapping::imageCallback(const sensor_msgs::ImageConstPtr &original_image)
{
    // Create cv_bridge instance
    cv_bridge::CvImagePtr cv_ptr;
    try {
        cv_ptr = cv_bridge::toCvCopy(original_image, sensor_msgs::image_encodings::MONO8);
    } catch (cv_bridge::Exception& e) {
        ROS_ERROR("Not able to convert sensor_msgs::Image to OpenCV::Mat format %s", e.what());
        return;
    }

    // sensor_msgs::Image to OpenCV Mat structure
    cv::Mat I = cv_ptr->image;

    // Region of interest
    if (roi_allowed_ == true)
        I = cv_ptr->image(cv::Rect(roi_x_, roi_y_, roi_w_, roi_h_));

    // Marker detection
    processImage(I, I);

    // Show image
    cv::imshow("Mono8", I);
    cv::waitKey(10);
}
CvRect detectRupee(IplImage * camImg)
{
    //-------------------- Capture frames of the note into the OriginalFrame IplImage --------------------
    IplImage *OriginalFrame;
    CvRect rect;
    printf("2called...");

    int k = 0;
    OriginalFrame = camImg;
    // Try at most twice: once as-is, once with the frame flipped
    while (k < 2) {
        processImage(OriginalFrame);
        detectGandhiFace();
        // rotate(OriginalFrame);
        rect = doBlack(OriginalFrame);
        if (rect.width != 0)
            break;
        else
            cvFlip(camImg, OriginalFrame, -1);
        k++;
    }
    return rect;
}
/**
 * Complete invoice recognition and verification pipeline.
 */
void Worker::run()
{
    ImageProcess* threadImageProcess = new ImageProcess();
    std::vector<OCRMask>* threadOCRMasks = new std::vector<OCRMask>();
    BillInfo* threadBillInfo = new BillInfo;
    TesseractOCR* threadTesseractOCR = new TesseractOCR();

    processImage(threadImageProcess, threadOCRMasks, *threadBillInfo);
    loadMasks(threadOCRMasks);
    recognizeText(threadImageProcess, threadOCRMasks, threadTesseractOCR);
    initBillInfo(threadOCRMasks, *threadBillInfo);

    mutex.lock();
    vBillInfo.push_back(threadBillInfo);
    mutex.unlock();

    if (threadOCRMasks) {
        delete threadOCRMasks;
    }
    if (threadImageProcess) {
        delete threadImageProcess;
    }
    if (threadTesseractOCR) {
        delete threadTesseractOCR;
    }
}
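// The manual new/delete pairs above could be replaced with RAII. A sketch of
// the same flow with std::unique_ptr (an alternative, not the project's code;
// BillInfo stays a raw pointer because its ownership passes to vBillInfo):
void Worker::run()
{
    auto threadImageProcess = std::make_unique<ImageProcess>();
    auto threadOCRMasks = std::make_unique<std::vector<OCRMask>>();
    auto threadTesseractOCR = std::make_unique<TesseractOCR>();
    BillInfo* threadBillInfo = new BillInfo;

    processImage(threadImageProcess.get(), threadOCRMasks.get(), *threadBillInfo);
    loadMasks(threadOCRMasks.get());
    recognizeText(threadImageProcess.get(), threadOCRMasks.get(), threadTesseractOCR.get());
    initBillInfo(threadOCRMasks.get(), *threadBillInfo);

    mutex.lock();
    vBillInfo.push_back(threadBillInfo);
    mutex.unlock();
}   // the unique_ptrs clean up here, even on early return or exception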
void Screen::drawSortFrames(byte *file)
{
    uint i, j;

    // Sort the sort list. Used to be a separate function, but it was only
    // called once, right before calling drawSortFrames().

    if (_curSort > 1) {
        // Simple bubble sort on sort_y
        for (i = 0; i < _curSort - 1; i++) {
            for (j = 0; j < _curSort - 1; j++) {
                if (_sortList[_sortOrder[j]].sort_y > _sortList[_sortOrder[j + 1]].sort_y) {
                    SWAP(_sortOrder[j], _sortOrder[j + 1]);
                }
            }
        }
    }

    // Draw the sorted frames - layers, shrinkers & normal flat sprites

    for (i = 0; i < _curSort; i++) {
        if (_sortList[_sortOrder[i]].layer_number) {
            // it's a layer - minus 1 for true layer number
            // we need to know from the BuildUnit because the
            // layers will have been sorted in random order
            processLayer(file, _sortList[_sortOrder[i]].layer_number - 1);
        } else {
            // it's a sprite
            processImage(&_sortList[_sortOrder[i]]);
        }
    }
}
// -------------------------------------------------------------------------------------------------
tResult LaneFilter::OnPinEvent(IPin* source, tInt event_code, tInt param1, tInt param2, IMediaSample* media_sample)
// -------------------------------------------------------------------------------------------------
{
    RETURN_IF_POINTER_NULL(source);
    RETURN_IF_POINTER_NULL(media_sample);

    if (event_code == IPinEventSink::PE_MediaSampleReceived) {
        if (source == &video_input_pin_) {
            processImage(media_sample);
        } else if (source == &object_input_pin_) {
            // Read out the incoming media sample
            cObjectPtr<IMediaCoder> coder_input;
            RETURN_IF_FAILED(object_data_description_->Lock(media_sample, &coder_input));

            // Get values from the media sample
            tUInt32 size;
            coder_input->Get("size", (tVoid*)&size);

            // A variable-length array (Object object_array[size]) is not
            // standard C++; a heap-backed vector is portable.
            std::vector<Object> object_array(size);
            media_sample->CopyBufferTo((tVoid*)object_array.data(), sizeof(Object) * size, sizeof(tUInt32), 0);

            lane_preprocessor_.set_object_vector(object_array.data(), object_array.size());

            object_data_description_->Unlock(coder_input);
        }
    }
    RETURN_NOERROR;
}
int main (int argc, char* const argv[])
{
    init();

    if (argc == 1 && usage()) return 0;
    if (parseCmdLine(argc, argv) != 0) return 0;
    if (optShowHelp && showHelp()) return 0;
    if (optShowVersion && showVersion()) return 0;

    curl_global_init(CURL_GLOBAL_ALL);

    for (size_t i = 0; i < optFiles.size(); i++) {
        // Resize/re-encode first; skip the file if preprocessing failed
        std::string processed = processImage(optFiles[i], optImageSize);
        if (processed.empty())
            continue;
        if (processed != optFiles[i])
            tempFiles.push_back(processed);

        std::string err;
        std::string output = uploadImage(processed.c_str(), err);
        if (!userOutput(output) || err.length() > 0)
            fprintf(stderr, "Upload failed for %s.\n%s\n", optFiles[i].c_str(), err.c_str());
    }

    // Clean up any temporary resized copies
    for (size_t i = 0; i < tempFiles.size(); i++)
        remove(tempFiles[i].c_str());

    return 0;
}
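// uploadImage() is not shown above. A minimal libcurl multipart upload with
// roughly that shape might look like the sketch below; the endpoint URL and
// the "file" field name are assumptions for illustration:
static size_t collectResponse(char* ptr, size_t size, size_t nmemb, void* userdata)
{
    static_cast<std::string*>(userdata)->append(ptr, size * nmemb);
    return size * nmemb;
}

std::string uploadImage(const char* path, std::string& err)
{
    std::string response;
    CURL* curl = curl_easy_init();
    if (!curl) {
        err = "curl_easy_init failed";
        return response;
    }

    curl_mime* mime = curl_mime_init(curl);
    curl_mimepart* part = curl_mime_addpart(mime);
    curl_mime_name(part, "file");                  // assumed form field name
    curl_mime_filedata(part, path);

    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/upload");  // assumed endpoint
    curl_easy_setopt(curl, CURLOPT_MIMEPOST, mime);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, collectResponse);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response);

    CURLcode rc = curl_easy_perform(curl);
    if (rc != CURLE_OK)
        err = curl_easy_strerror(rc);

    curl_mime_free(mime);
    curl_easy_cleanup(curl);
    return response;
}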
bool TextureCompiler::readJSON(const jsonxx::Object& root)
{
    BaseCompiler::readJSON(root);
    m_input = root.get<std::string>("input");
    m_format = root.get<std::string>("format");
    m_processed = processImage(m_input, m_output);
    return m_processed;
}
void ImageCapture::captureOneFrame()
{
    // Check whether we are in image mode
    if (mode == 1) {
        cv::Mat frame;
        cvCapture->read(frame);
        emit processImage(frame);
    }
}
Widget::Widget(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::Widget)
{
    ui->setupUi(this);
    setFixedSize(this->width(), this->height());
    ui->scrollArea->setWidgetResizable(true);

    connect(ui->pushButton_openFile, SIGNAL(clicked()), this, SLOT(openFile()));
    connect(ui->pushButton_Process, SIGNAL(clicked()), this, SLOT(processImage()));
}
//============== slot function ====================//
void ImageDialog::run()
{
    processImage();
    DrawVisualObjects(unclassifiedImage);
    update();
    show();
    raise();
    activateWindow();
}
bool PackageGenerator::addImages()
{
    output("Process images");
    for (qint32 s = 0; s < _targetSizes.size(); ++s) {
        for (QMap<QString, ImageSetting>::ConstIterator i = _imageSettings.constBegin(); i != _imageSettings.constEnd(); ++i) {
            if (!processImage(i.key(), i.value().usedSizes.at(s), _targetSizes.at(s), i.value().crop)) {
                return false;
            }
        }
    }
    output("Success");
    return true;
}
void MainWindow::browseImage()
{
    QFileDialog dialog(this, tr("Select image"),
                       QStandardPaths::writableLocation(QStandardPaths::PicturesLocation));
    dialog.setAcceptMode(QFileDialog::AcceptOpen);
    dialog.setMimeTypeFilters({"image/jpeg", "image/png", "application/octet-stream"});
    if (dialog.exec() == QFileDialog::Accepted)
        processImage(dialog.selectedFiles().first());
}
// ------------------------------------------------------------------------------------------------
void processXmlDocument(TiXmlDocument& document, std::vector<Image>* array)
{
    TiXmlElement* root = document.FirstChildElement("response");
    if (root != nullptr) {
        TiXmlElement* data = root->FirstChildElement("data");
        if (data != nullptr) {
            TiXmlElement* images = data->FirstChildElement("images");
            if (images != nullptr) {
                TiXmlElement* item_1 = images->FirstChildElement("image");
                if (item_1 != nullptr) {
                    Image image_1 = processImage(item_1);
                    array->push_back(image_1);
                    TiXmlElement* item_2 = item_1->NextSiblingElement("image");
                    if (item_2 != nullptr) {
                        Image image_2 = processImage(item_2);
                        array->push_back(image_2);
                        TiXmlElement* item_3 = item_2->NextSiblingElement("image");
                        if (item_3 != nullptr) {
                            Image image_3 = processImage(item_3);
                            array->push_back(image_3);
                            // stop
                        } else {
                            ERR("No sibling element: image");
                        }
                    } else {
                        ERR("No sibling element: image");
                    }
                } else {
                    ERR("No child element: image");  // the first <image> is a child, not a sibling
                }
            } else {
                ERR("No child element: images");
            }
        } else {
            ERR("No child element: data");
        }
    } else {
        ERR("No child element: response");
    }
}
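// The nested ifs above walk exactly three <image> children. A loop over
// NextSiblingElement() generalizes this to any count -- a sketch, assuming
// processImage(TiXmlElement*) behaves as in the function above:
void processXmlImages(TiXmlDocument& document, std::vector<Image>* array)
{
    TiXmlHandle handle(&document);
    TiXmlElement* images = handle.FirstChildElement("response")
                                 .FirstChildElement("data")
                                 .FirstChildElement("images").ToElement();
    if (images == nullptr) {
        ERR("No element: response/data/images");
        return;
    }
    for (TiXmlElement* item = images->FirstChildElement("image");
         item != nullptr;
         item = item->NextSiblingElement("image")) {
        array->push_back(processImage(item));
    }
}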
void ProcessImagesThread::run()
{
    Q_ASSERT(this->pictures != 0);
    qDebug() << ("ProcessImagesThread::run called");

    for (int i = 0; i < this->pictures->size(); i++) {
        processImage(pictures->value(i));
        if (m_cancelNow) {
            qDebug() << ("ProcessImagesThread::run was canceled");
            return;
        }
        // Report progress; guard the divisor so a single-item list
        // does not divide by zero.
        emit complete(100.f * (float)i / (float)qMax(pictures->size() - 1, 1));
    }
    qDebug() << ("ProcessImagesThread::run done completely");
}
std::vector<MedocDb::File> ExportDbDlg::processImages(const wxString & tesseractLanguage) const
{
    std::shared_ptr<Ocr> ocr;
    MedocConfig config;
    if (config.isOcrEnabled()) {
        ocr.reset(new Ocr(tesseractLanguage));
    }

    std::vector<MedocDb::File> files;
    for (const wxImage & image : m_images) {
        files.push_back(MedocDb::File(processImage(image),
                                      processImage(scale(image, 80)),
                                      (ocr ? ocr->recognize(image) : wxString(_("")))));
    }
    return files;
}
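// scale() is assumed above but not shown; the second processImage() call
// apparently builds a reduced copy. Whether 80 is a percentage or a pixel
// size cannot be determined from this snippet; the hypothetical helper below
// assumes a percentage:
wxImage scale(const wxImage& image, int percent)
{
    // wxImage::Scale returns a resized copy; the original is untouched.
    return image.Scale(image.GetWidth() * percent / 100,
                       image.GetHeight() * percent / 100,
                       wxIMAGE_QUALITY_HIGH);
}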
void ImageView::makeConnection()
{
    connect(tBtn_menu, SIGNAL(triggered(QAction*)), this, SLOT(goBack(QAction*)));
    connect(btn_oriMode, SIGNAL(clicked()), this, SLOT(enterOriginalMode()));
    connect(btn_proMode, SIGNAL(clicked()), this, SLOT(enterProcessedMode()));
    connect(btn_editMode, SIGNAL(clicked()), this, SLOT(enterEditableMode()));
    connect(btn_save, SIGNAL(clicked()), this, SLOT(saveImage()));
    connect(btn_process, SIGNAL(clicked()), this, SLOT(processImage()));
    connect(btn_zoomIn, SIGNAL(clicked()), this, SLOT(zoomIn()));
    connect(btn_zoomOut, SIGNAL(clicked()), this, SLOT(zoomOut()));
    connect(com_zoom, SIGNAL(currentIndexChanged(int)), this, SLOT(zoomTo(int)));
}
void RaspiVoice::GrabAndProcessFrame(RaspiVoiceOptions opt)
{
    // Set new options. Options that were copied into RaspiVoice:: fields in
    // the constructor are unaffected.
    this->opt = opt;

    // Read and process images:
    cv::Mat im = readImage();
    processImage(im);

    if (verbose) {
        printtime("vOICe algorithm process start");
    }
    i2ssConverter->Process(*image);
}
void AprilAnalysis::processAndShowImage()
{
    processImage();

    std::cout << m_detections.size() << " tags detected:" << std::endl;
    for (int i = 0; i < static_cast<int>(m_detections.size()); i++) {
        printDetection(m_detections[i]);
    }

    for (int i = 0; i < static_cast<int>(m_detections.size()); i++) {
        m_detections[i].draw(m_img);
    }

    cv::namedWindow(m_windowName, cv::WINDOW_NORMAL);
    cv::imshow(m_windowName, m_img);

    // Block until any key is pressed
    while (cv::waitKey(100) == -1) {}
}