void Binary::construct(std::string n, BinaryFormat f){ if (f == BinaryFormat_undefined){ f = detectFormat(n); } format = f; EPAXOut << "File " << n << " is a " << getFormatName() << " file" << ENDL; EPAXAssert(format != BinaryFormat_undefined, "Cannot determine format of " << n << ". Try specifying it manually."); switch (format){ case BinaryFormat_Elf32: binary = new Elf::ElfBinary32(n); break; case BinaryFormat_Elf64: binary = new Elf::ElfBinary64(n); break; case BinaryFormat_MachO32: binary = new MachO::MachOBinary32(n); break; case BinaryFormat_MachO64: binary = new MachO::MachOBinary64(n); break; default: EPAXDie("Unimplemented binary format " << getFormatName() << " given."); } EPAXOut << "Program entry point at vaddr " << HEX(binary->getStartAddr()) << ENDL; lineinfo = new LineInformation(binary); binary->describe(); //EPAXAssert(binary->isARM(), "This binary contains non-ARM code... bailing"); }
/**
 * Calls the import method of the filter responsible for the format
 * of the given file.
 *
 * @param graphic The container to which we will add
 *        entities. Usually that's an RS_Graphic entity but
 *        it can also be a polyline, text, ...
 * @param file Path and name of the file to import.
 * @param type Format of the file; RS2::FormatUnknown triggers detection.
 * @return true if the import filter succeeded, false otherwise.
 */
bool RS_FileIO::fileImport(RS_Graphic& graphic, const QString& file,
        RS2::FormatType type) {

    RS_DEBUG->print("Trying to import file '%s'...", file.toLatin1().data());

    // Fall back to detection from the file name when no explicit type is given.
    const RS2::FormatType t = (type == RS2::FormatUnknown) ? detectFormat(file) : type;

    if (RS2::FormatUnknown != t) {
        std::unique_ptr<RS_FilterInterface> filter(getImportFilter(file, t));
        // fix: use nullptr/boolean test instead of comparing .get() against NULL
        if (filter != nullptr) {
            return filter->fileImport(graphic, file, t);
        }
        RS_DEBUG->print(RS_Debug::D_WARNING,
                        "RS_FileIO::fileImport: failed to import file: %s",
                        file.toLatin1().data());
    } else {
        RS_DEBUG->print(RS_Debug::D_WARNING,
                        "RS_FileIO::fileImport: failed to detect file format: %s",
                        file.toLatin1().data());
    }

    return false;
}
// Gathers image information (rating, dates, orientation, dimensions, format,
// color depth/model) and writes it to the database according to the scan mode.
void ImageScanner::scanImageInformation() {
    // Start by assuming all ImageInformation fields will be written.
    DatabaseFields::ImageInformation dbFields = DatabaseFields::ImageInformationAll;
    QVariantList infos;

    if (m_scanMode == NewScan || m_scanMode == Rescan) {
        // Field order matters below: [0]=Rating, [1]=CreationDate,
        // [2]=DigitizationDate, [3]=Orientation.
        MetadataFields fields;
        fields << MetadataInfo::Rating
               << MetadataInfo::CreationDate
               << MetadataInfo::DigitizationDate
               << MetadataInfo::Orientation;
        QVariantList metadataInfos = m_metadata.getMetadataFields(fields);

        // creation date: fall back to file system property
        if (metadataInfos[1].isNull() || !metadataInfos[1].toDateTime().isValid()) {
            metadataInfos[1] = creationDateFromFilesystem(m_fileInfo);
        }

        // Some fields should only be overwritten if set in metadata
        if (m_scanMode == Rescan) {
            // No rating in metadata: drop it from both the field mask and the
            // value list so the existing DB rating is preserved.
            if (metadataInfos[0].isNull() || metadataInfos[0].toInt() == -1) {
                dbFields &= ~DatabaseFields::Rating;
                metadataInfos.removeAt(0);
            }
        }

        infos << metadataInfos;
    }

    // Image properties derived from the decoded image itself.
    QSize size = m_img.size();
    infos << size.width()
          << size.height()
          << detectFormat()
          << m_img.originalBitDepth()
          << m_img.originalColorModel();

    if (m_scanMode == NewScan) {
        DatabaseAccess().db()->addImageInformation(m_scanInfo.id, infos, dbFields);
    }
    else if (m_scanMode == Rescan) {
        DatabaseAccess().db()->changeImageInformation(m_scanInfo.id, infos, dbFields);
    }
    else // ModifiedScan
    {
        // Does _not_ update rating and orientation!
        DatabaseAccess().db()->changeImageInformation(m_scanInfo.id, infos,
                                                      DatabaseFields::Width
                                                      | DatabaseFields::Height
                                                      | DatabaseFields::Format
                                                      | DatabaseFields::ColorDepth
                                                      | DatabaseFields::ColorModel);
    }
}
// Parses a standard (non-BioWare) DDS header from the stream and prepares the
// mip map list. Reads are strictly sequential; do not reorder them.
void DDS::readStandardHeader(Common::SeekableReadStream &dds) {
	// All DDS header should be 124 bytes (+ 4 for the FourCC) big
	if (dds.readUint32LE() != 124)
		throw Common::Exception("Header size invalid");

	// DDS features
	uint32 flags = dds.readUint32LE();

	// Image dimensions
	uint32 height = dds.readUint32LE();
	uint32 width  = dds.readUint32LE();

	dds.skip(4 + 4); // Pitch + Depth
	//uint32 pitchOrLineSize = dds.readUint32LE();
	//uint32 depth           = dds.readUint32LE();
	uint32 mipMapCount = dds.readUint32LE();

	// DDS doesn't provide any mip maps, only one full-size image
	if ((flags & kHeaderFlagsHasMipMaps) == 0)
		mipMapCount = 1;

	dds.skip(44); // Reserved

	// Read the pixel data format
	// Note the fourCC is read big-endian so it compares naturally to 'DXT1' etc.
	DDSPixelFormat format;
	format.size     = dds.readUint32LE();
	format.flags    = dds.readUint32LE();
	format.fourCC   = dds.readUint32BE();
	format.bitCount = dds.readUint32LE();
	format.rBitMask = dds.readUint32LE();
	format.gBitMask = dds.readUint32LE();
	format.bBitMask = dds.readUint32LE();
	format.aBitMask = dds.readUint32LE();

	// Detect which specific format it describes
	detectFormat(format);

	dds.skip(16 + 4); // DDCAPS2 + Reserved

	_mipMaps.reserve(mipMapCount);
	for (uint32 i = 0; i < mipMapCount; i++) {
		MipMap *mipMap = new MipMap;

		// Each level halves the dimensions; clamp to 1x1 for the smallest levels.
		mipMap->width  = MAX<uint32>(width , 1);
		mipMap->height = MAX<uint32>(height, 1);

		setSize(*mipMap);

		// Pixel data is not read here; loaded later by the caller.
		mipMap->data = 0;

		width  >>= 1;
		height >>= 1;

		_mipMaps.push_back(mipMap);
	}
}
/**
 * Calls the export method of the object responsible for the format
 * of the given file.
 *
 * @param graphic Drawing to export.
 * @param file Path and name of the file to export to.
 * @param type Target format; RS2::FormatUnknown triggers detection
 *        from the file name.
 * @return true if the export filter succeeded, false when no filter
 *         handles the format or the filter fails.
 */
bool RS_FileIO::fileExport(RS_Graphic& graphic, const QString& file,
        RS2::FormatType type) {

    RS_DEBUG->print("RS_FileIO::fileExport");
    //RS_DEBUG->print("Trying to export file '%s'...", file.latin1());

    if (type==RS2::FormatUnknown) {
        type=detectFormat(file, false);
    }

    // fix: own the filter by value instead of binding the factory's temporary
    // to an rvalue reference — same lifetime, clearer ownership.
    std::unique_ptr<RS_FilterInterface> filter = getExportFilter(file, type);
    if (filter){
        return filter->fileExport(graphic, file, type);
    }
    RS_DEBUG->print("RS_FileIO::fileExport: no filter found");

    return false;
}
bool Image::initWithImageData(const unsigned char * data, ssize_t dataLen) { bool ret = false; do { CC_BREAK_IF(!data || dataLen <= 0); unsigned char* unpackedData = nullptr; ssize_t unpackedLen = 0; unpackedData = const_cast<unsigned char*>(data); unpackedLen = dataLen; _fileType = detectFormat(unpackedData, unpackedLen); switch (_fileType) { case Format::PNG: ret = initWithPngData(unpackedData, unpackedLen); break; case Format::JPG: ret = initWithJpgData(unpackedData, unpackedLen); break; case Format::S3TC: ret = initWithS3TCData(unpackedData, unpackedLen); break; default: OUTPUT_LOG("unsupported image format!"); } if (unpackedData != data) { free(unpackedData); } } while (0); return ret; }
/** * Calls the import method of the filter responsible for the format * of the given file. * * @param graphic The container to which we will add * entities. Usually that's an RS_Graphic entity but * it can also be a polyline, text, ... * @param file Path and name of the file to import. */ bool RS_FileIO::fileImport(RS_Graphic& graphic, const QString& file, RS2::FormatType type) { RS_DEBUG->print("Trying to import file '%s'...", file.toLatin1().data()); RS2::FormatType t; if (type == RS2::FormatUnknown) { t = detectFormat(file); } else { t = type; } if (RS2::FormatUnknown != t) { std::unique_ptr<RS_FilterInterface>&& filter(getImportFilter(file, t)); if (filter){ #ifdef DWGSUPPORT if (file.endsWith(".dwg",Qt::CaseInsensitive)){ QMessageBox::StandardButton sel = QMessageBox::warning(qApp->activeWindow(), QObject::tr("Warning"), QObject::tr("experimental, save your work first.\nContinue?"), QMessageBox::Ok|QMessageBox::Cancel, QMessageBox::NoButton); if (sel == QMessageBox::Cancel) return false; } #endif return filter->fileImport(graphic, file, t); } RS_DEBUG->print(RS_Debug::D_WARNING, "RS_FileIO::fileImport: failed to import file: %s", file.toLatin1().data()); } else { RS_DEBUG->print(RS_Debug::D_WARNING, "RS_FileIO::fileImport: failed to detect file format: %s", file.toLatin1().data()); } return false; }
// Entry point for a pedestrian detection/tracking demo.
// argv[1] = tracking method, argv[2] = input format ("video"/"image"),
// argv[3] = input file name. Shows the annotated frames in a "Video" window.
int main(int argc, char *argv[])
{
    // Usage check: the user must supply method, format and input file.
    if(argc <= 3)
    {
        std::cout<<"---------------------------------------"<<std::endl<<
            "Veuillez rentrer la methode choisie : "<<std::endl<<
            "- template_tracking" <<std::endl<<
            "- LK_tracking" <<std::endl<<
            "- farneback" <<std::endl<<
            "- camshift_kalman" <<std::endl<<
            "- background_lk" <<std::endl<<
            "- background_kalman" <<std::endl<<std::endl<<
            "Le type de format : " <<std::endl<<
            "- video" <<std::endl<<
            "- image" <<std::endl<<std::endl<<
            "Le nom du fichier d'input" <<std::endl<<
            "---------------------------------------"<<std::endl;
        std::exit(EXIT_FAILURE);
    }

    //------------------VARIABLES----------------------------------------------//
    // Common variables
    choiceAlgo algo;
    formatVideo format;
    std::string inputFileName(argv[3]);
    int nbTrames = 501;   // number of frames to process
    double fps = 0;
    std::vector<cv::Mat> sequence;
    cv::Mat previousSequence;
    int nbPedestrians = 0;
    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    std::vector<cv::Rect> detectedPedestrian;

    // HOG + Good feature to track + LK
    std::vector<std::vector<cv::Point2f>> featuresDetected;
    std::vector<std::vector<cv::Point2f>> previousFeaturesDetected;

    // HOG + Template tracking
    std::vector<cv::Rect> boxes;
    std::vector<cv::Rect> previousBoxes;

    // Optical flow farneback
    cv::Mat flow;
    cv::Mat imGray;
    cv::Mat imGrayPrev;

    // camshift and kalman filter
    std::vector<cv::MatND> backProj;
    std::vector<cv::Rect> roiHogDetected;
    std::vector<cv::Rect> roiCamShift;
    std::vector<bool> detected;
    std::vector<cv::Mat> hist;
    std::vector<cv::RotatedRect> rectCamShift;
    cv::Point2f rect_points[4];

    //--------------------------------------------------------------------------------------//
    // Background subtraction; for the LK/goodFeaturesToTrack tracking see above
    trackingOption tracking;
    cv::Ptr<cv::BackgroundSubtractor> pMOG2;
    std::vector<cv::Rect> detectedPedestrianFiltered;
    cv::KalmanFilter KF(4,2,0,CV_32F);
    cv::Mat_<float> measurement(2,1);
    cv::Mat prediction;
    cv::Mat estimated;
    pMOG2 = cv::createBackgroundSubtractorMOG2();

    // With the Haar cascade classifier added
    std::vector<std::vector<cv::Rect>> rect_upper_body;
    cv::CascadeClassifier classifier;
    std::string upper_body_cascade_name = "haarcascade_fullbody.xml";
    if(!classifier.load(upper_body_cascade_name))
    {
        std::cout<<"le fichier "<<upper_body_cascade_name<<" ne peut etre charge"<<std::endl;
        return -1;
    }

    //--------------------------------------------------------------------------------------//
    // Background subtraction and Kalman
    bool initKalman = true;
    cv::KalmanFilter Kalman(4,2,0,CV_32F);
    cv::Mat_<float> measurmt(2,1);
    cv::Mat predict;
    cv::Mat estim;

    // Constant-velocity transition model: state = [x, y, vx, vy],
    // position advances by velocity each step, velocity stays constant.
    Kalman.transitionMatrix.at<float>(0,0) = 1;
    Kalman.transitionMatrix.at<float>(0,1) = 0;
    Kalman.transitionMatrix.at<float>(0,2) = 1;
    Kalman.transitionMatrix.at<float>(0,3) = 0;
    Kalman.transitionMatrix.at<float>(1,0) = 0;
    Kalman.transitionMatrix.at<float>(1,1) = 1;
    Kalman.transitionMatrix.at<float>(1,2) = 0;
    Kalman.transitionMatrix.at<float>(1,3) = 1;
    Kalman.transitionMatrix.at<float>(2,0) = 0;
    Kalman.transitionMatrix.at<float>(2,1) = 0;
    Kalman.transitionMatrix.at<float>(2,2) = 1;
    Kalman.transitionMatrix.at<float>(2,3) = 0;
    Kalman.transitionMatrix.at<float>(3,0) = 0;
    Kalman.transitionMatrix.at<float>(3,1) = 0;
    Kalman.transitionMatrix.at<float>(3,2) = 0;
    Kalman.transitionMatrix.at<float>(3,3) = 1;
    measurmt.setTo(cv::Scalar(0));
    cv::setIdentity(Kalman.measurementMatrix);
    cv::setIdentity(Kalman.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(Kalman.measurementNoiseCov, cv::Scalar::all(1e-1));
    cv::setIdentity(Kalman.errorCovPost, cv::Scalar::all(.1));
    cv::Rect rectK;
    cv::Rect prevRectK;

    // video acquisition
    algo = detectAlgo(std::string(argv[1]));
    format = detectFormat(std::string(argv[2]));

    //------------------VIDEO--------------------------------------------------//
    if(format == SEQUENCE_IMAGE)
        sequence.resize(nbTrames);
    else if(format == VIDEO)
        std::cout<<"video"<<std::endl;
    // NOTE(review): only the message above is conditional; extraction always runs.
    extractVideoData(sequence, format, inputFileName, nbTrames, fps);

    cv::namedWindow("Video", cv::WINDOW_AUTOSIZE);

    //------------------VIDEO-PROCESSING---------------------------------------//
    for(int i=0;i<nbTrames;i++)
    {
        if(i>0)
            previousSequence = sequence[i-1];
        else
            previousSequence = sequence[i];

        ///------------------HOG + Good Features to track + LK-----------------//
        if(algo == HOG_GOODFEATURESTOTRACK_LK)
        {
            // Re-detect pedestrians with HOG every 20 frames...
            if(i%20 == 0)
            {
                detectedPedestrian = hogDetection(sequence[i], hog);
                nbPedestrians = detectedPedestrian.size();
                if(nbPedestrians != 0)
                {
                    featuresDetected = featuresDetection(sequence[i], detectedPedestrian);
                    previousFeaturesDetected.resize(featuresDetected.size());
                    previousFeaturesDetected = featuresDetected;
                }
            }
            // ...otherwise track the previous features with Lucas-Kanade.
            else if(previousFeaturesDetected.size() != 0)
            {
                featuresDetected = lucasKanadeTracking(previousSequence, sequence[i], previousFeaturesDetected);
                previousFeaturesDetected.clear();
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;
            }

            //--------Representation--------------------
            /*
            cv::Scalar myColor;
            for(size_t j=0;j<featuresDetected.size();j++)
            {
                if(j%3 == 0)
                    myColor = cv::Scalar(0,0,cv::RNG().uniform(200,255));
                else if(j%2 == 0)
                    myColor = cv::Scalar(0,cv::RNG().uniform(200,255),0);
                else
                    myColor = cv::Scalar(cv::RNG().uniform(200,255),0,0);
                for(size_t k=0;k<featuresDetected[j].size();k++)
                {
                    cv::circle(sequence[i], featuresDetected[j][k], 1, myColor,-1);
                }
            }
            */
            for(size_t j=0;j<featuresDetected.size();j++)
            {
                cv::rectangle(sequence[i], cv::boundingRect(featuresDetected[j]), cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }

            // display the video
            cv::imshow("Video", sequence[i]);
        }
        ///------------------HOG + Template Tracking---------------------------//
        else if(algo == HOG_TEMPLATE_TRACKING)
        {
            // Re-detect with HOG every 20 frames, otherwise track by template matching.
            if(i%20 == 0)
            {
                detectedPedestrian = hogDetection(sequence[i], hog);
                nbPedestrians = detectedPedestrian.size();
                if(nbPedestrians != 0)
                {
                    boxes = templateTracking(sequence[i], detectedPedestrian, CV_TM_CCORR_NORMED);
                    previousBoxes.resize(boxes.size());
                    previousBoxes = boxes;
                }
            }
            else if(previousBoxes.size() != 0)
            {
                boxes = templateTracking(sequence[i], previousBoxes, CV_TM_CCORR_NORMED);
                previousBoxes.clear();
                previousBoxes.resize(boxes.size());
                previousBoxes = boxes;
            }

            //--------Representation--------------------
            for(size_t j=0;j<boxes.size();j++)
            {
                cv::rectangle(sequence[i], boxes[j], cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }

            // display the video
            cv::imshow("Video", sequence[i]);
        }
        ///------------------HOG + Optical Flow Farneback----------------------//
        else if(algo == OPT_FLOW_FARNEBACK)
        {
            // Needs a previous frame, so skip the very first iteration.
            if(i!=0)
            {
                flow = cv::Mat::zeros(sequence[i].size(), CV_32FC2);
                cv::cvtColor(sequence[i], imGray, CV_BGR2GRAY);
                cv::cvtColor(sequence[i-1], imGrayPrev, CV_BGR2GRAY);
                cv::calcOpticalFlowFarneback(imGrayPrev, imGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

                //-----------------Representation------------------------------//
                drawOptFlowMap(flow, imGrayPrev, 16, CV_RGB(0, 255, 0)); // test drawing

                // display the video
                cv::imshow("Video", imGrayPrev);
            }
        }
        ///--------------HOG+Camshift + Kalman Filter--------------------------//
        else if(algo == CAMSHIFT_KALMAN_FILTER)
        {
            // camshift
            if(i%20 == 0 && roiCamShift.size() == 0)
            {
                roiHogDetected = hogDetection(sequence[i], hog);
                refineROI(roiCamShift, detected, roiHogDetected);
                // test
                if(roiCamShift.size() != 0)
                {
                    /*
                    roiCamShift[0].x += 30;
                    roiCamShift[0].width -= 60;
                    roiCamShift[0].y += 40;
                    roiCamShift[0].height -= 100;
                    */
                    // Shrink the first ROI around the pedestrian's centre.
                    roiCamShift[0].x += roiCamShift[0].width/2;
                    roiCamShift[0].width = roiCamShift[0].width/3;
                    //roiCamShift[0].y += roiCamShift[0].height/2;
                    roiCamShift[0].height = roiCamShift[0].height/3;
                }
                //
            }
            backProj = computeProbImage(sequence[i], roiCamShift, hist, detected);
            if (roiCamShift.size() != 0)
                cv::imshow("temp", backProj[0]);

            ///-------Test-Camshift--------------------///
            rectCamShift.resize(roiCamShift.size());
            for(size_t j=0;j<roiCamShift.size();j++)
            {
                /*
                std::cout<<roiCamShift[j]<<std::endl;
                cv::rectangle(backProj[j], roiCamShift[j], cv::Scalar( 255, 0, 0), 2, 8, 0 );
                //DEBUG
                cv::imshow("before camshift", backProj[j]);
                cv::waitKey(0);
                */
                cv::Rect rectMeanShift;
                rectMeanShift = roiCamShift[j];
                cv::meanShift(backProj[j], rectMeanShift, cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
                cv::rectangle(sequence[i], rectMeanShift, cv::Scalar( 0, 255, 0), 2, 8, 0 );
                rectCamShift[j] = cv::CamShift(backProj[j], roiCamShift[j], cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
                rectCamShift[j].points(rect_points);
                for(int k = 0; k < 4; k++)
                    cv::line(sequence[i], rect_points[k], rect_points[(k+1)%4], cv::Scalar( 0, 0, 255), 2, 8);
            }
            ///----------------------------------------///

            //-----------------Representation----------------------------------//
            // draw the rectangle
            for(size_t j=0;j<roiCamShift.size();j++)
                cv::rectangle(sequence[i], roiCamShift[j], cv::Scalar( 255, 0, 0), 2, 8, 0 );

            // display the video
            cv::imshow("Video", sequence[i]);
        }
        ///------------------BACKGROUND-SUBSTRACTION---------------------------//
        else if(algo == BACKGROUND_LK)
        {
            if(i%10 == 0) // equal 0 for the test
            {
                backgroundSubstractionDetection(sequence[i], detectedPedestrianFiltered, pMOG2, tracking);
            }
            if(tracking == GOOD_FEATURES_TO_TRACK)
            {
                featuresDetected.resize(detectedPedestrianFiltered.size());
                featuresDetected = featuresDetection(sequence[i], detectedPedestrianFiltered);
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;
                tracking = LUCAS_KANADE;

                // (Re)initialise KF with the same constant-velocity model.
                KF.transitionMatrix.at<float>(0,0) = 1;
                KF.transitionMatrix.at<float>(0,1) = 0;
                KF.transitionMatrix.at<float>(0,2) = 1;
                KF.transitionMatrix.at<float>(0,3) = 0;
                KF.transitionMatrix.at<float>(1,0) = 0;
                KF.transitionMatrix.at<float>(1,1) = 1;
                KF.transitionMatrix.at<float>(1,2) = 0;
                KF.transitionMatrix.at<float>(1,3) = 1;
                KF.transitionMatrix.at<float>(2,0) = 0;
                KF.transitionMatrix.at<float>(2,1) = 0;
                KF.transitionMatrix.at<float>(2,2) = 1;
                KF.transitionMatrix.at<float>(2,3) = 0;
                KF.transitionMatrix.at<float>(3,0) = 0;
                KF.transitionMatrix.at<float>(3,1) = 0;
                KF.transitionMatrix.at<float>(3,2) = 0;
                KF.transitionMatrix.at<float>(3,3) = 1;
                measurement.setTo(cv::Scalar(0));

                for(size_t j=0;j<featuresDetected.size();j++)
                {
                    detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                }

                // Seed the filter state with the first tracked pedestrian's centre.
                KF.statePre.at<float>(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                KF.statePre.at<float>(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                KF.statePre.at<float>(2) = 0;
                KF.statePre.at<float>(3) = 0;
                cv::setIdentity(KF.measurementMatrix);
                cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-4));
                cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-1));
                cv::setIdentity(KF.errorCovPost, cv::Scalar::all(.1));
            }
            else if(tracking == LUCAS_KANADE)
            {
                for(size_t j=0;j<featuresDetected.size();j++)
                {
                    detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                }
                featuresDetected = lucasKanadeTracking(previousSequence, sequence[i], previousFeaturesDetected);
                previousFeaturesDetected.clear();
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;

                prediction = KF.predict();
                cv::Point predictPt(prediction.at<float>(0),prediction.at<float>(1));

                // Get mouse point
                measurement(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                measurement(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                cv::Point measPt(measurement(0),measurement(1));

                // The "correct" phase that is going to use the predicted value and our measurement
                // NOTE(review): this local shadows the outer `estimated` declared above.
                cv::Mat estimated = KF.correct(measurement);
                cv::Point statePt(estimated.at<float>(0),estimated.at<float>(1));
                //cv::circle(sequence[i], measPt, 1, cv::Scalar(0,255,0), 7, 24);
                //cv::circle(sequence[i], predictPt, 1, cv::Scalar(0,255,255), 7, 24);
            }

            //--------Representation--------------------
            if(tracking != NOTHING_TO_TRACK)
                findUpperBody(classifier, sequence[i], detectedPedestrianFiltered, rect_upper_body);
            for(size_t j=0;j<featuresDetected.size();j++)
            {
                for(size_t k=0;k<rect_upper_body[j].size();k++)
                {
                    cv::rectangle(sequence[i], rect_upper_body[j][k], cv::Scalar( 0, 255, 0), 2, 8, 0 );
                }
                //detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                cv::rectangle(sequence[i], cv::boundingRect(featuresDetected[j]), cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }

            // display the video
            cv::imshow("Video", sequence[i]);
        }
        else if(algo == BACKGROUND_KALMAN)
        {
            int refresh = 6;
            if(i%refresh == 0)
            {
                backgroundSubstractionDetection(sequence[i], detectedPedestrianFiltered, pMOG2, tracking);
            }

            // First detection seeds the Kalman state once.
            if(initKalman && detectedPedestrianFiltered.size() != 0)
            {
                Kalman.statePre.at<float>(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                Kalman.statePre.at<float>(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                Kalman.statePre.at<float>(2) = 0;
                Kalman.statePre.at<float>(3) = 0;
                initKalman = false;
            }

            if(detectedPedestrianFiltered.size() != 0)
            {
                predict = Kalman.predict();
                cv::Point predictPt(predict.at<float>(0),predict.at<float>(1));

                // The "correct" phase that is going to use the predicted value and our measurement
                if(i%refresh == 0)
                {
                    rectK = findROI(predictPt, detectedPedestrianFiltered);
                    // Only correct with the measurement when the new ROI is not
                    // merged with the previous one.
                    if(!fusionRects(prevRectK, rectK))
                    {
                        cv::Point refPoint = rectCenter(rectK); // Get center point
                        measurmt(0) = refPoint.x;
                        measurmt(1) = refPoint.y;
                        cv::Point measPt(measurmt(0),measurmt(1));
                        cv::Mat estim = Kalman.correct(measurmt);
                        cv::Point statePt(estim.at<float>(0),estim.at<float>(1));
                    }
                    prevRectK = rectK;
                }

                // Annotate the prediction with the estimated speed magnitude.
                std::string str = std::to_string(sqrt(pow(Kalman.statePre.at<float>(2), 2)+pow(Kalman.statePre.at<float>(3), 2)));
                //cv::circle(sequence[i], measPt, 1, cv::Scalar(0,255,0), 7, 24);
                cv::circle(sequence[i], predictPt, 1, cv::Scalar(0,255,255), 7, 24);
                cv::putText(sequence[i], str, predictPt, cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0), 2.0);
            }
            cv::imshow("Video", sequence[i]);
        }

        //------------------CLEAR-VARIABLES------------------------------------//
        detectedPedestrian.clear();
        featuresDetected.clear();
        boxes.clear();
        previousSequence.release();
        flow.release();
        imGray.release();
        imGrayPrev.release();
        roiHogDetected.clear();
        backProj.clear();

        //------------------STOP-CONDITIONS------------------------------------//
        if (cv::waitKey((int)(1000/fps)) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
        {
            std::cout << "esc key is pressed by user" << std::endl;
            return 0;
        }
    }
    cv::waitKey(0);
    return 0;
}
// Detects a subtitle file's format by sampling its first lines and deferring
// to the line-based detectFormat overload.
QString SubtitleConverter::detectFormat(const QString &subtitleFile) const
{
    // 15 lines read as UTF-8 are enough for format detection.
    return detectFormat(readFile(subtitleFile, "UTF-8", 15));
}
/**
 * Spawns a new process from the executable at `path`.
 *
 * Finds the binary on the PATH, detects its format, creates an empty target
 * process, loads the image with the matching loader, wires up stdio and
 * finally attaches the process for execution.
 *
 * @param path executable to spawn
 * @param args command line arguments to pass to the new process
 * @param workdir working directory for the new process
 * @param secLvl security level of the new process
 * @param requesterPid process requesting the spawn (stdio inheritance)
 * @param outPid receives the new process id on success
 * @param outStdin/outStdout/outStderr receive the created stdio handles
 * @param inStdin/inStdout/inStderr stdio handles to inherit
 * @return SPAWN_STATUS_SUCCESSFUL or a specific failure status
 */
SpawnStatus spawn(const char *path, const char *args, const char *workdir, SecurityLevel secLvl,
                  Pid requesterPid, Pid *outPid, File_t *outStdin, File_t *outStdout, File_t *outStderr,
                  File_t inStdin, File_t inStdout, File_t inStderr)
{
    // file of executable
    File_t file;

    // open input file
    if (!findFile(path, &file))
    {
        klog("unable to open the file: %s, it doesn't exist on PATH directory", path);
        return SPAWN_STATUS_IO_ERROR;
    }

    // detect format
    BinaryFormat format = detectFormat(file);
    if (format == BF_UNKNOWN)
    {
        klog("binary has an unknown format: %s", path);
        Close(file); // fix: the opened file handle was leaked on this path
        return SPAWN_STATUS_FORMAT_ERROR;
    }

    // create empty target process
    auto targetProc = CreateEmptyProcess(secLvl);
    Pid targetPid = GetCreatedProcessID(targetProc);

    // apply configuration
    ProcessConfiguration configuration;
    configuration.sourcePath = (char*) path;
    ConfigureProcess(targetProc, configuration);

    // create a loader
    Loader *loader;
    if (format == BF_ELF32)
        loader = new Elf32Loader(targetProc, file);
    //else if (format == BF_ELF64) loader = new Elf64Loader(targetProc, file);
    else
    {
        klog("no loader implementation for binary format %i", format);
        CancelProcessCreation(targetProc); // fix: half-created process was leaked on this path
        Close(file);                       // fix: the opened file handle was leaked on this path
        return SPAWN_STATUS_FORMAT_ERROR;
    }

    // setup standard I/O
    if (!setupStdio(targetPid, requesterPid, outStdin, outStdout, outStderr, inStdin, inStdout, inStderr))
        klog("unable to setup stdio for process %i", targetPid);

    // perform loading
    uintptr_t entryAddress;
    LoaderStatus ldrStat = loader->load(&entryAddress);
    SpawnStatus spawnStat = SPAWN_STATUS_UNKNOWN;
    if (ldrStat == LS_SUCCESSFUL)
    {
        // push command line arguments
        writeCliArgs(targetProc, args);

        // set working directory
        SetWorkingDirectoryP(workdir, targetProc);

        // attach loaded process
        AttachCreatedProcess(targetProc, entryAddress);

        // out-set process id
        *outPid = targetPid;
        spawnStat = SPAWN_STATUS_SUCCESSFUL;
    }
    else
    {
        // cancel creation & let kernel clean up
        CancelProcessCreation(targetProc);
        if (ldrStat == LS_IO_ERROR)
            spawnStat = SPAWN_STATUS_IO_ERROR;
        else if (ldrStat == LS_FORMAT_ERROR)
            spawnStat = SPAWN_STATUS_FORMAT_ERROR;
        else if (ldrStat == LS_MEMORY_ERROR)
            spawnStat = SPAWN_STATUS_MEMORY_ERROR;
        else
        {
            klog("loader return unknown status on failure %i", ldrStat);
            spawnStat = SPAWN_STATUS_UNKNOWN;
        }
    }

    // fix: the loader object was never freed
    delete loader;

    // Close binary file
    Close(file);
    return spawnStat;
}