Example #1
0
//--------------------------------------------------------------------------
// Writes a full backup archive to destPath: magic number, database schema
// version, a placeholder for the metadata offset, then the raw bytes of the
// database file followed by every content file. The archive byte offset of
// each embedded file is recorded in fileOffset (offset -> logical name).
// Emits progressSignal() after each file is copied. On any open failure,
// fills errorMessage and returns false.
// NOTE(review): this chunk is truncated — the metadata section, the
// placeholder back-patch and the final return are not visible here.
//--------------------------------------------------------------------------
bool BackupTask::fullExport(const QString &destPath,
                            QString &errorMessage)
{
    int dbVersion = DefinitionHolder::DATABASE_VERSION;
    qint64 metadataOffset = 0;
    //maps byte offset in the archive -> logical file name
    QMap<qint64, QString> fileOffset;
    QStringList contentFileList = MetadataEngine::getInstance()
            .getAllContentFiles().values();

    //progress: one step for the database file plus one per content file
    int progress = 0;
    int totalSteps = 0;
    totalSteps = 1 + contentFileList.size();
    emit progressSignal(progress, totalSteps);

    QFile destFile(destPath);
    if (!destFile.open(QIODevice::WriteOnly)) {
        //NOTE(review): message reads "open create file" — looks like a typo
        errorMessage = tr("Failed to open create file %1: %2")
                .arg(destPath).arg(destFile.errorString());
        return false;
    }

    QDataStream out(&destFile);
    out << m_magicNumber;
    out << dbVersion;

    //remember where the metadata-offset placeholder lives so it can be
    //back-patched once the real metadata offset is known
    //NOTE(review): QFile::pos() returns qint64 but is narrowed to int here —
    //truncation risk for archives > 2 GiB; confirm against the patch site
    int placeHolderOffset = destFile.pos();
    out << metadataOffset; //place holder

    //write database file (raw chunked copy, m_fileBufSize bytes at a time)
    fileOffset.insert(destFile.pos(), "database");
    QFile dbFile(m_dbPath);
    if (!dbFile.open(QIODevice::ReadOnly)) {
        //NOTE(review): destFile is left partially written on this error path
        errorMessage = tr("Failed to open file %1: %2")
                .arg(m_dbPath).arg(dbFile.errorString());
        return false;
    }
    while(!dbFile.atEnd()) {
        destFile.write(dbFile.read(m_fileBufSize));
    }
    dbFile.close();

    //update progress
    emit progressSignal(++progress, totalSteps);

    //write content files, recording the archive start offset of each
    foreach (QString s, contentFileList) {
        fileOffset.insert(destFile.pos(), s);
        QFile file(m_filesDir + s);
        if (!file.open(QIODevice::ReadOnly)) {
            errorMessage = tr("Failed to open file %1: %2")
                    .arg(m_filesDir + s).arg(file.errorString());
            return false;
        }
        while(!file.atEnd()) {
            destFile.write(file.read(m_fileBufSize));
        }
        file.close();

        //update progress
        emit progressSignal(++progress, totalSteps);
    }
Example #2
0
/*!
 * Runs a complete motion analysis of the currently-configured video.
 *
 * Adapts the region/threshold data passed in from the GUI (stored in class
 * members by the Analyzer() constructor) into the containers used by the
 * OpenCV wrapper, then loops frame-by-frame from the user-selected start
 * time to the stop time, accumulating per-region motion statistics.
 * Progress percentages, saved-image paths and the final Result are
 * reported back to the GUI via signals.
 *
 * \return Returns nothing; the analysis is complete once this function terminates
 *
 * \see Analyzer() constructor for the data and values that are passed from the GUI
 */
void Analyzer::analyze()
{
    //general video metadata (name, resolution, run time, ...) for the report
    OpenCV::generalVideoData videoInfo;
    //per-region results that will be sent to output
    std::vector <OpenCV::regionData> regionData;
    //region coordinates used for analysis: one {x1, y1, x2, y2} per region
    std::vector < std::vector<int> > regionCoordinates;
    //temporary container for a single region's coordinates, used to fill regionCoordinates
    std::vector <int> regionTemp;

    //per-region motion thresholds, scaled below from percent to fraction
    //NOTE(review): fixed capacity of 10 with no bounds check — confirm the
    //GUI cannot supply more than 10 regions, otherwise the loop overflows
    float percentChangeInRegion[10];

    //build each region's corner coordinates from the GUI-supplied start
    //points plus the region widths/heights
    if(_regionHeights->size() != 0)
    {
        for(unsigned int i = 0; i < _regionHeights->size();  i++)
        {
            //add an empty container to hold one region's coordinates
            regionCoordinates.push_back(regionTemp);

            //fill that container: top-left (x,y) then bottom-right (x+w, y+h)
            regionCoordinates[i].push_back( (*_regionXCoords)[i] );
            regionCoordinates[i].push_back( (*_regionYCoords)[i] );
            regionCoordinates[i].push_back( (*_regionXCoords)[i] + (*_regionWidths)[i] );
            regionCoordinates[i].push_back( (*_regionYCoords)[i] + (*_regionHeights)[i] );

            //threshold arrives as an integer percentage...
            percentChangeInRegion[i] = ( (*_regionThresholds)[i] );

            //...and is used downstream as a fraction in [0, 1]
            percentChangeInRegion[i] = ( percentChangeInRegion[i] / 100 );
        }
    }
    else//if no regions were selected by the user, set up a default region
    {
        percentChangeInRegion[0] = 0;

        //add a default region name for output when no regions are selected
        _regionNames->push_back("default");
    }

    //copy the video file path as a standard string
    std::string videoFilePath = _videoFilePath.toStdString();

    //temporary output is stored in the system's "tmp" folder; final output is
    //saved in the user's working directory if they chose to keep analysis data
    std::string outputFilePath = "tmp";

    int lastSlashInPath = videoFilePath.find_last_of('/');

    //pick the path-separator style by probing the video path: '/' means a
    //POSIX-style path (macOS/Linux), otherwise assume Windows backslashes
    //NOTE(review): find_last_of returns size_type; storing it in int and
    //comparing to -1 relies on npos == size_t(-1) — works, but fragile
    if(lastSlashInPath != -1)
    {
        outputFilePath = "tmp/";
    }
    else//windows
    {
        outputFilePath = "tmp\\";
    }


    //if the video file fails to open, silently skip; otherwise run the analysis
    if(!_cvObject.openVideoFile(videoFilePath))
    {
        //Commented out for final release
        //std::cout << "Failed to open video file " << videoFilePath << std::endl;
    }
    else//continue to analyze video
    {
        //percent of the analysis that is complete (for the progress bar)
        int percentComplete = 0;

        //will hold the name of the video file we are analyzing, without trailing extension
        std::string videoFileName;

        //start from the full path, then strip directory and extension below
        videoFileName = videoFilePath;

        //parse out the bare video file name (directory and extension removed)
        if(videoFileName.find_last_of('/') != -1)//npos == size_t(-1), so this comparison works despite the signed/unsigned warning
        {
            videoFileName = videoFileName.substr(videoFileName.find_last_of('/') + 1, videoFileName.size() );
            //NOTE(review): dropping 4 chars assumes a 3-letter extension like ".avi"
            videoFileName = videoFileName.substr(0, (videoFileName.size() - 4) );
        }
        else
        {
            videoFileName = videoFileName.substr(videoFileName.find_last_of('\\') + 1, videoFileName.size() );
            videoFileName = videoFileName.substr(0, (videoFileName.size() - 4) );
        }

        //store video file name for results later
        videoInfo.videoName = videoFileName;

        //video run time information;
        int hoursOfVideo = 0;
        int minutesOfVideo = 0;
        int secondsOfVideo = 0;

        //get the total run time of the video (filled into the three out-params)
        _cvObject.getFormattedVideoTime(hoursOfVideo, minutesOfVideo, secondsOfVideo, _cvObject.getNumberOfVideoFrames(), _cvObject.getVideoFrameRate());

        //store video metadata for the results report
        videoInfo.frameWidthResult = _cvObject.getVideoFrameWidth();
        videoInfo.frameHeightResult = _cvObject.getVideoFrameHeight();
        videoInfo.totalNumberOfFramesResult = _cvObject.getNumberOfVideoFrames();
        videoInfo.frameRateResult = _cvObject.getVideoFrameRate();
        videoInfo.hoursOfRunTimeResult = hoursOfVideo;
        videoInfo.minutesOfRunTimeResult = minutesOfVideo;
        videoInfo.secondsOfRunTimeResult = secondsOfVideo;
        videoInfo.totalVideoRunTimeInSeconds = ( (hoursOfVideo * 3600) + (minutesOfVideo * 60) + secondsOfVideo );
        //accumulated over the whole analysis by analyzeCurrentFrame()
        videoInfo.totalFramesPastThreshHold = 0;


        //if at least one region was selected by the user
        if(regionCoordinates.size() != 0)
        {
            //record each region's rectangle and display color for the report
            for(unsigned int regionNum = 0; regionNum < regionCoordinates.size(); regionNum++)
            {
                //store region data for results later
                OpenCV::regionData tempData;
                tempData.regionStartPointX = regionCoordinates[regionNum][0];
                tempData.regionStartPointY = regionCoordinates[regionNum][1];
                tempData.regionEndPointX = regionCoordinates[regionNum][2];
                tempData.regionEndPointY = regionCoordinates[regionNum][3];
                tempData.regionRectangleColor = _cvObject.colorList[regionNum].colorName;
                regionData.push_back(tempData);
            }

        }
        else //if no regions were selected by the user, set one region selecting the whole frame
        {
            OpenCV::regionData tempData;
            std::vector <int> tempCoordinates(4);

            regionCoordinates.push_back(tempCoordinates);

            //full-frame region: (0,0) to (frameWidth, frameHeight)
            regionCoordinates[0][0] = 0;
            regionCoordinates[0][1] = 0;
            regionCoordinates[0][2] = _cvObject.getVideoFrameWidth();
            regionCoordinates[0][3] = _cvObject.getVideoFrameHeight();

            tempData.regionStartPointX = 0;
            tempData.regionStartPointY = 0;
            tempData.regionEndPointX = _cvObject.getVideoFrameWidth();
            tempData.regionEndPointY = _cvObject.getVideoFrameHeight();
            tempData.regionRectangleColor = _cvObject.colorList[0].colorName;

            regionData.push_back(tempData);
        }


        //set amount of frame to analyze based on user input
        _cvObject.setFrameAnalysisSize(regionCoordinates, _isFullFrameAnalysis);

        //initialize the per-region pixel-change tracking inside the OpenCV class
        //NOTE(review): by this point regionCoordinates is never empty (the else
        //branch above pushed a default), so the else below looks unreachable
        if(regionCoordinates.size() != 0)
        {
            //initialize variables within the OpenCV class that track pixel changes per region
            int numberOfRegions = regionCoordinates.size();
            _cvObject.initializePixelChangeVariables(numberOfRegions, regionData, percentChangeInRegion, _regionWidths, _regionHeights);
        }
        else //if no regions were selected by the user, set one region selecting the whole frame
        {
            //initialize pixel change variables for a single region
            _cvObject.initializePixelChangeVariables(1, regionData, percentChangeInRegion, _regionWidths, _regionHeights);
        }


        //frame numbers bounding the analysis window (filled in below from the
        //user-selected start/stop seconds)
        double analysisStartFrame = 0;
        double analysisEndFrame = 0;

        //now declared here since code moved out of OpenCV class
        int currentFrameNumber = 0;

        //if the user chose a start time past the beginning of the video, seek
        //there and record the corresponding frame number
        if(_startSecond > 0)
        {
            //setCurrentVideoTime expects milliseconds
            _cvObject.setCurrentVideoTime(_startSecond * 1000);
            currentFrameNumber = _cvObject.getCurrentVideoFrame();
            analysisStartFrame = currentFrameNumber;
        }
        else//video is being analyzed from the beginning
        {
            analysisStartFrame = 0;
            currentFrameNumber = 0;
        }

        //set analysis end time based on time chosen by the user
        analysisEndFrame = _stopSecond * _cvObject.getVideoFrameRate();
        //NOTE(review): _stopSecond is mutated in place to milliseconds here —
        //after this line the member no longer holds seconds
        _stopSecond = _stopSecond * 1000;

        //record the analysis window in the report
        videoInfo.frameAnalysisStart = analysisStartFrame;
        videoInfo.frameAnalysisEnd = analysisEndFrame;

        //set start frame, output path, and video file name in open CV class
        _cvObject.initializeStartFrameAndFileName(outputFilePath, videoFileName, analysisStartFrame);

        //set current frame size of video, used for creating image variables for analysis
        _cvObject.initializeFrameSizeSensitivityAndDrawSize(_motionSensitivity);

        //initialize average frame motion image variable
        _cvObject.initializeMovingAverageFrame();

        //set image output options based on GUI options chosen
        _cvObject.setAnalyzeOptions(_isOutputImages, _imageOutputSize);

        //true when the current frame is the first of the video or the first
        //after a user edit-point skip (resets the moving-average baseline)
        bool isEditFrame = true;

        //emit a starting signal to show that analysis has begun for very long jobs
        emit progressSignal(1);

        //has an OpenCV error occurred on this run
        bool isErrorThrown = false;

        //Main Analysis Loop//
        while(true)
        {
            //path of any image file saved this frame; forwarded to the carousel
            QString savedImagePath;

            //analyze current video frame
            try
            {
                savedImagePath = _cvObject.analyzeCurrentFrame(currentFrameNumber, regionCoordinates, videoInfo, regionData, isEditFrame);
            }
            catch(cv::Exception& e)//if an OpenCV error is caught: end the loop, free frame data, and trigger the error window below
            {
                //Commented out for final release
                //const char* err_msg = e.what();
                //std::cout << err_msg;

                isErrorThrown = true;
                //also marks the run cancelled so the results block is skipped
                _isCancelled = true;
                _cvObject.deallocateFramesOnError();
                break;
            }

            //check if the user has stopped the analysis by clicking a button on the GUI
            if(_isCancelled == true)
            {
                emit progressSignal(0);
                break;
            }
            //if we are at the end of the video, or at the user selected stopping
            //point, end analysis (_stopSecond is in milliseconds here)
            if(_cvObject.getCurrentVideoTime() >= _stopSecond)
            {
                break;
            }

            //compute current percentage completion (float division on purpose)
            float framesAnalyzed = currentFrameNumber;
            float lastFrameToAnylize = analysisEndFrame;

            //only emit when the integer percentage actually advances
            if(percentComplete < int((framesAnalyzed/lastFrameToAnylize) * 100))
            {
                percentComplete = int((framesAnalyzed/lastFrameToAnylize) * 100);

                //Commented out for final release
                //std::cout << "Analysis is " << percentComplete << "% Complete\n";

                //Emit this data to the GUI to update our progress bar
                emit progressSignal(percentComplete);
            }

            //if we are at an edit point set by the user, skip to the next frame they wish to have analyzed
            //_editPoints is consumed as (pause, resume) pairs, in seconds
            if(_editPoints.size() != 0)
            {
                //if the current video time is equal/greater than the next edit point
                if(_cvObject.getCurrentVideoTime() >= (_editPoints.front() * 1000))
                {
                    //pop old edit time
                    //NOTE(review): front() is read right after this pop with no
                    //empty check — an odd number of edit points would be UB;
                    //confirm the GUI always supplies pairs
                    _editPoints.pop_front();

                    ///set the video to the user selected time to resume analysis
                    _cvObject.setCurrentVideoTime((double) (_editPoints.front() * 1000));
                    currentFrameNumber = _cvObject.getCurrentVideoFrame();

                    //remove analysis resume time
                    _editPoints.pop_front();

                    //we have skipped time in the video; tells the frame analysis
                    //algorithm to reset the moving frame average for the next frame
                    isEditFrame = true;
                }
                else
                {
                    isEditFrame = false;
                }
            }
            else//Else no edit had taken place, proceed as normal
            {
                isEditFrame = false;
            }

            //if an image was saved this frame, pass its file path back through the system
            //(the path is "-"-delimited; field 1 carries the frame/time label)
            if(savedImagePath != "")
            {
                _parseString = savedImagePath.split("-");
                sendImageInfoSlot(savedImagePath, _parseString.at(1).toLocal8Bit().constData());
            }

        }//End While, Main Analysis Loop

        //release average frame data from memory
        _cvObject.deallocateMovingAverageFrame();

        //after the entire video has been analyzed, close the video filestream
        _cvObject.closeVideoFile();

        //if the user did not stop the analysis while it was in progress, output the analysis result data
        if(_isCancelled == false)
        {
            //reset progress bar
            emit progressSignal(0);

            //write the analysis results to the text report
            Result getResult;
            getResult.exportToText(videoFilePath, outputFilePath, videoInfo, regionData, _regionNames);

            // Prepare the result object to be emitted.
            //NOTE(review): this emits a freshly default-constructed Result, not
            //getResult — and the heap allocation has no visible delete; confirm
            //the signal receiver takes ownership and that an empty Result is intended
            _result = new Result();
            emit sendResultSignal(_result);
        }

        //if an openCV error has ended the analysis
        if(isErrorThrown == true)
        {
            emit progressSignal(0);
            //display error window letting user know something went wrong while analyzing this video
            displayErrorMessageSignal();
        }
    }
}