Example #1
    // Prediction
    //  Accepts a single integral image/channel, so it is intended for testing only
    //  Assumes that predPtr is already allocated and has the same size as imgPtr
    DLL_EXPORT
    void predictWithChannel( void *modelPtr, ImagePixelType *imgPtr,
                              IntegralImagePixelType *chImgPtr,
                              int width, int height, int depth,
                              PredictionPixelType *predPtr )
    {
        Matrix3D<PredictionPixelType> predMatrix;
        predMatrix.fromSharedData( predPtr, width, height, depth );

        // create roi for image, no GT available
        ROIData roi;
        roi.init( imgPtr, 0, 0, 0, width, height, depth );

        // get the precomputed integral images
        ROIData::IntegralImageType ii;
        ii.fromSharedData(chImgPtr, width, height, depth);
        roi.addII( ii.internalImage().data() );

        MultipleROIData allROIs;
        allROIs.add( shared_ptr_nodelete(ROIData, &roi) );

        Booster adaboost;
        adaboost.setModel( *((BoosterModel *) modelPtr) );

        adaboost.predict<false>( allROIs, &predMatrix );
    }
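
A hypothetical caller for this wrapper, sketched under the assumption that the header declaring predictWithChannel and the pixel typedefs is included and that modelPtr was obtained from the library's own training/deserialization entry points.

    // Usage sketch (not part of the library).
    #include <cstddef>
    #include <vector>

    void runSinglePrediction( void *modelPtr, int width, int height, int depth,
                              ImagePixelType *rawImage,                 // width*height*depth voxels
                              IntegralImagePixelType *integralChannel ) // precomputed integral image, same size
    {
        // The wrapper expects the output buffer to be pre-allocated with
        // exactly the same dimensions as the input image.
        std::vector<PredictionPixelType> prediction( std::size_t(width) * height * depth );

        predictWithChannel( modelPtr, rawImage, integralChannel,
                            width, height, depth,
                            prediction.data() );

        // prediction now holds one score per voxel.
    }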
Example #2
    // Prediction for a single ROI
    //  Accepts an arbitrary number of integral images/channels.
    //  Assumes that predPtr is already allocated and has the same size as imgPtr
    //  Returns 0 on success, -1 on error
    DLL_EXPORT
    int predictWithChannels( void *modelPtr, ImagePixelType *imgPtr,
                              void *eigVecImgPtr,
                              int width, int height, int depth,
                              IntegralImagePixelType **chImgPtr,
                              int numChannels, double zAnisotropyFactor,
                              int useEarlyStopping,
                              int useROI, int x1, int y1, int z1, int x2, int y2, int z2, // used only if useROI != 0
                              PredictionPixelType *predPtr )
    {
        Matrix3D<PredictionPixelType> predMatrix;
        predMatrix.fromSharedData( predPtr, width, height, depth );

        // create roi for image, no GT available
        ROIData roi;
        roi.init( imgPtr, 0, 0, 0, width, height, depth, zAnisotropyFactor, 0.0, (const ROIData::RotationMatrixType *) eigVecImgPtr );
        std::unique_ptr<ROIData::IntegralImageType[]> ii(new ROIData::IntegralImageType[numChannels]);  // TODO: remove

        for (int ch=0; ch < numChannels; ch++)
        {
           ii[ch].fromSharedData(chImgPtr[ch], width, height, depth);

           roi.addII( ii[ch].internalImage().data() );
        }

        MultipleROIData allROIs;
        allROIs.add( shared_ptr_nodelete(ROIData, &roi) );

        try
        {
            Booster adaboost;
            adaboost.setModel( *((BoosterModel *) modelPtr) );
            if(useEarlyStopping != 0)
            {
                if (useROI)
                {
                    ROICoordinates subROI;
                    subROI.x1 = x1; subROI.y1 = y1; subROI.z1 = z1;
                    subROI.x2 = x2; subROI.y2 = y2; subROI.z2 = z2;
                    
                    adaboost.predictWithFeatureOrdering<true>( allROIs, &predMatrix, 0, IIBOOST_NUM_THREADS, &subROI );
                }
                else
                    adaboost.predictWithFeatureOrdering<true>( allROIs, &predMatrix );
            }
            else
                adaboost.predictWithFeatureOrdering<false>( allROIs, &predMatrix );
        }
        catch( std::exception &e )
        {
            printf("Error in prediction: %s\n", e.what());
            return -1;
        }

        return 0;
    }
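
A hypothetical caller for the wrapper above, sketched under the assumption that the header declaring predictWithChannels and the pixel typedefs is included and that modelPtr comes from the library's own training/deserialization entry points. It restricts prediction to a sub-box; note that in this wrapper the sub-ROI is only honoured when early stopping is enabled as well.

    // Usage sketch (not part of the library). channels[] holds numChannels
    // precomputed integral images, each of size width*height*depth.
    #include <cstddef>
    #include <vector>

    int predictSubVolume( void *modelPtr, ImagePixelType *rawImage,
                          IntegralImagePixelType **channels, int numChannels,
                          int width, int height, int depth, double zAnisotropyFactor,
                          std::vector<PredictionPixelType> &prediction )
    {
        prediction.resize( std::size_t(width) * height * depth );

        // Bounding box of the sub-volume to predict (arbitrary example values)
        const int x1 = 0, y1 = 0, z1 = 0;
        const int x2 = width / 2, y2 = height / 2, z2 = depth / 2;

        return predictWithChannels( modelPtr, rawImage,
                                    nullptr,  // eigVecImgPtr: per-voxel orientation data; assumed optional here
                                    width, height, depth,
                                    channels, numChannels, zAnisotropyFactor,
                                    1,        // useEarlyStopping
                                    1, x1, y1, z1, x2, y2, z2,  // useROI + sub-box
                                    prediction.data() );
    }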
Example #3
QMap<int, ROIData> ROITool::computeROIData()
{
    Q_ASSERT(m_roiPolygon);

    // To compute the voxels inside the polygon we use a sweep-line approach, tracing a set of lines within the bounds of the drawn polygon.
    // From the intersections between the polygon segments and each sweep line we can determine which points of the line lie inside the polygon
    // and read the corresponding voxel values.
    // The sweep lines are horizontal and are swept downwards in the vertical direction.
    double bounds[6];
    m_roiPolygon->getBounds(bounds);

    int xIndex, yIndex, zIndex;
    m_2DViewer->getView().getXYZIndexes(xIndex, yIndex, zIndex);

    // Build the initial sweep line
    // We add a small margin to the x/y bounds so the sweep-line algorithm behaves correctly
    // when vertices or segments lie exactly on the bounding lines
    double *spacing = m_2DViewer->getMainInput()->getSpacing();
    double xMargin = spacing[xIndex] * 1.1;
    double yMargin = spacing[yIndex] * 1.1;
    
    // First point of the sweep line, placed at the minimum x, y, z bounds of the polygon
    Point3D sweepLineBeginPoint;
    sweepLineBeginPoint[xIndex] = bounds[xIndex * 2] - xMargin;
    sweepLineBeginPoint[yIndex] = bounds[yIndex * 2] - yMargin;
    sweepLineBeginPoint[zIndex] = bounds[zIndex * 2];
    
    // Second point of the sweep line: same as the first but at the maximum x bound of the polygon, so the line is horizontal
    Point3D sweepLineEndPoint;
    sweepLineEndPoint[xIndex] = bounds[xIndex * 2 + 1] + xMargin;
    sweepLineEndPoint[yIndex] = bounds[yIndex * 2] - yMargin;
    sweepLineEndPoint[zIndex] = bounds[zIndex * 2];

    // The sweep line ends at the maximum y bound of the polygon
    double verticalLimit = bounds[yIndex * 2 + 1] + yMargin;

    // Compute the ROI data corresponding to each input
    QMap<int, ROIData> roiDataMap;
    for (int i = 0; i < m_2DViewer->getNumberOfInputs(); ++i)
    {
        // Compute the voxel values inside of the polygon if the input is visible and the images are monochrome
        if (m_2DViewer->isInputVisible(i) && !m_2DViewer->getInput(i)->getImage(0)->getPhotometricInterpretation().isColor())
        {
            ROIData roiData = computeVoxelValues(m_roiPolygon->getSegments(), sweepLineBeginPoint, sweepLineEndPoint, verticalLimit, i);
            
            // Set additional information of the ROI data
            roiData.setUnits(m_2DViewer->getInput(i)->getPixelUnits());
            roiData.setModality(m_2DViewer->getInput(i)->getModality());

            roiDataMap.insert(i, roiData);
        }
    }

    return roiDataMap;
}
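
As a standalone illustration of the sweep-line idea described in the comments above (not Starviewer code): intersect one horizontal line with the polygon edges, sort the crossings by x, and every point between an odd crossing and the next even one lies inside the polygon. The types and function below are hypothetical.

// Scanline sketch: inside spans of a single horizontal line y = lineY
// for a closed 2D polygon, using the even-odd (crossing) rule.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

struct Pt { double x, y; };

std::vector<std::pair<double, double>> insideSpansOnLine(const std::vector<Pt> &polygon, double lineY)
{
    std::vector<double> crossings;
    const std::size_t n = polygon.size();
    for (std::size_t i = 0; i < n; ++i)
    {
        const Pt &a = polygon[i];
        const Pt &b = polygon[(i + 1) % n];
        // Half-open rule on y so a vertex lying exactly on the line is not counted twice
        if ((a.y <= lineY && b.y > lineY) || (b.y <= lineY && a.y > lineY))
        {
            const double t = (lineY - a.y) / (b.y - a.y);
            crossings.push_back(a.x + t * (b.x - a.x));
        }
    }
    std::sort(crossings.begin(), crossings.end());

    // Consecutive pairs [c0,c1], [c2,c3], ... are the parts of the line inside the polygon;
    // sampling voxel centres within these spans yields the voxels covered by the ROI.
    std::vector<std::pair<double, double>> spans;
    for (std::size_t i = 0; i + 1 < crossings.size(); i += 2)
        spans.push_back({crossings[i], crossings[i + 1]});
    return spans;
}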
Example #4
    // Per-weak-learner prediction for a single ROI
    //  Accepts an arbitrary number of integral images/channels.
    //  Assumes predPtr holds one pre-allocated buffer per weak learner, each of the same size as imgPtr
    //  Returns 0 on success, -1 on error
    DLL_EXPORT
    int predictIndividualWeakLearnersWithChannels( void *modelPtr, ImagePixelType *imgPtr,
                                                   void *eigVecImgPtr,
                                                   int width, int height, int depth,
                                                   IntegralImagePixelType **chImgPtr,
                                                   int numChannels, double zAnisotropyFactor,
                                                   WLPredictionPixelType **predPtr)
    {
        BoosterModel* model = static_cast<BoosterModel*>(modelPtr);
        
        int numWL = model->size();
        std::unique_ptr<Matrix3D<WLPredictionPixelType>[]> predMatrix(new Matrix3D<WLPredictionPixelType>[numWL]);  // TODO: remove
        for(int i=0; i < numWL; ++i)
            predMatrix[i].fromSharedData(predPtr[i], width, height, depth);
        
        // create roi for image, no GT available
        ROIData roi;
        roi.init( imgPtr, 0, 0, 0, width, height, depth, zAnisotropyFactor, 0.0, (const ROIData::RotationMatrixType *) eigVecImgPtr );
        std::unique_ptr<ROIData::IntegralImageType[]> ii(new ROIData::IntegralImageType[numChannels]);  // TODO: remove

        for (int ch=0; ch < numChannels; ch++)
        {
           ii[ch].fromSharedData(chImgPtr[ch], width, height, depth);

           roi.addII( ii[ch].internalImage().data() );
        }

        MultipleROIData allROIs;
        allROIs.add( shared_ptr_nodelete(ROIData, &roi) );

        try
        {
            Booster adaboost;
            adaboost.setModel(*model);
            adaboost.predictIndividualWeakLearners(allROIs, predMatrix.get());
        }
        catch( std::exception &e )
        {
            printf("Error in prediction: %s\n", e.what());
            return -1;
        }

        return 0;
    }
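
A hypothetical caller for the wrapper above: it allocates one width*height*depth buffer per weak learner and passes the array of raw pointers the wrapper expects. How numWL is obtained from the serialized BoosterModel is outside this snippet; it must match the model size used inside the wrapper.

    // Usage sketch (not part of the library).
    #include <cstddef>
    #include <vector>

    int predictPerWeakLearner( void *modelPtr, ImagePixelType *rawImage, void *eigVecImgPtr,
                               IntegralImagePixelType **channels, int numChannels,
                               int width, int height, int depth,
                               double zAnisotropyFactor, int numWL )
    {
        const std::size_t voxels = std::size_t(width) * height * depth;

        // One prediction volume per weak learner, plus the pointer array expected by the wrapper
        std::vector<std::vector<WLPredictionPixelType>> buffers( numWL, std::vector<WLPredictionPixelType>( voxels ) );
        std::vector<WLPredictionPixelType *> predPtrs( numWL );
        for (int i = 0; i < numWL; ++i)
            predPtrs[i] = buffers[i].data();

        return predictIndividualWeakLearnersWithChannels( modelPtr, rawImage, eigVecImgPtr,
                                                          width, height, depth,
                                                          channels, numChannels, zAnisotropyFactor,
                                                          predPtrs.data() );
    }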