Пример #1
0
 // Constructor: caches the parameters used when drawing the GUI info overlay.
 // outputSize: expected output image resolution; the text border margin is 2.5% of its larger side.
 // numberGpus: number of GPUs producing frames (used for FPS bookkeeping and element-name switching).
 // guiEnabled: whether the interactive GUI is enabled (controls the "'h' for help" hint in addInfo).
 GuiInfoAdder::GuiInfoAdder(const Point<int>& outputSize, const int numberGpus, const bool guiEnabled) :
     mOutputSize{outputSize},
     // 2.5% of the larger output dimension, rounded to the nearest int
     mBorderMargin{intRound(fastMax(mOutputSize.x, mOutputSize.y) * 0.025)},
     mNumberGpus{numberGpus},
     mGuiEnabled{guiEnabled},
     mFpsCounter{0u},
     // max() sentinels: presumably force the first addInfo() call to refresh the
     // rendered-element name and to treat the first frame id as new — TODO confirm
     // against updateFps and the addInfo element-switch logic.
     mLastElementRenderedCounter{std::numeric_limits<int>::max()},
     mLastId{std::numeric_limits<unsigned long long>::max()}
 {
 }
Пример #2
0
 // Draws the runtime overlay (FPS, rendered-element name, frame number, people
 // count, and per-person IDs) on top of an already rendered output frame.
 // cvOutputData: frame to draw on, modified in place; must be non-empty.
 // numberPeople: people count printed at the bottom-right corner.
 // id: producer frame id, forwarded to updateFps for the FPS estimate.
 // elementRenderedName: name of the element currently being rendered (top-left text).
 // frameNumber: frame number printed at the bottom-left corner.
 // poseIds/poseKeypoints: per-person ids and keypoints forwarded to addPeopleIds.
 // Errors are reported through error() (rethrown via the catch block).
 void GuiInfoAdder::addInfo(cv::Mat& cvOutputData, const int numberPeople, const unsigned long long id,
                            const std::string& elementRenderedName, const unsigned long long frameNumber,
                            const Array<long long>& poseIds, const Array<float>& poseKeypoints)
 {
     try
     {
         // Sanity check
         if (cvOutputData.empty())
             error("Wrong input element (empty cvOutputData).", __LINE__, __FUNCTION__, __FILE__);
         // Size
         // Margin recomputed from the actual frame (2.5% of its larger side),
         // which may differ from the mOutputSize the constructor was given.
         const auto borderMargin = intRound(fastMax(cvOutputData.cols, cvOutputData.rows) * 0.025);
         // Update fps
         updateFps(mLastId, mFps, mFpsCounter, mFpsQueue, id, mNumberGpus);
         // Fps or s/gpu
         char charArrayAux[15];
         std::snprintf(charArrayAux, 15, "%4.1f fps", mFps);
         // Recording inverse: sec/gpu
         // std::snprintf(charArrayAux, 15, "%4.2f s/gpu", (mFps != 0. ? mNumberGpus/mFps : 0.));
         putTextOnCvMat(cvOutputData, charArrayAux, {intRound(cvOutputData.cols - borderMargin), borderMargin},
                        WHITE_SCALAR, true, cvOutputData.cols);
         // Part to show
         // Allowing some buffer when changing the part to show (if >= 2 GPUs)
         // I.e. one GPU might return a previous part after the other GPU returns the new desired part, it looks
         // like a mini-bug on screen
         // Difference between Titan X (~110 ms) & 1050 Ti (~290ms)
         if (mNumberGpus == 1 || (elementRenderedName != mLastElementRenderedName
                                  && mLastElementRenderedCounter > 4))
         {
             mLastElementRenderedName = elementRenderedName;
             mLastElementRenderedCounter = 0;
         }
         // Clamp before incrementing so the counter can never overflow int
         mLastElementRenderedCounter = fastMin(mLastElementRenderedCounter, std::numeric_limits<int>::max() - 5);
         mLastElementRenderedCounter++;
         // Add each person ID
         addPeopleIds(cvOutputData, poseIds, poseKeypoints, borderMargin);
         // OpenPose name as well as help or part to show
         putTextOnCvMat(cvOutputData, "OpenPose - " +
                        (!mLastElementRenderedName.empty() ?
                             mLastElementRenderedName : (mGuiEnabled ? "'h' for help" : "")),
                        {borderMargin, borderMargin}, WHITE_SCALAR, false, cvOutputData.cols);
         // Frame number
         putTextOnCvMat(cvOutputData, "Frame: " + std::to_string(frameNumber),
                        {borderMargin, (int)(cvOutputData.rows - borderMargin)}, WHITE_SCALAR, false, cvOutputData.cols);
         // Number people
         putTextOnCvMat(cvOutputData, "People: " + std::to_string(numberPeople),
                        {(int)(cvOutputData.cols - borderMargin), (int)(cvOutputData.rows - borderMargin)},
                        WHITE_SCALAR, true, cvOutputData.cols);
     }
     catch (const std::exception& e)
     {
         error(e.what(), __LINE__, __FUNCTION__, __FILE__);
     }
 }
Пример #3
0
/*
 * Counts which mirrors are intersected by the straight path starting at S, and ending at E
 * where the mirrors are configured according to s_dir and d_dir.
 * The result is stored in mirror_count.
 */
void OrbifoldData::countMirrorsHeterogeneous(const Vector2d& S, const Vector2d& E,
    const Vector2d& dir, const StaticDirectionInfo& s_dir, const DynamicDirectionInfo& d_dir,
    unsigned* mirror_count) const {

  // The projection of the start position onto the direction perpendicular to the mirror plane
  const double So = dot(S, s_dir.o);

  // The projection of the path direction onto the direction perpendicular to the mirror plane
  const double dir_dot_o = dot(dir, s_dir.o);

  // Compute the index of the first and last mirror planes intersected by the path
  int m0 = 0, mE = 0;
  if(dir_dot_o > 0) {
    m0 = fastCeil(So / d_dir.v_sep);
    mE = fastFloor(dot(E, s_dir.o) / d_dir.v_sep);
  } else if (dir_dot_o < 0) {
    m0 = fastFloor(So / d_dir.v_sep);
    mE = fastCeil(dot(E, s_dir.o) / d_dir.v_sep);
  } else {
    // Disregard rays parallel to this mirror direction since they won't intersect any mirrors
    return;
  }

  // The distance along the path of the first and last mirror intersections
  const double k0 = (m0 * d_dir.v_sep - So) / dir_dot_o;
  const double kE = (mE * d_dir.v_sep - So) / dir_dot_o;

  // Bail out if the path doesn't cross a mirror
  if(kE < k0) { return; }

  // The total number of mirrors intersected by the path
  const unsigned num_mirrors = abs(mE - m0);

  // dK is the distance along the path between mirrors
  const double  dK = (kE - k0) / fastMax(num_mirrors, 1);

  // k is the distance along the path of each mirror plane intersected
  double k = k0;
  // i keeps track of the type of mirror boundary
  unsigned i = abs(m0) % DynamicDirectionInfo::OFFSET_ARRAY_LEN;

  for(unsigned j = 0; j <= num_mirrors; j++) {
    // Where the ray intersects the mirror line
    const Vector2d P = S + k * dir;

    // Project the intersection onto the mirror plane, and figure out which type of mirror was intersected
    const int l = fastFloor((dot(P, s_dir.m) + d_dir.offset_array[i]) / d_dir.h_sep);
    mirror_count[s_dir.index_array[abs(l % static_cast<int>(StaticDirectionInfo::NUM_MIRRORS))]] += 1;

    k += dK;
    i = (i + 1) % DynamicDirectionInfo::OFFSET_ARRAY_LEN;
  }
}
Пример #4
0
 // Chooses CUDA thread-block and grid dimensions for processing a frame of the
 // given size: the thread-block size grows with resolution (table below), and
 // the grid is sized so blocks * threads covers the full frame in x and y.
 // numberCudaThreads / numberCudaBlocks: output dim3 values (modified in place).
 // frameSize: frame width/height in pixels.
 // Calls error() when OpenPose was compiled without the USE_CUDA macro.
 void getNumberCudaThreadsAndBlocks(dim3& numberCudaThreads, dim3& numberCudaBlocks, const Point<int>& frameSize)
 {
     try
     {
         #ifdef USE_CUDA
             // numberCudaThreads
             // Image <= 480p    --> THREADS_PER_BLOCK_TINY
             // Image <= 720p    --> THREADS_PER_BLOCK_SMALL
             // Image <= 1080p   --> THREADS_PER_BLOCK_MEDIUM
             // Image <= 4k      --> THREADS_PER_BLOCK_BIG
             // Image >  4K      --> THREADS_PER_BLOCK_HUGE
             const auto maxValue = fastMax(frameSize.x, frameSize.y);
             // > 4K
             if (maxValue > 3840)
                 numberCudaThreads = THREADS_PER_BLOCK_HUGE;
             // 4K
             // Fixed threshold: was `> 1980`, which contradicted the documented
             // 1080p boundary (FullHD is 1920 px wide) and gave 1921-1980 px
             // frames the MEDIUM block size instead of BIG.
             else if (maxValue > 1920)
                 numberCudaThreads = THREADS_PER_BLOCK_BIG;
             // FullHD
             else if (maxValue > 1280)
                 numberCudaThreads = THREADS_PER_BLOCK_MEDIUM;
             // HD
             else if (maxValue > 640)
                 numberCudaThreads = THREADS_PER_BLOCK_SMALL;
             // VGA
             else
                 numberCudaThreads = THREADS_PER_BLOCK_TINY;
             // numberCudaBlocks
             numberCudaBlocks = dim3{getNumberCudaBlocks((unsigned int)frameSize.x, numberCudaThreads.x),
                                     getNumberCudaBlocks((unsigned int)frameSize.y, numberCudaThreads.y),
                                     numberCudaThreads.z};
         #else
             UNUSED(numberCudaThreads);
             UNUSED(numberCudaBlocks);
             UNUSED(frameSize);
             error("OpenPose must be compiled with the `USE_CUDA` macro definition in order to use this"
                   " functionality.", __LINE__, __FUNCTION__, __FILE__);
         #endif
     }
     catch (const std::exception& e)
     {
         error(e.what(), __LINE__, __FUNCTION__, __FILE__);
     }
 }
Пример #5
0
    // CPU renderer: draws keypoint circles and limb lines for every person onto
    // a planar 3-channel float frame.
    // frameArray: 3-D float array (channels x height x width, checked below); modified in place.
    // keypoints: per-person keypoints, shape (people, parts, >=3) with (x, y, score) triplets.
    // pairs: flat list of keypoint index pairs to connect with lines.
    // colors: flat RGB triplets, cycled via modulo indexing.
    // thicknessCircleRatio / thicknessLineRatioWRTCircle: size factors relative to frame area.
    // poseScales: per-part scale factors, cycled via modulo indexing.
    // threshold: minimum keypoint score for a part/line to be drawn.
    void renderKeypointsCpu(Array<float>& frameArray, const Array<float>& keypoints,
                            const std::vector<unsigned int>& pairs, const std::vector<float> colors,
                            const float thicknessCircleRatio, const float thicknessLineRatioWRTCircle,
                            const std::vector<float>& poseScales, const float threshold)
    {
        try
        {
            if (!frameArray.empty())
            {
                // Array<float> --> cv::Mat
                auto frame = frameArray.getCvMat();

                // Security check
                // NOTE(review): errorMessage is not defined in this block — presumably a
                // file-scope constant elsewhere in this translation unit.
                if (frame.dims != 3 || frame.size[0] != 3)
                    error(errorMessage, __LINE__, __FUNCTION__, __FILE__);

                // Get frame channels
                const auto width = frame.size[2];
                const auto height = frame.size[1];
                const auto area = width * height;
                const auto channelOffset = area * sizeof(float) / sizeof(uchar);
                // Wrap each colour plane (stored consecutively in memory) as its own
                // single-channel Mat; the B/G/R naming follows the offsets used here.
                cv::Mat frameB(height, width, CV_32FC1, &frame.data[0]);
                cv::Mat frameG(height, width, CV_32FC1, &frame.data[channelOffset]);
                cv::Mat frameR(height, width, CV_32FC1, &frame.data[2 * channelOffset]);

                // Parameters
                const auto lineType = 8;
                const auto shift = 0;
                const auto numberColors = colors.size();
                const auto numberScales = poseScales.size();
                const auto thresholdRectangle = 0.1f;
                const auto numberKeypoints = keypoints.getSize(1);

                // Keypoints
                for (auto person = 0 ; person < keypoints.getSize(0) ; person++)
                {
                    const auto personRectangle = getKeypointsRectangle(keypoints, person, thresholdRectangle);
                    if (personRectangle.area() > 0)
                    {
                        // Fraction of the frame the person occupies (capped at 1)
                        const auto ratioAreas = fastMin(1.f, fastMax(personRectangle.width/(float)width,
                                                                     personRectangle.height/(float)height));
                        // Size-dependent variables
                        const auto thicknessRatio = fastMax(intRound(std::sqrt(area)
                                                                     * thicknessCircleRatio * ratioAreas), 2);
                        // Negative thickness in cv::circle means that a filled circle is to be drawn.
                        const auto thicknessCircle = fastMax(1, (ratioAreas > 0.05f ? thicknessRatio : -1));
                        const auto thicknessLine = fastMax(1, intRound(thicknessRatio * thicknessLineRatioWRTCircle));
                        const auto radius = thicknessRatio / 2;

                        // Draw lines
                        for (auto pair = 0u ; pair < pairs.size() ; pair+=2)
                        {
                            const auto index1 = (person * numberKeypoints + pairs[pair]) * keypoints.getSize(2);
                            const auto index2 = (person * numberKeypoints + pairs[pair+1]) * keypoints.getSize(2);
                            // Only draw a limb when both endpoints are confident enough
                            if (keypoints[index1+2] > threshold && keypoints[index2+2] > threshold)
                            {
                                // NOTE(review): thicknessLineScaled is float; OpenCV's thickness
                                // parameter is int, so this value is truncated implicitly — confirm intended.
                                const auto thicknessLineScaled = thicknessLine
                                                               * poseScales[pairs[pair+1] % numberScales];
                                const auto colorIndex = pairs[pair+1]*3; // Before: colorIndex = pair/2*3;
                                const cv::Scalar color{colors[colorIndex % numberColors],
                                                       colors[(colorIndex+1) % numberColors],
                                                       colors[(colorIndex+2) % numberColors]};
                                const cv::Point keypoint1{intRound(keypoints[index1]), intRound(keypoints[index1+1])};
                                const cv::Point keypoint2{intRound(keypoints[index2]), intRound(keypoints[index2+1])};
                                cv::line(frameR, keypoint1, keypoint2, color[0], thicknessLineScaled, lineType, shift);
                                cv::line(frameG, keypoint1, keypoint2, color[1], thicknessLineScaled, lineType, shift);
                                cv::line(frameB, keypoint1, keypoint2, color[2], thicknessLineScaled, lineType, shift);
                            }
                        }

                        // Draw circles
                        for (auto part = 0 ; part < numberKeypoints ; part++)
                        {
                            const auto faceIndex = (person * numberKeypoints + part) * keypoints.getSize(2);
                            if (keypoints[faceIndex+2] > threshold)
                            {
                                // NOTE(review): radiusScaled/thicknessCircleScaled are float; OpenCV's
                                // radius/thickness parameters are int (implicit truncation) — confirm intended.
                                const auto radiusScaled = radius * poseScales[part % numberScales];
                                const auto thicknessCircleScaled = thicknessCircle * poseScales[part % numberScales];
                                const auto colorIndex = part*3;
                                const cv::Scalar color{colors[colorIndex % numberColors],
                                                       colors[(colorIndex+1) % numberColors],
                                                       colors[(colorIndex+2) % numberColors]};
                                const cv::Point center{intRound(keypoints[faceIndex]),
                                                       intRound(keypoints[faceIndex+1])};
                                cv::circle(frameR, center, radiusScaled, color[0], thicknessCircleScaled, lineType,
                                           shift);
                                cv::circle(frameG, center, radiusScaled, color[1], thicknessCircleScaled, lineType,
                                           shift);
                                cv::circle(frameB, center, radiusScaled, color[2], thicknessCircleScaled, lineType,
                                           shift);
                            }
                        }
                    }
                }
            }
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    }
Пример #6
0
    // Runs the Caffe face-keypoint network on each detected face rectangle and
    // fills mFaceKeypoints with one (x, y, score) triplet per face part per person.
    // faceRectangles: per-person face ROIs, in cvInputData pixel coordinates;
    //                 when empty, mFaceKeypoints is reset and nothing is run.
    // cvInputData: full input frame the rectangles refer to; must be non-empty.
    // scaleInputToOutput: factor mapping input-image coordinates to output coordinates.
    // Also fills mHeatMaps when heat-map types were requested. When OpenPose was
    // built without USE_CAFFE, all arguments are ignored (no error is raised here).
    void FaceExtractorCaffe::forwardPass(const std::vector<Rectangle<float>>& faceRectangles,
                                         const cv::Mat& cvInputData,
                                         const double scaleInputToOutput)
    {
        try
        {
            #if defined USE_CAFFE
                if (!faceRectangles.empty())
                {
                    // Security checks
                    if (cvInputData.empty())
                        error("Empty cvInputData.", __LINE__, __FUNCTION__, __FILE__);

                    // Fix parameters
                    // Shorter side of the net input, used to scale each face crop
                    const auto netInputSide = fastMin(mNetOutputSize.x, mNetOutputSize.y);

                    // Set face size
                    const auto numberPeople = (int)faceRectangles.size();
                    mFaceKeypoints.reset({numberPeople, (int)FACE_NUMBER_PARTS, 3}, 0);

                    // HeatMaps: define size
                    if (!mHeatMapTypes.empty())
                        mHeatMaps.reset({numberPeople, (int)FACE_NUMBER_PARTS, mNetOutputSize.y, mNetOutputSize.x});

                    // // Debugging
                    // cv::Mat cvInputDataCopy = cvInputData.clone();
                    // Extract face keypoints for each person
                    for (auto person = 0 ; person < numberPeople ; person++)
                    {
                        const auto& faceRectangle = faceRectangles.at(person);
                        // Only consider faces with a minimum pixel area
                        const auto minFaceSize = fastMin(faceRectangle.width, faceRectangle.height);
                        // // Debugging -> red rectangle
                        // log(std::to_string(cvInputData.cols) + " " + std::to_string(cvInputData.rows));
                        // cv::rectangle(cvInputDataCopy,
                        //               cv::Point{(int)faceRectangle.x, (int)faceRectangle.y},
                        //               cv::Point{(int)faceRectangle.bottomRight().x,
                        //                         (int)faceRectangle.bottomRight().y},
                        //               cv::Scalar{0,0,255}, 2);
                        // Get parts
                        // Only run the network on faces at least 40 px on their shorter side;
                        // smaller rectangles leave all-zero keypoints (from the reset above).
                        if (minFaceSize > 40)
                        {
                            // // Debugging -> green rectangle overwriting red one
                            // log(std::to_string(cvInputData.cols) + " " + std::to_string(cvInputData.rows));
                            // cv::rectangle(cvInputDataCopy,
                            //               cv::Point{(int)faceRectangle.x, (int)faceRectangle.y},
                            //               cv::Point{(int)faceRectangle.bottomRight().x,
                            //                         (int)faceRectangle.bottomRight().y},
                            //               cv::Scalar{0,255,0}, 2);
                            // Resize and shift image to face rectangle positions
                            const auto faceSize = fastMax(faceRectangle.width, faceRectangle.height);
                            const double scaleFace = faceSize / (double)netInputSide;
                            // Mscaling maps net-input (crop) coordinates to input-image coordinates;
                            // warpAffine applies it inverted (CV_WARP_INVERSE_MAP) to cut the crop out.
                            cv::Mat Mscaling = cv::Mat::eye(2, 3, CV_64F);
                            Mscaling.at<double>(0,0) = scaleFace;
                            Mscaling.at<double>(1,1) = scaleFace;
                            Mscaling.at<double>(0,2) = faceRectangle.x;
                            Mscaling.at<double>(1,2) = faceRectangle.y;

                            cv::Mat faceImage;
                            cv::warpAffine(cvInputData, faceImage, Mscaling,
                                           cv::Size{mNetOutputSize.x, mNetOutputSize.y},
                                           CV_INTER_LINEAR | CV_WARP_INVERSE_MAP,
                                           cv::BORDER_CONSTANT, cv::Scalar(0,0,0));

                            // cv::Mat -> float*
                            uCharCvMatToFloatPtr(mFaceImageCrop.getPtr(), faceImage, true);

                            // // Debugging
                            // if (person < 5)
                            // cv::imshow("faceImage" + std::to_string(person), faceImage);

                            // 1. Caffe deep network
                            upImpl->spNetCaffe->forwardPass(mFaceImageCrop);

                            // Reshape blobs
                            // Lazily done on the first pass, once output blob shapes are known
                            if (!upImpl->netInitialized)
                            {
                                upImpl->netInitialized = true;
                                reshapeFaceExtractorCaffe(upImpl->spResizeAndMergeCaffe, upImpl->spMaximumCaffe,
                                                          upImpl->spCaffeNetOutputBlob, upImpl->spHeatMapsBlob,
                                                          upImpl->spPeaksBlob, upImpl->mGpuId);
                            }

                            // 2. Resize heat maps + merge different scales
                            #ifdef USE_CUDA
                                upImpl->spResizeAndMergeCaffe->Forward_gpu({upImpl->spCaffeNetOutputBlob.get()},
                                                                           {upImpl->spHeatMapsBlob.get()});
                                cudaCheck(__LINE__, __FUNCTION__, __FILE__);
                            #elif USE_OPENCL
                                upImpl->spResizeAndMergeCaffe->Forward_ocl({upImpl->spCaffeNetOutputBlob.get()},
                                                                           {upImpl->spHeatMapsBlob.get()});
                            #else
                                upImpl->spResizeAndMergeCaffe->Forward_cpu({upImpl->spCaffeNetOutputBlob.get()},
                                                                           {upImpl->spHeatMapsBlob.get()});
                            #endif

                            // 3. Get peaks by Non-Maximum Suppression
                            #ifdef USE_CUDA
                                upImpl->spMaximumCaffe->Forward_gpu({upImpl->spHeatMapsBlob.get()},
                                                                    {upImpl->spPeaksBlob.get()});
                                cudaCheck(__LINE__, __FUNCTION__, __FILE__);
                            #elif USE_OPENCL
                                // CPU Version is already very fast (4ms) and data is sent to connectKeypoints as CPU for now anyway
                                upImpl->spMaximumCaffe->Forward_cpu({upImpl->spHeatMapsBlob.get()}, {upImpl->spPeaksBlob.get()});
                            #else
                                upImpl->spMaximumCaffe->Forward_cpu({upImpl->spHeatMapsBlob.get()},
                                                                    {upImpl->spPeaksBlob.get()});
                            #endif

                            // Map each detected peak from net-input coordinates back to the
                            // input image (via Mscaling) and then to output coordinates.
                            const auto* facePeaksPtr = upImpl->spPeaksBlob->mutable_cpu_data();
                            for (auto part = 0 ; part < mFaceKeypoints.getSize(1) ; part++)
                            {
                                const auto xyIndex = part * mFaceKeypoints.getSize(2);
                                const auto x = facePeaksPtr[xyIndex];
                                const auto y = facePeaksPtr[xyIndex + 1];
                                const auto score = facePeaksPtr[xyIndex + 2];
                                const auto baseIndex = mFaceKeypoints.getSize(2)
                                                     * (part + person * mFaceKeypoints.getSize(1));
                                mFaceKeypoints[baseIndex] = (float)(scaleInputToOutput
                                                                    * (Mscaling.at<double>(0,0) * x
                                                                       + Mscaling.at<double>(0,1) * y
                                                                       + Mscaling.at<double>(0,2)));
                                mFaceKeypoints[baseIndex+1] = (float)(scaleInputToOutput
                                                                      * (Mscaling.at<double>(1,0) * x
                                                                         + Mscaling.at<double>(1,1) * y
                                                                         + Mscaling.at<double>(1,2)));
                                mFaceKeypoints[baseIndex+2] = score;
                            }
                            // HeatMaps: storing
                            if (!mHeatMapTypes.empty()){
                                #ifdef USE_CUDA
                                    updateFaceHeatMapsForPerson(mHeatMaps, person, mHeatMapScaleMode,
                                                                upImpl->spHeatMapsBlob->gpu_data());
                                #else
                                    updateFaceHeatMapsForPerson(mHeatMaps, person, mHeatMapScaleMode,
                                                                upImpl->spHeatMapsBlob->cpu_data());
                                #endif
                            }
                        }
                    }
                    // // Debugging
                    // cv::imshow("AcvInputDataCopy", cvInputDataCopy);
                }
                else
                    mFaceKeypoints.reset();
            #else
                UNUSED(faceRectangles);
                UNUSED(cvInputData);
                UNUSED(scaleInputToOutput);
            #endif
        }
        catch (const std::exception& e)
        {
            error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        }
    }