Code Example #1
    void DeConvolutionLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
    {
        Blob &wghtBlob = blobs[0];

        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
            Blob &convBlob = *inputs[ii];
            Blob &decnBlob = outputs[ii];

            for (int n = 0; n < convBlob.num(); n++)
            {
                for (int g = 0; g < group; g++)
                {
                    Mat dstMat(inpGroupCn, inpH*inpW, decnBlob.type(), decnBlob.ptr(n, g*inpGroupCn));

                    if (is1x1())
                        colMat = dstMat;

                    Mat convMat(outGroupCn, outH*outW, convBlob.type(), convBlob.ptr(n, g*outGroupCn));
                    Mat wghtMat(outGroupCn, ksize, wghtBlob.type(), wghtBlob.ptr(g*outGroupCn));
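                    // colMat = wghtMat^T * convMat (GEMM_1_T transposes the first operand).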
                    gemmCPU(wghtMat, convMat, 1, colMat, 0, GEMM_1_T);

                    col2im(dstMat);

                    if (bias)
                    {
                        float *biasPtr = blobs[1].ptrf() + g*inpGroupCn;
                        Mat biasMat(inpGroupCn, 1, CV_32F, biasPtr);
                        gemmCPU(biasMat, biasOnesMat, 1, dstMat, 1); //TODO: gemv
                    }
                }
            }
        }
    }
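Deconvolution here is the transpose of the convolution in Code Example #2: a GEMM against the transposed weights produces a column buffer, and col2im scatter-adds it back into image layout. Below is a minimal, hypothetical col2im for one CV_32F channel with stride 1 and no padding; the module's real implementation also handles padding, stride, and multiple channels.

    #include <opencv2/core.hpp>

    // Hypothetical minimal col2im: scatter-add a (kH*kW) x (outH*outW) column
    // buffer back into a single-channel CV_32F image (stride 1, no padding).
    static void col2imSimple(const cv::Mat &col, cv::Mat &img, int kH, int kW)
    {
        const int outH = img.rows - kH + 1, outW = img.cols - kW + 1;
        img.setTo(cv::Scalar(0));
        for (int ky = 0; ky < kH; ky++)
            for (int kx = 0; kx < kW; kx++)
            {
                // Row (ky*kW + kx) of col holds this kernel tap's contribution
                // at every output position.
                const float *src = col.ptr<float>(ky * kW + kx);
                for (int y = 0; y < outH; y++)
                    for (int x = 0; x < outW; x++)
                        img.at<float>(y + ky, x + kx) += src[y * outW + x];
            }
    }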
Code Example #2
    void ConvolutionLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
    {
        Blob &wgtBlob = blobs[0];

        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
            Blob &inpBlob = *inputs[ii];
            Blob &outBlob = outputs[ii];

            for (int n = 0; n < inpBlob.num(); n++)
            {
                for (int g = 0; g < group; g++)
                {
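                    // Unfold this image's group-g channels into colMat; the
                    // convolution then reduces to a single GEMM below.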
                    im2col(inpBlob, n, g);

                    Mat kerMat(outGroupCn, ksize, wgtBlob.type(), wgtBlob.ptr(g*outGroupCn));
                    Mat dstMat(outGroupCn, outH*outW, outBlob.type(), outBlob.ptr(n, g*outGroupCn));

                    gemmCPU(kerMat, colMat, 1, dstMat, 0);
                    
                    if (bias)
                    {
                        float *biasPtr = blobs[1].ptrf() + g*outGroupCn;
                        Mat biasMat(outGroupCn, 1, CV_32F, biasPtr);
                        gemmCPU(biasMat, biasOnesMat, 1, dstMat, 1); //TODO: gemv
                    }
                }
            }
        }
    }
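The pattern above is the classic im2col + GEMM formulation: each input patch becomes a column, so the whole convolution collapses into one matrix multiply. A minimal, hypothetical single-channel version (CV_32F, stride 1, no padding) looks like this:

    #include <opencv2/core.hpp>

    // Hypothetical minimal im2col: unfold a single-channel CV_32F image into a
    // (kH*kW) x (outH*outW) column matrix (stride 1, no padding), so that
    // dst = kernelRow * col computes the convolution as one GEMM.
    static cv::Mat im2colSimple(const cv::Mat &img, int kH, int kW)
    {
        const int outH = img.rows - kH + 1, outW = img.cols - kW + 1;
        cv::Mat col(kH * kW, outH * outW, CV_32F);
        for (int ky = 0; ky < kH; ky++)
            for (int kx = 0; kx < kW; kx++)
            {
                float *dst = col.ptr<float>(ky * kW + kx);
                for (int y = 0; y < outH; y++)
                    for (int x = 0; x < outW; x++)
                        dst[y * outW + x] = img.at<float>(y + ky, x + kx);
            }
        return col;
    }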
Code Example #3
File: core_func.cpp Project: c1rew/EasyPR-Android
Mat CutTheRect(Mat &in, Rect &rect)
{
    int size = in.cols;  // (rect.width>rect.height)?rect.width:rect.height;
    Mat dstMat(size, size, CV_8UC1);
    dstMat.setTo(Scalar(0, 0, 0));

    int x = (int)floor((float)(size - rect.width) / 2.0f);
    int y = (int)floor((float)(size - rect.height) / 2.0f);

    // Copy the data inside rect into the center of dstMat
    for (int i = 0; i < rect.height; ++i) {
        // width
        for (int j = 0; j < rect.width; ++j) {
            dstMat.data[dstMat.step[0] * (i + y) + j + x] =
                in.data[in.step[0] * (i + rect.y) + j + rect.x];
        }
    }

    return dstMat;
}
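A minimal usage sketch (the file name is made up): take the bounding box of the non-zero pixels of a binarized character image and center it on a square black canvas with CutTheRect.

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

int main()
{
    cv::Mat in = cv::imread("char.png", cv::IMREAD_GRAYSCALE);  // hypothetical input
    if (in.empty()) return 1;

    // Bounding box of the glyph's non-zero pixels.
    std::vector<cv::Point> points;
    cv::findNonZero(in, points);
    cv::Rect rect = cv::boundingRect(points);

    cv::Mat centered = CutTheRect(in, rect);  // glyph centered on a square canvas
    cv::imwrite("char_centered.png", centered);
    return 0;
}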
Code Example #4
File: shift_layer.cpp Project: cyberCBM/DetectO
    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_Assert(inputs.size() > 0);
        CV_Assert(blobs.size() > 0);

        if(inputs[0]->dims == blobs[0].dims)
        {
            for (size_t ii = 0; ii < outputs.size(); ii++)
            {
                Mat &inpBlob = *inputs[ii];
                Mat &outBlob = outputs[ii];

                outBlob = inpBlob + blobs[0];
            }
        }
        else
        {
            Mat biasOnesMat = internals[0];
            biasOnesMat.setTo(1);
            for (size_t ii = 0; ii < outputs.size(); ii++)
            {
                Mat &inpBlob = *inputs[ii];
                Mat &outBlob = outputs[ii];

                inpBlob.copyTo(outBlob);

                for (int n = 0; n < inpBlob.size[0]; n++)
                {
                    Mat dstMat(inpBlob.size[1], inpBlob.size[2] * inpBlob.size[3],
                               outBlob.type(), outBlob.ptr(n));
                    gemm(blobs[0], biasOnesMat, 1, dstMat, 1, dstMat); //TODO: gemv
                }
            }
        }
    }
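The gemm call above is a rank-1 trick for broadcasting a per-channel bias: an N×1 bias column times a 1×M row of ones yields an N×M matrix whose row c is filled with bias[c], which is then added to dstMat in place (beta = 1). A standalone sketch of the same idea:

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        const int channels = 3, spatial = 4;
        cv::Mat dst = cv::Mat::zeros(channels, spatial, CV_32F);
        cv::Mat bias = (cv::Mat_<float>(channels, 1) << 1.f, 2.f, 3.f);
        cv::Mat ones = cv::Mat::ones(1, spatial, CV_32F);

        // dst = 1 * (bias * ones) + 1 * dst: row c becomes dst[c] + bias[c].
        cv::gemm(bias, ones, 1, dst, 1, dst);
        std::cout << dst << std::endl;  // each row holds its channel's bias
        return 0;
    }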
Code Example #5
    void FullyConnectedLayer::forward(std::vector<Blob*> &input, std::vector<Blob> &output)
    {
        for (size_t i = 0; i < input.size(); i++)
        {
            int M = (int)input[i]->total(0, axis);
            int N = numOutputs;
            int K = innerSize;

            Mat srcMat(M, K, input[i]->type(), input[i]->ptrf());
            Mat weight(N, K, blobs[0].type(), blobs[0].ptrf());
            Mat dstMat(M, N, output[i].type(), output[i].ptrf());

            // Important: for performance reasons, Caffe stores the weights as a transposed array
            gemmCPU(srcMat, weight, 1, dstMat, 0, GEMM_2_T);

            if (bias)
            {
                Mat biasOnesMat = Mat::ones(M, 1, CV_32F);
                Mat biasMat(1, N, CV_32F, blobs[1].ptrf());
                gemmCPU(biasOnesMat, biasMat, 1, dstMat, 1);
            }
        }
    }
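gemmCPU and Blob are internal to this early dnn module; the same Y = X·Wᵀ + 1·b computation can be sketched with the public cv::gemm, using the GEMM_2_T flag for the transposed weight layout (shapes and data below are made up):

    #include <opencv2/core.hpp>

    int main()
    {
        const int M = 2, K = 3, N = 4;             // batch, inputs, outputs
        cv::Mat X(M, K, CV_32F), W(N, K, CV_32F);  // W stored transposed, as in Caffe
        cv::Mat b(1, N, CV_32F);
        cv::randu(X, cv::Scalar::all(-1), cv::Scalar::all(1));
        cv::randu(W, cv::Scalar::all(-1), cv::Scalar::all(1));
        cv::randu(b, cv::Scalar::all(-1), cv::Scalar::all(1));

        cv::Mat Y;
        cv::gemm(X, W, 1, cv::noArray(), 0, Y, cv::GEMM_2_T);  // Y = X * W^T
        cv::Mat ones = cv::Mat::ones(M, 1, CV_32F);
        cv::gemm(ones, b, 1, Y, 1, Y);                         // Y += ones * b
        return 0;
    }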
Code Example #6
int main(int argc, char *argv[]) {

  const unsigned int cameraIndex = 0u;
  const unsigned int numImagesPerFPSMeasurement = 240u;
  const int windowWidth = 1440;
  const int windowHeight = 900;
  const char cascadeFilename[] = "haarcascade_frontalface_alt.xml";
  const double detectionScaleFactor = 1.25;
  const int detectionMinNeighbours = 4;
  const int detectionFlags = CV_HAAR_SCALE_IMAGE;
  const cv::Size detectionMinSize(120, 120);
  const cv::Size detectionMaxSize;
  const cv::Scalar detectionDrawColor(255.0, 0.0, 255.0);
  char strBuffer[256u];
  const size_t strBufferSize = 256u;

  int matType;
  cv::Mat equalizedGrayMat;

#ifdef _WIN32
  snprintf(strBuffer, strBufferSize, "%s/../%s", argv[0], cascadeFilename);
  cv::CascadeClassifier detector(strBuffer);
#else
  cv::CascadeClassifier detector(cascadeFilename);
#endif
  if (detector.empty()) {
    snprintf(strBuffer, strBufferSize, "%s could not be loaded.",
              cascadeFilename);
    SDL_ShowSimpleMessageBox(
      SDL_MESSAGEBOX_ERROR, "Failed to Load Cascade File", strBuffer, NULL);
    return EXIT_FAILURE;
  }
  std::vector<cv::Rect> detectionRects;

  fc2Error error;

  fc2Image image;
  error = fc2CreateImage(&image);
  if (error != FC2_ERROR_OK) {
    showFC2Error(error);
    return EXIT_FAILURE;
  }

  fc2Context context;
  error = fc2CreateContext(&context);
  if (error != FC2_ERROR_OK) {
    showFC2Error(error);
    return EXIT_FAILURE;
  }
  
  fc2PGRGuid cameraGUID;
  error = fc2GetCameraFromIndex(context, cameraIndex, &cameraGUID);
  if (error != FC2_ERROR_OK) {
    showFC2Error(error);
    return EXIT_FAILURE;
  }
  
  error = fc2Connect(context, &cameraGUID);
  if (error != FC2_ERROR_OK) {
    showFC2Error(error);
    return EXIT_FAILURE;
  }

  error = fc2StartCapture(context);
  if (error != FC2_ERROR_OK) {
    fc2Disconnect(context);
    showFC2Error(error);
    return EXIT_FAILURE;
  }

  if (SDL_Init(SDL_INIT_VIDEO) < 0) {
    fc2StopCapture(context);
    fc2Disconnect(context);
    showSDLError();
    return EXIT_FAILURE;
  }

  SDL_Window *window = SDL_CreateWindow(
      "LookSpry", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
      windowWidth, windowHeight, 0u);
  if (window == NULL) {
    fc2StopCapture(context);
    fc2Disconnect(context);
    showSDLError();
    return EXIT_FAILURE;
  }

  SDL_Renderer *renderer = SDL_CreateRenderer(window, -1, 0u);
  if (renderer == NULL) {
    fc2StopCapture(context);
    fc2Disconnect(context);
    SDL_DestroyWindow(window);
    showSDLError();
    return EXIT_FAILURE;
  }
  
  SDL_RendererInfo rendererInfo;
  SDL_GetRendererInfo(renderer, &rendererInfo);

  if (strcmp(rendererInfo.name, "direct3d") == 0) {
    SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "best");
  } else if (strcmp(rendererInfo.name, "opengl") == 0) {
    SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
  }

  snprintf(strBuffer, strBufferSize, "LookSpry | %s", rendererInfo.name);
  SDL_SetWindowTitle(window, strBuffer);

  SDL_Texture *videoTex = NULL;
  void *videoTexPixels;
  int pitch;

  clock_t startTicks = clock();
  clock_t endTicks;
  unsigned int numImagesCaptured = 0u;

  bool running = true;
  bool detecting = true;
  bool mirroring = true;
  SDL_Event event;
  while (running) {
    while (SDL_PollEvent(&event)) {
      if (event.type == SDL_QUIT) {
        running = false;
        break;
      } else if (event.type == SDL_KEYUP) {
        switch(event.key.keysym.sym) {
        // When 'd' is pressed, start or stop [d]etection.
        case SDLK_d:
          detecting = !detecting;
          break;
        // When 'm' is pressed, [m]irror or un-mirror the video.
        case SDLK_m:
          mirroring = !mirroring;
          break;
        default:
          break;
        }
      }
    }

    error = fc2RetrieveBuffer(context, &image);
    if (error != FC2_ERROR_OK) {
       fc2Disconnect(context);
       SDL_DestroyTexture(videoTex);
       SDL_DestroyRenderer(renderer);
       SDL_DestroyWindow(window);
       showFC2Error(error);
       return EXIT_FAILURE;
    }

    if (videoTex == NULL) {
      equalizedGrayMat.create(image.rows, image.cols, CV_8UC1);
      SDL_RenderSetLogicalSize(renderer, image.cols, image.rows);
      Uint32 videoTexPixelFormat;
      switch (image.format) {
        // For monochrome capture modes, plan to render captured data to the Y
        // plane of a planar YUV texture.
        case FC2_PIXEL_FORMAT_RAW8:
        case FC2_PIXEL_FORMAT_MONO8:
          videoTexPixelFormat = SDL_PIXELFORMAT_YV12;
          matType = CV_8UC1;
          break;
        // For color capture modes, plan to render captured data to the entire
        // space of a texture in a matching color format.
        case FC2_PIXEL_FORMAT_422YUV8:
          videoTexPixelFormat = SDL_PIXELFORMAT_UYVY;
          matType = CV_8UC2;
          break;
        case FC2_PIXEL_FORMAT_RGB:
          videoTexPixelFormat = SDL_PIXELFORMAT_RGB24;
          matType = CV_8UC3;
          break;
        case FC2_PIXEL_FORMAT_BGR:
          videoTexPixelFormat = SDL_PIXELFORMAT_BGR24;
          matType = CV_8UC3;
          break;
        default:
          fc2StopCapture(context);
          fc2Disconnect(context);
          SDL_DestroyTexture(videoTex);
          SDL_DestroyRenderer(renderer);
          SDL_DestroyWindow(window);
          SDL_ShowSimpleMessageBox(
              SDL_MESSAGEBOX_ERROR, "Unsupported FlyCapture2 Pixel Format",
              "LookSpry supports RAW8, MONO8, 422YUV8, RGB, and BGR.", NULL);
          return EXIT_FAILURE;
      }
      videoTex = SDL_CreateTexture(
          renderer, videoTexPixelFormat, SDL_TEXTUREACCESS_STREAMING,
          image.cols, image.rows);
      if (videoTex == NULL) {
        fc2StopCapture(context);
        fc2Disconnect(context);
        SDL_DestroyRenderer(renderer);
        SDL_DestroyWindow(window);
        showSDLError();
        return EXIT_FAILURE;
      }
      snprintf(
          strBuffer, strBufferSize, "LookSpry | %s | %dx%d --> %dx%d",
          rendererInfo.name, image.cols, image.rows, windowWidth,
          windowHeight);
      SDL_SetWindowTitle(window, strBuffer);
    }

    cv::Mat srcMat(image.rows, image.cols, matType, image.pData, image.stride);
    if (detecting) {
      switch (image.format) {
        // For monochrome capture modes, just equalize.
        case FC2_PIXEL_FORMAT_RAW8:
        case FC2_PIXEL_FORMAT_MONO8:
          cv::equalizeHist(srcMat, equalizedGrayMat);
          break;
        // For color capture modes, convert to gray and equalize.
        case FC2_PIXEL_FORMAT_422YUV8:
          cv::cvtColor(srcMat, equalizedGrayMat, cv::COLOR_YUV2GRAY_UYVY);
          cv::equalizeHist(equalizedGrayMat, equalizedGrayMat);
          break;
        case FC2_PIXEL_FORMAT_RGB:
          cv::cvtColor(srcMat, equalizedGrayMat, cv::COLOR_RGB2GRAY);
          cv::equalizeHist(equalizedGrayMat, equalizedGrayMat);
          break;
        case FC2_PIXEL_FORMAT_BGR:
          cv::cvtColor(srcMat, equalizedGrayMat, cv::COLOR_BGR2GRAY);
          cv::equalizeHist(equalizedGrayMat, equalizedGrayMat);
          break;
        default:
          break;
      }
      // Run the detector on the equalized image.
      detector.detectMultiScale(
          equalizedGrayMat, detectionRects, detectionScaleFactor,
          detectionMinNeighbours, detectionFlags, detectionMinSize,
          detectionMaxSize);
      // Draw the resulting detection rectangles on the original image.
      for (cv::Rect detectionRect : detectionRects) {
        cv::rectangle(srcMat, detectionRect, detectionDrawColor);
      }
    }

    SDL_LockTexture(videoTex, NULL, &videoTexPixels, &pitch);

    switch (image.format) {
    case FC2_PIXEL_FORMAT_RAW8:
    case FC2_PIXEL_FORMAT_MONO8:
      // Make the planar YUV video gray by setting all bytes in its U and V
      // planes to 128 (the middle of the range).
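      // In YV12 the V and U planes follow the Y plane and are each a quarter
      // of its size, so together they span dataSize / 2 bytes.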
      memset(((unsigned char *)videoTexPixels + image.dataSize), 128,
             image.dataSize / 2u);
      break;
    default:
      break;
    }

    if (mirroring) {
      // Flip the image data while copying it to the texture.
      cv::Mat dstMat(image.rows, image.cols, matType, videoTexPixels,
                     image.stride);
      cv::flip(srcMat, dstMat, 1);
    } else {
      // Copy the image data, as-is, to the texture.
      // Note that the PointGrey image and srcMat have pointers to the same
      // data, so the following code does reference the data that we modified
      // earlier via srcMat.
      memcpy(videoTexPixels, image.pData, image.dataSize);
    }

    SDL_UnlockTexture(videoTex);
    SDL_RenderCopy(renderer, videoTex, NULL, NULL);
    SDL_RenderPresent(renderer);

    numImagesCaptured++;
    if (numImagesCaptured >= numImagesPerFPSMeasurement) {
      endTicks = clock();
      snprintf(
          strBuffer, strBufferSize, "LookSpry | %s | %dx%d --> %dx%d | %ld FPS",
          rendererInfo.name, image.cols, image.rows, windowWidth,
          windowHeight,
          numImagesCaptured * CLOCKS_PER_SEC / (endTicks - startTicks));
      SDL_SetWindowTitle(window, strBuffer);
      startTicks = endTicks;
      numImagesCaptured = 0u;
    }
  }

  fc2StopCapture(context);
  fc2Disconnect(context);
  SDL_DestroyTexture(videoTex);
  SDL_DestroyRenderer(renderer);
  SDL_DestroyWindow(window);
  return EXIT_SUCCESS;
}