Example #1
void Integral_Image(cv::Mat src_img, cv::Mat &out_img){   // integral of the image? (in fact a horizontal neighbor sum)
    cv::Mat tmpim;
    src_img.copyTo(tmpim);

    int row_step = (int)src_img.step1(), px_step = src_img.channels(); // offsets to the pixel one row away and one pixel away

    for(int j = 0; j < src_img.rows; j++){               // vertical
        for(int i = 0; i < src_img.cols; i++){           // horizontal
            for(int k = 0; k < src_img.channels(); k++){ // channels

                int present_pos = j * (int)src_img.step1() + i * src_img.channels() + k;

//                if(j == 0)
//                    tmpim.data[present_pos] += src_img.data[present_pos + row_step];
//                else if(j == src_img.rows - 1)
//                    tmpim.data[present_pos] += src_img.data[present_pos - row_step];
//                else
//                    tmpim.data[present_pos] += src_img.data[present_pos + row_step] + src_img.data[present_pos - row_step];

                // Note: these uchar additions can wrap around (no saturation).
                if(i == 0)
                    tmpim.data[present_pos] += src_img.data[present_pos + px_step];
                else if(i == src_img.cols - 1)
                    tmpim.data[present_pos] += src_img.data[present_pos - px_step];
                else
                    tmpim.data[present_pos] += src_img.data[present_pos + px_step] + src_img.data[present_pos - px_step];
            }
        }
    }

    tmpim.copyTo(out_img);  // output image
}
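Example #2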
QImage BlobTrackingNode::convertToQImage(cv::Mat &cvImg)
{
    // Note: the QImage wraps cvImg's buffer without copying it, and OpenCV stores
    // color images as BGR while QImage::Format_RGB888 expects RGB.
    if (cvImg.channels() == 1){
        QImage img((uchar*)cvImg.data, cvImg.cols, cvImg.rows, (int)cvImg.step1(), QImage::Format_Indexed8);
        return img;
    }
    else{
        QImage img((uchar*)cvImg.data, cvImg.cols, cvImg.rows, (int)cvImg.step1(), QImage::Format_RGB888);
        return img;
    }
}
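Example #3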
void sharpen2(const cv::Mat& image, cv::Mat& result)
{

    result.create(image.size(), image.type()); // allocate if necessary

    int step = image.step1();                  // elements per row; assumes a single-channel 8-bit image
    const uchar* previous = image.data;        // ptr to previous row
    const uchar* current = image.data + step;  // ptr to current row
    const uchar* next = image.data + 2 * step; // ptr to next row
    uchar* output = result.data + step;        // ptr to output row

    for (int j = 1; j < image.rows - 1; j++)
    { // for each row (except first and last)
        for (int i = 1; i < image.cols - 1; i++)
        { // for each column (except first and last)

            output[i] = cv::saturate_cast<uchar>(5 * current[i] - current[i - 1] - current[i + 1] -
                                                 previous[i] - next[i]);
        }

        previous += step;
        current += step;
        next += step;
        output += step;
    }

    // Set the unprocessed pixels to 0
    result.row(0).setTo(cv::Scalar(0));
    result.row(result.rows - 1).setTo(cv::Scalar(0));
    result.col(0).setTo(cv::Scalar(0));
    result.col(result.cols - 1).setTo(cv::Scalar(0));
}
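A minimal usage sketch for sharpen2 (hypothetical file name; the kernel above assumes a single-channel 8-bit image):

cv::Mat image = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // CV_LOAD_IMAGE_GRAYSCALE in OpenCV 2.x
cv::Mat result;
if (!image.empty())
    sharpen2(image, result);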
Example #4
static float IC_Angle(const cv::Mat& image,
                      const int half_k,
                      cv::Point2f pt,
                      const std::vector<int>& u_max)
{
	int m_01 = 0, m_10 = 0;

	const uchar* center = &image.at<uchar> (cvRound(pt.y), cvRound(pt.x));

	// Treat the center line differently, v=0
	for (int u = -half_k; u <= half_k; ++u)
		m_10 += u * center[u];

	// Go line by line in the circular patch
	int step = (int)image.step1();
	for (int v = 1; v <= half_k; ++v)
	{
		// Proceed over the two lines
		int v_sum = 0;
		int d = u_max[v];
		for (int u = -d; u <= d; ++u)
		{
			int val_plus = center[u + v*step], val_minus = center[u - v*step];
			v_sum += (val_plus - val_minus);
			m_10 += u * (val_plus + val_minus);
		}
		m_01 += v * v_sum;
	}

    return cv::fastAtan2((float)m_01, (float)m_10);
}
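For reference, u_max holds the half-width of the circular patch at each row offset v. A sketch of one way to build it for a patch radius half_k (mirroring the ORB implementation; not part of the example above):

std::vector<int> u_max(half_k + 1);
for (int v = 0; v <= half_k; ++v)
    u_max[v] = cvRound(std::sqrt((double)(half_k * half_k - v * v)));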
Example #5
 //
 // showPropertiesOfMat
 //
 //   ...displays all properties of specified Mat.
 //
 void showPropertiesOfMat (const cv::Mat &src_mat)
 {
   // number of rows
   std::cout << "rows:" << src_mat.rows << std::endl;
   // number of columns
   std::cout << "cols:" << src_mat.cols << std::endl;
   // number of dimensions
   std::cout << "dims:" << src_mat.dims << std::endl;
   // size (for the 2D case)
   std::cout << "size[]:" << src_mat.size().width << "," << src_mat.size().height << std::endl;
   // bit-depth ID
   std::cout << "depth (ID):" << src_mat.depth() << "(=" << CV_64F << ")" << std::endl;
   // number of channels
   std::cout << "channels:" << src_mat.channels() << std::endl;
   // size of one channel within an element [in bytes]
   std::cout << "elemSize1 (elemSize/channels):" << src_mat.elemSize1() << "[byte]" << std::endl;
   // total number of elements
   std::cout << "total:" << src_mat.total() << std::endl;
   // row step [in bytes]
   std::cout << "step:" << src_mat.step << "[byte]" << std::endl;
   // total number of channels per step
   std::cout << "step1 (step/elemSize1):" << src_mat.step1() << std::endl;
   // is the data continuous?
   std::cout << "isContinuous:" << (src_mat.isContinuous()?"true":"false") << std::endl;
   // is it a submatrix?
   std::cout << "isSubmatrix:" << (src_mat.isSubmatrix()?"true":"false") << std::endl;
   // is the data empty?
   std::cout << "empty:" << (src_mat.empty()?"true":"false") << std::endl;
 }
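A minimal usage sketch (hypothetical matrix; the expected output is shown as comments):

cv::Mat m = cv::Mat::zeros(2, 3, CV_64FC1);
showPropertiesOfMat(m);
// rows:2, cols:3, dims:2, channels:1, total:6
// elemSize1 (elemSize/channels):8[byte], step:24[byte], step1 (step/elemSize1):3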
Example #6
void Mat_cheaker(cv::Mat src_im){
    std::cout << "rows: "      << src_im.rows << std::endl;
    std::cout << "cols: "      << src_im.cols << std::endl;
    std::cout << "channels: "  << src_im.channels() << std::endl;
    std::cout << "step1: "     << src_im.step1() << std::endl;
    std::cout << "dims: "      << src_im.dims << std::endl;
    std::cout << "elemSize1: " << src_im.elemSize1() << std::endl;
}
Example #7
Waifu2x::eWaifu2xError Waifu2x::LoadMatBySTBI(cv::Mat &float_image, const std::vector<char> &img_data)
{
	int x, y, comp;
	stbi_uc *data = stbi_load_from_memory((const stbi_uc *)img_data.data(), img_data.size(), &x, &y, &comp, 4);
	if (!data)
		return eWaifu2xError_FailedOpenInputFile;

	int type = 0;
	switch (comp)
	{
	case 1:
	case 3:
	case 4:
		type = CV_MAKETYPE(CV_8U, comp);
		break;

	default:
		return eWaifu2xError_FailedOpenInputFile;
	}

	float_image = cv::Mat(cv::Size(x, y), type);

	const auto LinePixel = float_image.step1() / float_image.channels();
	const auto Channel = float_image.channels();
	const auto Width = float_image.size().width;
	const auto Height = float_image.size().height;

	assert(x == Width);
	assert(y == Height);
	assert(Channel == comp);

	auto ptr = float_image.data;
	for (int i = 0; i < y; i++)
	{
		for (int j = 0; j < x; j++)
		{
			for (int ch = 0; ch < Channel; ch++)
				ptr[(i * LinePixel + j) * comp + ch] = data[(i * x + j) * 4 + ch]; // stbi_load_from_memory was asked for 4 components, so the source stride is 4
		}
	}

	stbi_image_free(data);

	if (comp >= 3)
	{
		// the data is RGB, so convert to BGR
		for (int i = 0; i < y; i++)
		{
			for (int j = 0; j < x; j++)
				std::swap(ptr[(i * LinePixel + j) * comp + 0], ptr[(i * LinePixel + j) * comp + 2]);
		}
	}

	return eWaifu2xError_OK;
}
Example #8
// matlab entry point
// [M, Ix, Iy] = bounded_dt(vals, ax, bx, ay, by, s)
void	PM_type::bounded_dt2(const cv::Mat &vals, FLOATS_SSS &DXDEF, FLOATS_SSS &DYDEF, cv::Mat &M, cv::Mat &Ix, cv::Mat &Iy)
{
	const int s = PM_type::SSS;

	// storage allocation
	int	Rows = vals.rows, Cols = vals.cols;
	if( !Rows || !Cols )
		throw std::runtime_error("bounded_dt2: empty input matrix");
	M.create( Rows, Cols, CV_32FC1 );
	Ix.create( Rows, Cols, CV_32SC1 );
	Iy.create( Rows, Cols, CV_32SC1 );
	cv::Mat	tmpM(Rows, Cols, CV_32FC1);
	cv::Mat	tmpIy(Rows, Cols, CV_32SC1);

	const float	*vPt = (float*)vals.data; int vStep = vals.step1();	// note: vStep is otherwise unused; the column pass below assumes vals is continuous (step1() == Cols)
	float	*mPt = (float*)M.data; int mStep = M.step1();
	int		*ixPt = (int*)Ix.data; int ixStep = Ix.step1();
	int		*iyPt = (int*)Iy.data; int iyStep = Iy.step1();
	float	*tmPt = (float*)tmpM.data; int tmStep = tmpM.step1();
	int		*tyPt = (int*)tmpIy.data; int tyStep = tmpIy.step1();

	for( int c=0; c<Cols; c++ )
		max_filter_1d_v2( vPt++, tmPt++, tyPt++, s, Cols, Rows, DYDEF );

	tmPt = (float*)tmpM.data;
	for( int r=0; r<Rows; r++ ){
		max_filter_1d_v2( tmPt, mPt, ixPt, s, 1, Cols, DXDEF );
		tmPt += tmStep;
		mPt += mStep;
		ixPt += ixStep;
	}

	ixPt = (int*)Ix.data;
	iyPt = (int*)Iy.data;
	tyPt = (int*)tmpIy.data;
	for( int r=0; r<Rows; r++ ){
		for( int c=0; c<Cols; c++ )
			*(iyPt++) = tyPt[*(ixPt++)];
		tyPt += tyStep;
	}

}
Example #9
// Load the image and convert the values to the range 0.0f to 1.0f
Waifu2x::eWaifu2xError Waifu2x::LoadMat(cv::Mat &float_image, const uint32_t* source, int width, int height)
{
	float_image = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 4));

	const auto LinePixel = float_image.step1() / float_image.channels();
	const auto Channel = float_image.channels();
	const auto Width = float_image.size().width;
	const auto Height = float_image.size().height;

	const uint8_t *sptr = (const uint8_t *)source;
	auto ptr = float_image.data;
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			for (int ch = 0; ch < Channel; ch++)
				ptr[(i * LinePixel + j) * 4 + ch] = sptr[(i * width + j) * 4 + ch];
		}
	}

	// the data is RGB, so convert to BGR
	/*
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
			std::swap(ptr[(i * LinePixel + j) * 4 + 0], ptr[(i * LinePixel + j) * 4 + 2]);
	}
	*/

	cv::Mat convert;
	float_image.convertTo(convert, CV_32F, 1.0 / 255.0);
	float_image.release();

	{
		// if the image has an alpha channel, convert to premultiplied alpha

		std::vector<cv::Mat> planes;
		cv::split(convert, planes);

		cv::Mat w = planes[3];

		planes[0] = planes[0].mul(w);
		planes[1] = planes[1].mul(w);
		planes[2] = planes[2].mul(w);

		cv::merge(planes, convert);
	}

	float_image = convert;

	return eWaifu2xError_OK;
}
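Example #10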
template <typename T>
void debayer2x2toBGR(const cv::Mat& src, cv::Mat& dst, int R, int G1, int G2, int B)
{
  typedef cv::Vec<T, 3> DstPixel; // 8- or 16-bit BGR
  dst.create(src.rows / 2, src.cols / 2, cv::DataType<DstPixel>::type);

  int src_row_step = src.step1();
  int dst_row_step = dst.step1();
  const T* src_row = src.ptr<T>();
  T* dst_row = dst.ptr<T>();

  // 2x2 downsample and debayer at once
  for (int y = 0; y < dst.rows; ++y)
  {
    for (int x = 0; x < dst.cols; ++x)
    {
      dst_row[x*3 + 0] = src_row[x*2 + B];
      dst_row[x*3 + 1] = (src_row[x*2 + G1] + src_row[x*2 + G2]) / 2;
      dst_row[x*3 + 2] = src_row[x*2 + R];
    }
    src_row += src_row_step * 2;
    dst_row += dst_row_step;
  }
}
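Example #11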
// Computes Sobel gradients, quantizes them into 9 orientation bins, and builds
// row-wise integral histograms (feat, bins, integrals and widthStep are members
// declared elsewhere in the extractor).
void HoGExtractor::Preprocess(const cv::Mat& img) {

	uchar *data = img.data;
	int height = img.size().height;
	int width = img.size().width;
	int step = img.step1();

	memset((void *)integrals, 0, sizeof(feat) * widthStep);
	memset((void *)bins, 0, sizeof(feat) * widthStep);

	for (int i = 1; i < height - 1; i++) {
		for (int j = 1; j < width - 1; j++) {
			feat tempX = (feat)data[(i - 1)*step + (j + 1)] - (feat)data[(i - 1)*step + (j - 1)]
				+ 2 * (feat)data[(i)*step + (j + 1)] - 2 * (feat)data[(i)*step + (j - 1)]
				+ (feat)data[(i + 1)*step + (j + 1)] - (feat)data[(i + 1)*step + (j - 1)];

			feat tempY = (feat)data[(i + 1)*step + (j - 1)] - (feat)data[(i - 1)*step + (j - 1)]
				+ 2 * (feat)data[(i + 1)*step + (j)] - 2 * (feat)data[(i - 1)*step + (j)]
				+ (feat)data[(i + 1)*step + (j + 1)] - (feat)data[(i - 1)*step + (j + 1)];

			int direction = Direct(tempX, tempY);
			feat temp = SQRT(tempX*tempX + tempY*tempY);

			int index = (i - 1) * widthStep + (j - 1) * 9;

			if (i == 1) {
				bins[index + direction] = temp;
			}
			else {
				memcpy((void *)(bins + index), (void *)(bins + index - widthStep), sizeof(feat) * 9);
				bins[index + direction] += temp;
			}

			memcpy((void *)(integrals + index), (void *)(bins + index), sizeof(feat) * 9);
			if (j > 1) {
				for (int k = 0; k < 9; k++) {
					integrals[index + k] += integrals[index + k - 9];
				}
			}
		}
	}
}
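Example #12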
// Reconstruct the image using the network
Waifu2x::eWaifu2xError cNet::ReconstructImage(const bool UseTTA, const int crop_w, const int crop_h, const int outer_padding, const int batch_size, float *outputBlockBuf, const cv::Mat &inMat, cv::Mat &outMat)
{
	const auto InputHeight = inMat.size().height;
	const auto InputWidth = inMat.size().width;
	const auto InputLine = inMat.step1();

	assert(inMat.channels() == 1 || inMat.channels() == 3);

	const int InputPadding = mNetOffset + outer_padding; // input padding

	const auto NoPaddingInputWidth = InputWidth - InputPadding * 2; // input image width excluding the padding
	const auto NoPaddingInputHeight = InputHeight - InputPadding * 2; // input image height excluding the padding

	cv::Mat outim(NoPaddingInputHeight * mInnerScale, NoPaddingInputWidth * mInnerScale, inMat.type());

	// float *imptr = (float *)im.data;
	float *imptr = (float *)outim.data;

	const auto input_block_width = crop_w + InputPadding * 2; // input block width
	const auto input_block_height = crop_h + InputPadding * 2; // input block height

	const auto output_block_width = input_block_width * mInnerScale - mNetOffset * 2; // output block width
	const auto output_block_height = input_block_height * mInnerScale - mNetOffset * 2; // output block height

	const auto output_crop_block_width = crop_w * mInnerScale; // output block width after cropping
	const auto output_crop_block_height = crop_h * mInnerScale; // output block height after cropping

	const auto output_crop_w = (output_block_width - crop_w * mInnerScale) / 2; // crop size applied to the output
	const auto output_crop_h = (output_block_height - crop_h * mInnerScale) / 2; // crop size applied to the output

	assert(NoPaddingInputWidth % crop_w == 0);
	assert(NoPaddingInputHeight % crop_h == 0);

	try
	{
		auto input_blobs = mNet->input_blobs();

		assert(input_blobs.size() > 0);

		auto input_blob = mNet->input_blobs()[0];

		input_blob->Reshape(batch_size, mInputPlane, input_block_height, input_block_width);

		assert(inMat.channels() == mInputPlane);
		assert(input_blob->shape(1) == mInputPlane);

		const int WidthNum = NoPaddingInputWidth / crop_w;
		const int HeightNum = NoPaddingInputHeight / crop_h;

		const int BlockNum = WidthNum * HeightNum;

		const int input_block_plane_size = input_block_width * input_block_height * mInputPlane;
		const int output_block_plane_size = output_block_width * output_block_height * mInputPlane;

		// The image is reconstructed in blocks of block_size*block_size (to limit memory consumption)
		for (int num = 0; num < BlockNum; num += batch_size)
		{
			const int processNum = (BlockNum - num) >= batch_size ? batch_size : BlockNum - num;

			if (processNum < batch_size)
				input_blob->Reshape(processNum, mInputPlane, input_block_height, input_block_width);

			for (int n = 0; n < processNum; n++)
			{
				const int wn = (num + n) % WidthNum;
				const int hn = (num + n) / WidthNum;

				const int w = wn * crop_w;
				const int h = hn * crop_h;

				assert(w + input_block_width <= InputWidth && h + input_block_height <= InputHeight);

				cv::Mat someimg = inMat(cv::Rect(w, h, input_block_width, input_block_height));

				// flatten the image into the input blob
				{
					float *fptr = input_blob->mutable_cpu_data() + (input_block_plane_size * n);
					const float *uptr = (const float *)someimg.data;

					const auto Line = someimg.step1();

					if (someimg.channels() == 1)
					{
						if (input_block_width == Line)
							memcpy(fptr, uptr, input_block_width * input_block_height * sizeof(float));
						else
						{
							for (int i = 0; i < input_block_height; i++)
								memcpy(fptr + i * input_block_width, uptr + i * Line, input_block_width * sizeof(float));
						}
					}
					else
					{
						const auto LinePixel = someimg.step1() / someimg.channels();
						const auto Channel = someimg.channels();
						const auto Width = someimg.size().width;
						const auto Height = someimg.size().height;

						for (int i = 0; i < Height; i++)
						{
							for (int j = 0; j < Width; j++)
							{
								for (int ch = 0; ch < Channel; ch++)
								{
									const size_t IndexSrc = i * someimg.step1() + j * Channel + ch;
									const size_t IndexDst = (ch * Height + i) * Width + j;
									fptr[IndexDst] = uptr[IndexSrc];
								}
							}
						}
					}
				}
			}

			assert(input_blob->count() == input_block_plane_size * processNum);

			// run the forward pass
			auto out = mNet->Forward();

			auto b = out[0];

			assert(b->count() == output_block_plane_size * processNum);

			const float *ptr = nullptr;

			if (caffe::Caffe::mode() == caffe::Caffe::CPU)
				ptr = b->cpu_data();
			else
				ptr = b->gpu_data();

			caffe::caffe_copy(output_block_plane_size * processNum, ptr, outputBlockBuf);

			for (int n = 0; n < processNum; n++)
			{
				const int wn = (num + n) % WidthNum;
				const int hn = (num + n) / WidthNum;

				const int w = wn * output_crop_block_width;
				const int h = hn * output_crop_block_height;

				const float *fptr = outputBlockBuf + (output_block_plane_size * n);

				const auto Line = outim.step1();

				// copy the result into the output image
				if (outim.channels() == 1)
				{
					for (int i = 0; i < output_crop_block_height; i++)
						memcpy(imptr + (h + i) * Line + w, fptr + (i + output_crop_h) * output_block_width + output_crop_w, output_crop_block_width * sizeof(float));
				}
				else
				{
					const auto LinePixel = Line / outim.channels();
					const auto Channel = outim.channels();

					for (int i = 0; i < output_crop_block_height; i++)
					{
						for (int j = 0; j < output_crop_block_width; j++)
						{
							for (int ch = 0; ch < Channel; ch++)
							{
								const size_t IndexSrc = (ch * output_block_height + i + output_crop_h) * output_block_width + j + output_crop_w;
								const size_t IndexDst = ((h + i) * LinePixel + (w + j)) * Channel + ch;

								imptr[IndexDst] = fptr[IndexSrc];
							}
						}
					}
				}

				//{
				//	cv::Mat testim(output_block_size, output_block_size, CV_32FC1);
				//	float *p = (float *)testim.data;
				//	for (int i = 0; i < output_block_size; i++)
				//	{
				//		for (int j = 0; j < output_block_size; j++)
				//		{
				//			p[testim.step1() * i + j] = fptr[i * output_block_size + j];
				//		}
				//	}

				//	const int cv_depth = DepthBitToCVDepth(8);
				//	const double max_val = GetValumeMaxFromCVDepth(cv_depth);
				//	const double eps = GetEPS(cv_depth);

				//	cv::Mat write_iamge;
				//	testim.convertTo(write_iamge, cv_depth, max_val, eps);

				//	cv::imwrite("ti.png", write_iamge);
				//	testim.release();
				//}
			}
		}
	}
	catch (...)
	{
		return Waifu2x::eWaifu2xError_FailedProcessCaffe;
	}

	// clip the values to the range 0 to 1
	cv::threshold(outim, outim, 1.0, 1.0, cv::THRESH_TRUNC);
	cv::threshold(outim, outim, 0.0, 0.0, cv::THRESH_TOZERO);

	outMat = outim;

	return Waifu2x::eWaifu2xError_OK;
}
Example #13
String cvMat2String(const cv::Mat& M, LPCSTR format) {
	switch (M.type()) {
	case CV_32F: return cvMat2String(M.ptr<float>(), (uint32_t)M.rows, (uint32_t)M.cols, (uint32_t)M.step1(), format);
	case CV_64F: return cvMat2String(M.ptr<double>(), (uint32_t)M.rows, (uint32_t)M.cols, (uint32_t)M.step1(), format);
	}
	return String();
}
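Example #14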
void BlobTrackingNode::process(const cv::Mat &img_input, const cv::Mat &img_mask, cv::Mat &img_output)
{
    // This is the output event
    QList<DetectedEvent> blobEvent;
    cv::Mat newInput;
    img_input.copyTo(newInput);

    if(img_input.empty() || img_mask.empty()){
        return;
    }
    loadConfig();

    if(firstTime){
        saveConfig();
    }
    IplImage* frame = new IplImage(img_input);
    cvConvertScale(frame, frame, 1, 0);

    IplImage* segmentated = new IplImage(img_mask);

    IplConvKernel* morphKernel = cvCreateStructuringElementEx(5, 5, 1, 1, CV_SHAPE_RECT, NULL);
    cvMorphologyEx(segmentated, segmentated, NULL, morphKernel, CV_MOP_OPEN, 1);

    cv::Mat temp = cv::Mat(segmentated);
    if(showBlobMask){
        //cv::imshow("test",temp);
        ownerPlugin->updateFrameViewer("Tracking Mask",convertToQImage(temp));
    }
    IplImage* labelImg = cvCreateImage(cvGetSize(frame), IPL_DEPTH_LABEL, 1);

    cvb::CvBlobs blobs;
    cvb::cvLabel(segmentated, labelImg, blobs);

    cvb::cvFilterByArea(blobs, minArea, maxArea);
  
    if(debugBlob){
        cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE|CV_BLOB_RENDER_TO_STD);
    }
    else{
        cvb::cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);
    }

    cvb::cvUpdateTracks(blobs, tracks,threshold_distance, threshold_inactive);



    // At this point we have assigned each blob in the current frame to a track, so we can iterate through the active
    // tracks and find the blobs that belong to each of them. The following loop does that. This is helpful for forming
    // a more general idea of the relationship between blobs over time.

    for (cvb::CvTracks::const_iterator track = tracks.begin(); track!=tracks.end(); ++track)
    {
        if((*track).second->active != 0){
            for(std::map<cvb::CvLabel,cvb::CvBlob *>::iterator it = blobs.begin() ; it != blobs.end(); it++){

                cvb::CvLabel label = (*it).first;
                cvb::CvBlob * blob = (*it).second;

                if((*track).second->label == label){
                    //qDebug()<< blob->minx <<","<<blob->miny;
                    // These are the smoothed, time-tracked blob labels
                    blobEvent.append(DetectedEvent("blob",QString("%1,%2,%3,%4,%5,%6,%7,%8").arg(frameIndex).arg((*track).first).arg(blob->centroid.x).arg(blob->centroid.y).arg(blob->minx).arg(blob->miny).arg(blob->maxx).arg(blob->maxy),1.0));
                }
            }
        }
    }

    if(debugTrack){
        cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX|CV_TRACK_RENDER_TO_STD);
    }
    else{
        cvb::cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);
    }
    if(showFrameID){
        cv::Mat temp = cv::Mat(frame);
        cv::putText(temp,QString("%1").arg(frameIndex).toStdString(),cv::Point(40,120),cv::FONT_HERSHEY_PLAIN,2,cv::Scalar(0,255,0),2);
    }
    if(showOutput){
        cv::Mat temp = cv::Mat(frame);
        ownerPlugin->updateFrameViewer("Tracking Output",convertToQImage(temp));

        //cvShowImage("Blob Tracking", frame);
    }
    cv::Mat img_result(frame);

    cv::putText(img_result,QString("%1").arg(QString::number(frameIndex)).toUtf8().constData(), cv::Point(10,30), CV_FONT_HERSHEY_PLAIN,1.0, CV_RGB(255,255,255));

    img_result.copyTo(img_output);
    cvReleaseImage(&labelImg);
    delete frame;
    delete segmentated;
    cvReleaseBlobs(blobs);
    cvReleaseStructuringElement(&morphKernel);

    firstTime = false;
    frameIndex++;

    QImage input((uchar*)newInput.data, newInput.cols, newInput.rows, (int)newInput.step1(), QImage::Format_RGB888);
    QImage output((uchar*)img_output.data, img_output.cols, img_output.rows, (int)img_output.step1(), QImage::Format_RGB888);
    QList<QImage> images;

    images.append(input);
    images.append(output);
    emit generateEvent(blobEvent,images);

    //emit generateEvent(blobEvent);
}
Example #15
// Reconstruct the image using the network
Waifu2x::eWaifu2xError Waifu2x::ReconstructImage(boost::shared_ptr<caffe::Net<float>> net, cv::Mat &im)
{
	const auto Height = im.size().height;
	const auto Width = im.size().width;
	const auto Line = im.step1();

	assert(Width % output_size == 0);
	assert(Height % output_size == 0);

	assert(im.channels() == 1 || im.channels() == 3);

	cv::Mat outim(im.rows, im.cols, im.type());

	// float *imptr = (float *)im.data;
	float *imptr = (float *)outim.data;

	try
	{
		auto input_blobs = net->input_blobs();
		auto input_blob = net->input_blobs()[0];

		input_blob->Reshape(batch_size, input_plane, input_block_size, input_block_size);

		assert(im.channels() == input_plane);
		assert(input_blob->shape(1) == input_plane);

		const int WidthNum = Width / output_size;
		const int HeightNum = Height / output_size;

		const int BlockNum = WidthNum * HeightNum;

		const int input_block_plane_size = input_block_size * input_block_size * input_plane;
		const int output_block_plane_size = output_block_size * output_block_size * input_plane;

		const int output_padding = inner_padding + outer_padding - layer_num;

		// The image is reconstructed in blocks of output_size*output_size (to limit memory consumption)
		for (int num = 0; num < BlockNum; num += batch_size)
		{
			const int processNum = (BlockNum - num) >= batch_size ? batch_size : BlockNum - num;

			if (processNum < batch_size)
				input_blob->Reshape(processNum, input_plane, input_block_size, input_block_size);

			for (int n = 0; n < processNum; n++)
			{
				const int wn = (num + n) % WidthNum;
				const int hn = (num + n) / WidthNum;

				const int w = wn * output_size;
				const int h = hn * output_size;

				if (w + crop_size <= Width && h + crop_size <= Height)
				{
					int x, y;
					x = w - inner_padding;
					y = h - inner_padding;

					int width, height;

					width = crop_size + inner_padding * 2;
					height = crop_size + inner_padding * 2;

					int top, bottom, left, right;

					top = outer_padding;
					bottom = outer_padding;
					left = outer_padding;
					right = outer_padding;

					if (x < 0)
					{
						left += -x;
						width -= -x;
						x = 0;
					}

					if (x + width > Width)
					{
						right += (x + width) - Width;
						width = Width - x;
					}

					if (y < 0)
					{
						top += -y;
						height -= -y;
						y = 0;
					}

					if (y + height > Height)
					{
						bottom += (y + height) - Height;
						height = Height - y;
					}

					cv::Mat someimg = im(cv::Rect(x, y, width, height));

					cv::Mat someborderimg;
					// Pad the image on all sides; the margin is filled with cv::BORDER_REPLICATE.
					// Strictly, regions where im already has pixels are not treated as margin, but as long as inner_padding
					// equals layer_num and outer_padding is at least 1, those pixels do not affect the part extracted as the result image.
					cv::copyMakeBorder(someimg, someborderimg, top, bottom, left, right, cv::BORDER_REPLICATE);
					someimg.release();

					// flatten the image into the input block
					{
						float *fptr = input_block + (input_block_plane_size * n);
						const float *uptr = (const float *)someborderimg.data;

						const auto Line = someborderimg.step1();

						if (someborderimg.channels() == 1)
						{
							if (input_block_size == Line)
								memcpy(fptr, uptr, input_block_size * input_block_size * sizeof(float));
							else
							{
								for (int i = 0; i < input_block_size; i++)
									memcpy(fptr + i * input_block_size, uptr + i * Line, input_block_size * sizeof(float));
							}
						}
						else
						{
							const auto LinePixel = someborderimg.step1() / someborderimg.channels();
							const auto Channel = someborderimg.channels();
							const auto Width = someborderimg.size().width;
							const auto Height = someborderimg.size().height;

							for (int i = 0; i < Height; i++)
							{
								for (int j = 0; j < LinePixel; j++)
								{
									for (int ch = 0; ch < Channel; ch++)
										fptr[(ch * Height + i) * Width + j] = uptr[(i * LinePixel + j) * Channel + ch];
								}
							}

							/*
							{
								cv::Mat im(someborderimg.size(), CV_32F, fptr, Width * sizeof(float));

								cv::Mat write_iamge;
								im.convertTo(write_iamge, CV_8U, 255.0);
								im.release();

								if (!cv::imwrite("test_in.png", write_iamge))
									return eWaifu2xError_FailedOpenOutputFile;
							}
							*/
						}
					}
				}
			}

			assert(input_blob->count() == input_block_plane_size * processNum);

			// feed the image into the network
			input_blob->set_cpu_data(input_block);

			// run the forward pass
			auto out = net->ForwardPrefilled(nullptr);

			auto b = out[0];

			assert(b->count() == output_block_plane_size * processNum);

			const float *ptr = nullptr;

			if (caffe::Caffe::mode() == caffe::Caffe::CPU)
				ptr = b->cpu_data();
			else
				ptr = b->gpu_data();

			caffe::caffe_copy(output_block_plane_size * processNum, ptr, output_block);

			for (int n = 0; n < processNum; n++)
			{
				const int wn = (num + n) % WidthNum;
				const int hn = (num + n) / WidthNum;

				const int w = wn * output_size;
				const int h = hn * output_size;

				const float *fptr = output_block + (output_block_plane_size * n);

				// copy the result into the output image
				if (outim.channels() == 1)
				{
					for (int i = 0; i < crop_size; i++)
						memcpy(imptr + (h + i) * Line + w, fptr + (i + output_padding) * output_block_size + output_padding, crop_size * sizeof(float));
				}
				else
				{
					const auto LinePixel = outim.step1() / outim.channels();
					const auto Channel = outim.channels();

					for (int i = 0; i < crop_size; i++)
					{
						for (int j = 0; j < crop_size; j++)
						{
							for (int ch = 0; ch < Channel; ch++)
								imptr[((h + i) * LinePixel + (w + j)) * Channel + ch] = fptr[(ch * output_block_size + i + output_padding) * output_block_size + j + output_padding];
						}
					}

					/*
					{
						cv::Mat im(someborderimg.size(), CV_32F, fptr, Width * sizeof(float));

						cv::Mat write_iamge;
						im.convertTo(write_iamge, CV_8U, 255.0);
						im.release();

						if (!cv::imwrite("test_in.png", write_iamge))
							return eWaifu2xError_FailedOpenOutputFile;
					}
					*/
				}
			}
		}
	}
	catch (...)
	{
		return eWaifu2xError_FailedProcessCaffe;
	}

	im = outim;

	return eWaifu2xError_OK;
}
Example #16
Waifu2x::eWaifu2xError Waifu2x::WriteMat(const cv::Mat &im, const boost::filesystem::path &output_file)
{
	const boost::filesystem::path ip(output_file);
	const std::string ext = ip.extension().string();

	const bool isJpeg = boost::iequals(ext, ".jpg") || boost::iequals(ext, ".jpeg");

	if (boost::iequals(ext, ".tga"))
	{
		unsigned char *data = im.data;

		std::vector<unsigned char> rgbimg;
		if (im.channels() >= 3 || im.step1() != im.size().width * im.channels()) // copy into an RGB buffer (or remove the row padding)
		{
			const auto Line = im.step1();
			const auto Channel = im.channels();
			const auto Width = im.size().width;
			const auto Height = im.size().height;

			rgbimg.resize(Width * Height * Channel);

			const auto Stride = Width * Channel;
			for (int i = 0; i < Height; i++)
				memcpy(rgbimg.data() + Stride * i, im.data + Line * i, Stride);

			data = rgbimg.data();
		}

		if (im.channels() >= 3) // reorder BGR to RGB
		{
			const auto Line = im.step1();
			const auto Channel = im.channels();
			const auto Width = im.size().width;
			const auto Height = im.size().height;

			auto ptr = rgbimg.data();
			for (int i = 0; i < Height; i++)
			{
				for (int j = 0; j < Width; j++)
					std::swap(ptr[(i * Width + j) * Channel + 0], ptr[(i * Width + j) * Channel + 2]);
			}
		}

		boost::iostreams::stream<boost::iostreams::file_descriptor> os;

		try
		{
			os.open(output_file, std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
		}
		catch (...)
		{
			return Waifu2x::eWaifu2xError_FailedOpenOutputFile;
		}

		if(!os)
			return eWaifu2xError_FailedOpenOutputFile;

		if (!stbi_write_tga_to_func(Waifu2x_stbi_write_func, &os, im.size().width, im.size().height, im.channels(), data))
			return eWaifu2xError_FailedOpenOutputFile;

		return eWaifu2xError_OK;
	}

	try
	{
		const boost::filesystem::path op(output_file);
		const boost::filesystem::path opext(op.extension());

		std::vector<int> params;

		const auto &OutputExtentionList = Waifu2x::OutputExtentionList;
		for (const auto &elm : OutputExtentionList)
		{
			if (elm.ext == opext)
			{
				if (elm.imageQualitySettingVolume && output_quality)
				{
					params.push_back(*elm.imageQualitySettingVolume);
					params.push_back(*output_quality);
				}

				break;
			}
		}

		std::vector<uchar> buf;
		cv::imencode(ext, im, buf, params);

		if (writeFile(output_file, buf))
			return eWaifu2xError_OK;

	}
	catch (...)
	{
	}

	return eWaifu2xError_FailedOpenOutputFile;
}