Example #1
PyObject* from_sand_wrapper(PyObject *im) {
	NDArrayConverter cvt;
	cv::Mat _im = cvt.toMat(im);
	cv::Mat _imO(_im.size(), CV_8UC1);
	from_sand(_im, _imO);
	return cvt.toNDArray(_imO);
}
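All of these wrappers share one pattern: NDArrayConverter turns the incoming numpy array into a cv::Mat, the underlying C++ routine runs, and toNDArray turns the result back into a numpy array. Below is a minimal, hedged sketch of how such a wrapper is typically exported with Boost.Python; the module name, the init helper, and the Python 2-era numpy headers (matching the PyCObject usage in later examples) are assumptions, not part of the original source.

#include <boost/python.hpp>
#include <numpy/ndarrayobject.h>

namespace bp = boost::python;

// Initialize the numpy C API once, before any NDArrayConverter call.
// (With Python 2-era numpy headers, import_array() can be used in a void
// function; newer headers require returning a value on failure.)
static void init_numpy() {
	Py_Initialize();
	import_array();
}

BOOST_PYTHON_MODULE(sand)  // "sand" is a hypothetical module name
{
	init_numpy();
	bp::def("from_sand", from_sand_wrapper);
}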
Example #2
PyObject* from_grass_b_wrapper(PyObject *im) {
	NDArrayConverter cvt;
	cv::Mat _im = cvt.toMat(im);
	cv::Mat _imO;
	from_grass_b(_im, _imO);
	return cvt.toNDArray(_imO);
}
Example #3
PyObject * get_bboxes(PyObject * image_, PyObject * seg_, int x1, int y1, int x2, int y2) {
	NDArrayConverter cvt;
	cv::Mat image  = cvt.toMat(image_);
	cv::Mat seg    = cvt.toMat(seg_);
	//cv::Mat bboxes = cvt.toMat(bboxes_);
	std::cout << x1 << " " << y1 << " " << x2 << " " << y2 << std::endl;
	return cvt.toNDArray(get_bboxes_(image, seg, x1, y1, x2, y2));
}
Example #4
PyObject* detect_watanabe_wrapper(PyObject *im) {
	NDArrayConverter cvt;
	cv::Mat _im = cvt.toMat(im);
	cv::Point _itokawa;
	detect_watanabe(_im, _itokawa);
	// Pack the detected point into a 2x1 float matrix so it returns to Python as a numpy array.
	cv::Mat res(2, 1, CV_32F);
	res.at<float>(0, 0) = _itokawa.x;
	res.at<float>(1, 0) = _itokawa.y;
	return cvt.toNDArray(res);
}
Example #5
PyObject* CannyNewFuncRGB(PyObject *srcImgPy, double threshLow, double threshHigh, int kernelSize)
{
    NDArrayConverter cvt;
    cv::Mat srcImg = cvt.toMat(srcImgPy);
    cv::Mat returnedEdges;

    cppCannyBunk_RGB(srcImg, returnedEdges, threshLow, threshHigh, kernelSize);

    return cvt.toNDArray(returnedEdges);
}
Example #6
//	cv::Mat segment(cv::Mat inputFrame) {
	PyObject *segment(PyObject *_inputFrame) {
		NDArrayConverter cvt;
		cv::Mat inputFrame = cvt.toMat(_inputFrame);

		// Crop the region bounded by the calibration points p0..p3 (set by calibrate() in Example #12).
		cv::Mat patch;
		inputFrame(cv::Rect(p0.x, p0.y, p1.x - p0.x, p3.y - p0.y)).copyTo(patch);

		cv::imshow("Video Captured", inputFrame);
		cv::imshow("Patch", patch);

		return cvt.toNDArray(patch);
	}
Example #7
static cv::Mat get_gray_img(PyObject *p_img) {
    // get image and convert if necessary
    NDArrayConverter cvt;
    cv::Mat img_temp = cvt.toMat(p_img);
    cv::Mat img;
    if (img_temp.channels() == 1) {
        img = img_temp;
    } else {
        cv::cvtColor(img_temp, img, CV_BGR2GRAY);
    }

    return img;
}
Example #8
cv::Mat createNumpyArrayWithMatWrapper(int cols, int rows, int type)
{
    int sizes[] = {cols, rows};
    PyObject *o = createNumArrayFromMatDimensions(2, sizes, type);
    NDArrayConverter conv;
    //The NDArrayConverter interface can only return a new Mat object
    //allocated on the heap. What we want is a Mat object on the stack.
    //So we assign it (cheap copy!) and then deallocate.
    cv::Mat *p = conv.toMat(o, 0);
    cv::Mat result = *p;
    delete p;
    return result;
}
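A hedged usage sketch of the wrapper above, assuming the interpreter and the numpy C API are already initialized; the dimensions and type are illustrative:

// Illustrative only: note the (cols, rows) parameter order used by the
// wrapper above.
cv::Mat frame = createNumpyArrayWithMatWrapper(640, 480, CV_8UC1);
frame.setTo(cv::Scalar(0));  // a cheap copy of the header; the pixel
                             // buffer is shared by reference counting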
Example #9
PyObject* CannyVanilla(PyObject *srcImgPy, double threshLow, double threshHigh, int kernelSize)
{
    NDArrayConverter cvt;
    cv::Mat srcImg = cvt.toMat(srcImgPy);
    cv::Mat returnedEdges;
    // Canny expects a single-channel image, so collapse color inputs first.
    if (srcImg.channels() > 1) {
        cv::cvtColor(srcImg, srcImg, CV_BGR2GRAY);
    }

    // The final 'true' selects the more accurate L2 gradient norm.
    cv::Canny(srcImg, returnedEdges, threshLow, threshHigh, kernelSize, true);

    return cvt.toNDArray(returnedEdges);
}
Example #10
PyObject* extract_features(PyObject* p_descriptor_extractor,
        PyObject *p_img, PyObject *p_keypoints) {
    cv::Mat img = get_gray_img(p_img);
    Py_ssize_t num_keypoints = PyList_Size(p_keypoints);

    std::vector<cv::KeyPoint> keypoints;

    for(Py_ssize_t i = 0; i < num_keypoints; ++i) {
        keypoints.push_back(cv::KeyPoint());
        PyObject* cv2_keypoint = PyList_GetItem(p_keypoints, i);

        // get attributes
        PyObject* cv2_keypoint_size = PyObject_GetAttrString(cv2_keypoint, "size");
        PyObject* cv2_keypoint_angle = PyObject_GetAttrString(cv2_keypoint, "angle");
        PyObject* cv2_keypoint_response = PyObject_GetAttrString(cv2_keypoint, "response");
        PyObject* cv2_keypoint_pt = PyObject_GetAttrString(cv2_keypoint, "pt");
        PyObject* cv2_keypoint_pt_x = PyTuple_GetItem(cv2_keypoint_pt, 0);
        PyObject* cv2_keypoint_pt_y = PyTuple_GetItem(cv2_keypoint_pt, 1);

        // set data
        PyArg_Parse(cv2_keypoint_size, "f", &keypoints[i].size);
        PyArg_Parse(cv2_keypoint_angle, "f", &keypoints[i].angle);
        PyArg_Parse(cv2_keypoint_response, "f", &keypoints[i].response);
        PyArg_Parse(cv2_keypoint_pt_x, "f", &keypoints[i].pt.x);
        PyArg_Parse(cv2_keypoint_pt_y, "f", &keypoints[i].pt.y);

        Py_DECREF(cv2_keypoint_size);
        Py_DECREF(cv2_keypoint_angle);
        Py_DECREF(cv2_keypoint_response);
        Py_DECREF(cv2_keypoint_pt);
        // cv2_keypoint_pt_x and cv2_keypoint_pt_y are borrowed references
        // (PyTuple_GetItem), and cv2_keypoint is borrowed from the list
        // (PyList_GetItem), so they must not be Py_DECREF'ed.
    }

    cv::Mat descriptors;
    brisk::BriskDescriptorExtractor* descriptor_extractor =
            static_cast<brisk::BriskDescriptorExtractor*>(PyCObject_AsVoidPtr(p_descriptor_extractor));
    descriptor_extractor->compute(img, keypoints, descriptors);

    NDArrayConverter cvt;
    PyObject* ret = PyList_New(2);
    PyObject* ret_keypoints = keypoints_ctopy(keypoints);
    PyList_SetItem(ret, 0, ret_keypoints);
    PyList_SetItem(ret, 1, cvt.toNDArray(descriptors));
    // PyList_SetItem steals the reference, so ret_keypoints must not be
    // Py_DECREF'ed here.

    return ret;
}
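keypoints_ctopy, used by extract_features above, is not shown in these examples. A hedged sketch of what such a C-to-Python helper could look like, returning plain tuples rather than cv2.KeyPoint objects (the name reuse and the tuple layout are assumptions):

// Hypothetical sketch: build a Python list of (pt_x, pt_y, size, angle,
// response) tuples, one per keypoint.
static PyObject* keypoints_ctopy(const std::vector<cv::KeyPoint>& keypoints) {
    PyObject* list = PyList_New(keypoints.size());
    for (size_t i = 0; i < keypoints.size(); ++i) {
        PyObject* tup = Py_BuildValue("(fffff)",
                keypoints[i].pt.x, keypoints[i].pt.y,
                keypoints[i].size, keypoints[i].angle,
                keypoints[i].response);
        PyList_SetItem(list, i, tup);  // PyList_SetItem steals the reference
    }
    return list;
}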
Example #11
  // Convert obj_ptr into a cv::Mat
  static void construct(PyObject* obj_ptr,
                        boost::python::converter::rvalue_from_python_stage1_data* data)
  {
    using namespace boost::python;
    typedef converter::rvalue_from_python_storage< T > storage_t;

    storage_t* the_storage = reinterpret_cast<storage_t*>( data );
    void* memory_chunk = the_storage->storage.bytes;

    NDArrayConverter cvt;
    T* newvec = new (memory_chunk) T(cvt.toMat(obj_ptr));
    data->convertible = memory_chunk;

    return;
  }
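An rvalue converter like this is normally paired with a convertible() check and registered with Boost.Python's converter registry. The sketch below shows the usual wiring; the struct name is a hypothetical placeholder, since the original only shows construct():

// Hypothetical enclosing struct; only construct() appears in the example.
struct matFromNDArrayConverter {
  // Accept only numpy arrays; returning NULL lets Boost.Python try the
  // other registered converters.
  static void* convertible(PyObject* obj_ptr) {
    return PyArray_Check(obj_ptr) ? obj_ptr : NULL;
  }

  matFromNDArrayConverter() {
    boost::python::converter::registry::push_back(
        &convertible, &construct, boost::python::type_id<cv::Mat>());
  }
};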
Example #12
	bool calibrate(PyObject *_inputFrame_BGR, int key) {
		NDArrayConverter cvt;
		cv::Mat inputFrame_BGR = cvt.toMat(_inputFrame_BGR);

		// Convert input image to HSV
		cv::Mat inputFrame_HSV;
//		cv::GaussianBlur(inputFrame_BGR, inputFrame_BGR, cv::Size(5, 5), 0, 0);
//		cv::resize(inputFrame_BGR, inputFrame_BGR, cv::Size(inputFrame_BGR.cols / 2, inputFrame_BGR.rows / 2));
		cv::cvtColor(inputFrame_BGR, inputFrame_HSV, CV_BGR2HSV);

		// Threshold the HSV image, keep only the red pixels
		cv::Mat lowerRedHueImg;
		cv::Mat upperRedHueImg;
		cv::Mat redHueImg;
		cv::inRange(inputFrame_HSV, cv::Scalar(0, 100, 100), cv::Scalar(10, 255, 255), lowerRedHueImg);
		cv::inRange(inputFrame_HSV, cv::Scalar(160, 100, 100), cv::Scalar(179, 255, 255), upperRedHueImg);
		cv::addWeighted(lowerRedHueImg, 1.0, upperRedHueImg, 1.0, 0.0, redHueImg);

		// Find four centers of red pixels in the combined threshold image
		std::vector< std::vector<cv::Point> > contours;
		cv::findContours(redHueImg, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
		if (contours.size() == 4) {
			for (int i = 0; i < contours.size(); i++) {
				cv::Point center(0, 0);
				for (int j = 0; j < contours.at(i).size(); j++)
					center += contours.at(i).at(j);
				center = cv::Point(center.x / contours.at(i).size(), center.y / contours.at(i).size());
				if (center.x < redHueImg.cols / 2 && center.y < redHueImg.rows / 2)
					p0 = center;
				else if (center.x > redHueImg.cols / 2 && center.y < redHueImg.rows / 2)
					p1 = center;
				else if (center.x > redHueImg.cols / 2 && center.y > redHueImg.rows / 2)
					p2 = center;
				else
					p3 = center;

				cv::drawContours(inputFrame_BGR, contours, i, cv::Scalar(255, 0, 0));
				cv::circle(inputFrame_BGR, center, 3, cv::Scalar(255, 0, 0), -1);
			}

			if (p0 != p1 && p0 != p2 && p0 != p3 && p1 != p2 && p1 != p3 && p2 != p3 && (key & 0xFF) == 27) // 'Esc' key
				return true;
		}

		cv::imshow("Video Captured", inputFrame_BGR);

		return false;
	}
Example #13
bp::object doBPySpectralResidualSaliency(bp::object FullsizedImage)
{
	NDArrayConverter cvt;
	cv::Mat srcImgCPP = cvt.toMat(FullsizedImage.ptr());

	std::vector<cv::Mat> foundCrops;
	std::vector<std::pair<double,double>> cropGeolocations;
	SpectralResidualSaliencyClass saldoer;
	saldoer.ProcessSaliency(&srcImgCPP, &foundCrops, &cropGeolocations, 0);
	consoleOutput.Level1() << "SpectralResidualSaliency found " << to_istring(foundCrops.size()) << " crops" << std::endl;

	// Convert each crop to a numpy array and collect the results in a Python list.
	bp::list foundCropsPy;
	for (size_t ii = 0; ii < foundCrops.size(); ii++) {
		PyObject* cropPy = cvt.toNDArray(foundCrops[ii]);
		foundCropsPy.append(bp::object(bp::handle<>(bp::borrowed(cropPy))));
	}

	return foundCropsPy;
}
Example #14
PyObject* detect_and_extract(PyObject* p_descriptor_extractor, PyObject *p_img,
        PyObject *p_thresh, PyObject *p_octaves) {
    cv::Mat img = get_gray_img(p_img);
    std::vector<cv::KeyPoint> keypoints = detect(img, p_thresh, p_octaves);

    cv::Mat descriptors;
    brisk::BriskDescriptorExtractor* descriptor_extractor =
            static_cast<brisk::BriskDescriptorExtractor*>(PyCObject_AsVoidPtr(p_descriptor_extractor));
    descriptor_extractor->compute(img, keypoints, descriptors);

    NDArrayConverter cvt;
    PyObject* ret = PyList_New(2);
    PyObject* ret_keypoints = keypoints_ctopy(keypoints);
    PyList_SetItem(ret, 0, ret_keypoints);
    PyList_SetItem(ret, 1, cvt.toNDArray(descriptors));
    // PyList_SetItem steals the reference, so ret_keypoints must not be
    // Py_DECREF'ed here.

    return ret;
}
Example #15
PyObject*
mul(PyObject *left, PyObject *right)
{
    NDArrayConverter cvt;
    cv::Mat leftMat, rightMat;
    leftMat = cvt.toMat(left);
    rightMat = cvt.toMat(right);
    auto r1 = leftMat.rows, c1 = leftMat.cols, r2 = rightMat.rows,
         c2 = rightMat.cols;
    // Work only with 2-D matrices that can be legally multiplied.
    if (c1 != r2)
    {
        PyErr_SetString(PyExc_TypeError, 
                        "Incompatible sizes for matrix multiplication.");
        py::throw_error_already_set();
    }
    cv::Mat result = leftMat * rightMat;

    PyObject* ret = cvt.toNDArray(result);

    return ret;
}
Example #16
PyObject * get_bboxes(PyObject * seg_, uint32_t max_size, float sigma, uint32_t n) {
	NDArrayConverter cvt;
	cv::Mat seg     = cvt.toMat(seg_);
	return cvt.toNDArray(get_bboxes_(seg, max_size, sigma, n));
}
Example #17
static PyObject* convert(const T& mat) {
	NDArrayConverter cvt;
	PyObject* ret = cvt.toNDArray(mat);
	return ret;
}
Example #18
static PyObject* convert(const cv::Point3f& v) {
	NDArrayConverter cvt;
	PyObject* ret = cvt.toNDArray(cv::Mat(v));
	return ret;
}
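These convert() functions are Boost.Python to-python converters. For context, a hedged sketch of how they are typically registered (the wrapper struct names are assumptions; registration usually happens once inside BOOST_PYTHON_MODULE):

// Hypothetical structs wrapping the convert() functions above;
// boost::python::to_python_converter registers them for their C++ types.
boost::python::to_python_converter<cv::Mat, MatToNDArrayConverter>();
boost::python::to_python_converter<cv::Point3f, Point3fToNDArrayConverter>();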
Example #19
bp::object ClusterKmeansPPwithMask(PyObject *filteredCropImage, PyObject *maskForClustering,
				int k_num_cores, int num_lloyd_iterations, int num_kmeanspp_iterations, bool print_debug_console_output,
				double use5DclusteringScale)
{
#if PROFILING_KMEANS
	cout << "KMEANS_CPP_WITH_MASK ----------- start" << endl;
	auto tstart = std::chrono::steady_clock::now();
#endif
	
	NDArrayConverter cvt;
	cv::Mat srcCropImage = cvt.toMat(filteredCropImage);
	cv::Mat srcMaskImage = cvt.toMat(maskForClustering);
	
	if (srcCropImage.empty()) {
		cout << "ClusterKmeansPPwithMask() -- error: srcCropImage was empty" << endl << std::flush;
		return bp::object();
	}
	if (srcCropImage.channels() != 3) {
		cout << "ClusterKmeansPPwithMask() -- error: srcCropImage was not a 3-channel image" << endl << std::flush;
		return bp::object();
	}
	if (cv::countNonZero(srcMaskImage) < k_num_cores) {
		cout << "Error: num masked pixels < num k cores" << endl;
		return bp::object();
	}
	if (srcCropImage.type() != CV_32FC3) {
		srcCropImage.convertTo(srcCropImage, CV_32FC3);
	}
	
	//----------------------------------------------------
	// clustering / processing
	
	// get clusterable pixels
	std::vector<ClusterablePoint*>* clusterablePixels(GetSetOfPixelColors_WithMask_3Df(&srcCropImage, &srcMaskImage, use5DclusteringScale));
	
	double returned_potential;
	
#if PROFILING_KMEANS
	auto tstartclustering = std::chrono::steady_clock::now();
#endif
	
	// do kmeans++
	std::vector<std::vector<ClusterablePoint*>> resultsClusters(KMEANSPLUSPLUS(clusterablePixels, &myrng, k_num_cores, num_lloyd_iterations, num_kmeanspp_iterations, print_debug_console_output, &returned_potential));
	
#if PROFILING_KMEANS
	auto tendclustering = std::chrono::steady_clock::now();
#endif
	
	// get the color of each cluster
	std::vector<ClusterablePoint*> clusterColors(GetClusterMeanColors_3Df(resultsClusters));
	
	// get the binary masks of each cluster
	std::vector<cv::Mat> returnedMasksCPP(GetClusterMasks_3Df(resultsClusters, clusterColors, srcCropImage.rows, srcCropImage.cols));
	
	// draw the result to a color-clustered image
	cv::Mat drawnClusters(GetClusteredImage_3Df(resultsClusters, clusterColors, srcCropImage.rows, srcCropImage.cols));
	
	//----------------------------------------------------
	// now prepare to ship results to Python from C++
	
	int numClustersFound = (int)resultsClusters.size();
	
	bp::list returnedClusterColors; //will be a list of 3-element-lists
	bp::list returnedClusterMasks; //will be a list of cv2 numpy images
	
	for(int ii=0; ii<numClustersFound; ii++) {
		//get color of each cluster
		std::vector<double> thisClustersColors(3);
		ClusterablePoint_OpenCV* thiscluster_pixelcolor = dynamic_cast<ClusterablePoint_OpenCV*>(clusterColors[ii]);
		thisClustersColors[0] = thiscluster_pixelcolor->GetPixel()[0];
		thisClustersColors[1] = thiscluster_pixelcolor->GetPixel()[1];
		thisClustersColors[2] = thiscluster_pixelcolor->GetPixel()[2];
		returnedClusterColors.append(std_vector_to_py_list<double>(thisClustersColors));
		
		//get mask of each cluster
		PyObject* thisImgCPP = cvt.toNDArray(returnedMasksCPP[ii]);
		returnedClusterMasks.append(bp::object(bp::handle<>(bp::borrowed(thisImgCPP))));
	}
	
	// convert the drawn color-clustered image to a numpy array
	PyObject* drawnClustersCPP = cvt.toNDArray(drawnClusters);
	bp::object drawnClustersPython(bp::handle<>(bp::borrowed(drawnClustersCPP)));
	
#if PROFILING_KMEANS
	auto tend = std::chrono::steady_clock::now();
	cout << "CPP_KMEANS_WITHMASK -- total time: " << std::chrono::duration<double, std::milli>(tend-tstart).count() << " milliseconds" << endl;
	cout << "CPP_KMEANS_WITHMASK -- clustering time: " << std::chrono::duration<double, std::milli>(tendclustering-tstartclustering).count() << " milliseconds" << endl;
#endif
	
	return bp::make_tuple(drawnClustersPython, returnedClusterColors, returnedClusterMasks, returned_potential);
}