Example #1
vector<ArrayXXb> PyArray_ToArraysXXb(PyObject* array) {
	if(PyArray_DESCR(array)->type != PyArray_DescrFromType(NPY_BOOL)->type)
		throw Exception("Can only handle arrays of bool values.");

	if(PyArray_NDIM(array) == 3) {
		vector<ArrayXXb> channels;

		if(PyArray_FLAGS(array) & NPY_F_CONTIGUOUS)
			for(int m = 0; m < PyArray_DIM(array, 2); ++m)
				channels.push_back(Matrix<bool, Dynamic, Dynamic, ColMajor>(
					PyArray_DIM(array, 0),
					PyArray_DIM(array, 1)));

		else if(PyArray_FLAGS(array) & NPY_C_CONTIGUOUS)
			for(int m = 0; m < PyArray_DIM(array, 2); ++m)
				channels.push_back(Matrix<bool, Dynamic, Dynamic, RowMajor>(
					PyArray_DIM(array, 0),
					PyArray_DIM(array, 1)));

		else
			throw Exception("Data must be stored in contiguous memory.");

		bool* data = reinterpret_cast<bool*>(PyArray_DATA(array));
		int numChannels = PyArray_DIM(array, 2);

		if(PyArray_FLAGS(array) & NPY_F_CONTIGUOUS)
			// Fortran order: each channel occupies one contiguous column-major block
			for(int m = 0; m < numChannels; ++m)
				for(int i = 0; i < channels[m].size(); ++i)
					channels[m].data()[i] = data[m * channels[m].size() + i];
		else
			// C order: channels are interleaved along the last axis
			for(int m = 0; m < numChannels; ++m)
				for(int i = 0; i < channels[m].size(); ++i)
					channels[m].data()[i] = data[i * numChannels + m];

		return channels;
	} else {
		throw Exception("Can only handle three-dimensional arrays.");
	}
}
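The copy above only goes from NumPy to Eigen. A sketch of the reverse direction under the same channel-along-the-last-axis convention (PyArray_FromArraysXXb is a hypothetical name, not part of the original source):

PyObject* PyArray_FromArraysXXb(const vector<ArrayXXb>& channels) {
	npy_intp dims[3] = {channels[0].rows(), channels[0].cols(),
		static_cast<npy_intp>(channels.size())};

	// PyArray_SimpleNew allocates a C-contiguous array
	PyObject* array = PyArray_SimpleNew(3, dims, NPY_BOOL);
	bool* data = reinterpret_cast<bool*>(PyArray_DATA(array));

	// in C order, the channel index varies fastest
	for(int i = 0; i < channels[0].rows(); ++i)
		for(int j = 0; j < channels[0].cols(); ++j)
			for(size_t m = 0; m < channels.size(); ++m)
				*data++ = channels[m](i, j);

	return array;
}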
Example #2
int main(int argc, char *argv[])
{
    const char *path = "sequences/cokecan/img";
    const char *ext = "png";
    const int numLength = 5;
    char numString[numLength+1];
    char filename[255];
    int start = 0;
    int end = 291;
    cv::Point2f initTopLeft(150,82);
    cv::Point2f initBottomDown(170,118);

    CMT cmt;
    //cmt.estimateRotation = false;
    for(int i = start; i <= end; i++)
    {
        num2str(numString, numLength+1, i);
        sprintf(filename, "%s%s.%s", path, numString, ext);

#ifdef DEBUG_MODE
        qDebug() << filename;
#endif

        cv::Mat img = cv::imread(filename);
        cv::Mat im_gray;
        cv::cvtColor(img, im_gray, CV_BGR2GRAY); // cv::imread returns BGR data

        if(i == start)
            cmt.initialise(im_gray, initTopLeft, initBottomDown);
        cmt.processFrame(im_gray);

        for(size_t j = 0; j < cmt.trackedKeypoints.size(); j++)
            cv::circle(img, cmt.trackedKeypoints[j].first.pt, 3, cv::Scalar(255,255,255));
        cv::line(img, cmt.topLeft, cmt.topRight, cv::Scalar(255,255,255));
        cv::line(img, cmt.topRight, cmt.bottomRight, cv::Scalar(255,255,255));
        cv::line(img, cmt.bottomRight, cmt.bottomLeft, cv::Scalar(255,255,255));
        cv::line(img, cmt.bottomLeft, cmt.topLeft, cv::Scalar(255,255,255));

#ifdef DEBUG_MODE
        qDebug() << "trackedKeypoints";
        for(size_t j = 0; j < cmt.trackedKeypoints.size(); j++)
            qDebug() << cmt.trackedKeypoints[j].first.pt.x << cmt.trackedKeypoints[j].first.pt.y << cmt.trackedKeypoints[j].second;
        qDebug() << "box";
        qDebug() << cmt.topLeft.x << cmt.topLeft.y;
        qDebug() << cmt.topRight.x << cmt.topRight.y;
        qDebug() << cmt.bottomRight.x << cmt.bottomRight.y;
        qDebug() << cmt.bottomLeft.x << cmt.bottomLeft.y;
#endif

        imshow("frame", img);
        cv::waitKey(1);
    }
    return 0;
}
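num2str is an external helper in this example. A plausible implementation, assuming it only zero-pads the frame number to fill the buffer (a guess from the call site, not the original code):

#include <cstdio>

// writes i zero-padded to length-1 digits plus the terminating '\0'
void num2str(char *str, int length, int i)
{
    snprintf(str, length, "%0*d", length - 1, i);
}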
Example #3
bool CallbackInterface::operator()(int iter, const Trainable& cd) {
	CDObject* cdObj = reinterpret_cast<CDObject*>(CD_new(mType, 0, 0));

	// TODO: fix this hack
	cdObj->cd = const_cast<Trainable*>(&cd);
	cdObj->owner = false;

	// call Python object
	PyObject* args = Py_BuildValue("(iO)", iter, cdObj);
	PyObject* result = PyObject_CallObject(mCallback, args);
	Py_DECREF(args);

	// if cont is false, training will be aborted
	bool cont = true;

	if(result) {
		if(PyBool_Check(result))
			cont = (result == Py_True);
		Py_DECREF(result);
		Py_DECREF(cdObj);
	} else {
		Py_DECREF(cdObj);
		throw Exception("Some error occured during call to callback function.");
	}

	return cont;
}
Example #4
PyObject* MixtureComponent_train(MixtureComponentObject* self, PyObject* args, PyObject* kwds) {
	const char* kwlist[] = {"data", "weights", "parameters", 0};

	PyObject* data;
	PyObject* weights = 0;
	PyObject* parameters = 0;

	// read arguments
	if(!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO", const_cast<char**>(kwlist),
		&data, &weights, &parameters))
		return 0;

	if(weights == Py_None)
		weights = 0;

	// make sure data is stored in NumPy array
	data = PyArray_FROM_OTF(data, NPY_DOUBLE, NPY_F_CONTIGUOUS | NPY_ALIGNED);

	if(!data) {
		PyErr_SetString(PyExc_TypeError, "Data has to be stored in NumPy array.");
		return 0;
	}

	bool converged;

	try {
		Mixture::Component::Parameters* params = PyObject_ToMixtureComponentParameters(parameters);

		if(weights) {
			MatrixXd weightsMatrix = PyArray_ToMatrixXd(weights);
			MatrixXd dataMatrix = PyArray_ToMatrixXd(data);

			if(weightsMatrix.rows() != 1)
				// transposeInPlace avoids the aliasing problem of m = m.transpose()
				weightsMatrix.transposeInPlace();
			if(weightsMatrix.rows() != 1)
				throw Exception("Weights should be stored in a row vector.");

			converged = self->component->train(dataMatrix, weightsMatrix, *params);
		} else {
			converged = self->component->train(PyArray_ToMatrixXd(data), *params);
		}

		delete params;
	} catch(Exception& exception) {
		Py_DECREF(data);
		PyErr_SetString(PyExc_RuntimeError, exception.message());
		return 0;
	}

	Py_DECREF(data);

	if(converged) {
		Py_INCREF(Py_True);
		return Py_True;
	} else {
		Py_INCREF(Py_False);
		return Py_False;
	}
}
Example #5
Trainable::Parameters* PyObject_ToMLRParameters(PyObject* parameters) {
	MLR::Parameters* params = dynamic_cast<MLR::Parameters*>(
		PyObject_ToParameters(parameters, new MLR::Parameters));

	// read parameters from dictionary
	if(parameters && parameters != Py_None) {
		PyObject* callback = PyDict_GetItemString(parameters, "callback");
		if(callback)
			if(PyCallable_Check(callback))
				params->callback = new CallbackInterface(&MLR_type, callback);
			else if(callback != Py_None)
				throw Exception("callback should be a function or callable object.");

		PyObject* train_weights = PyDict_GetItemString(parameters, "train_weights");
		if(train_weights)
			if(PyBool_Check(train_weights))
				params->trainWeights = (train_weights == Py_True);
			else
				throw Exception("train_weights should be of type `bool`.");

		PyObject* train_biases = PyDict_GetItemString(parameters, "train_biases");
		if(train_biases)
			if(PyBool_Check(train_biases))
				params->trainBiases = (train_biases == Py_True);
			else
				throw Exception("train_biases should be of type `bool`.");

		PyObject* regularize_weights = PyDict_GetItemString(parameters, "regularize_weights");
		if(regularize_weights)
			params->regularizeWeights = PyObject_ToRegularizer(regularize_weights);

		PyObject* regularize_biases = PyDict_GetItemString(parameters, "regularize_biases");
		if(regularize_biases)
			params->regularizeBiases = PyObject_ToRegularizer(regularize_biases);

	}

	return params;
}
Example #6
MatrixXd PyArray_ToMatrixXd(PyObject* array) {
	if(PyArray_DESCR(array)->type != PyArray_DescrFromType(NPY_DOUBLE)->type)
		throw Exception("Can only handle arrays of double values.");

	if(PyArray_NDIM(array) == 1) {
		if(PyArray_FLAGS(array) & NPY_F_CONTIGUOUS)
			return Map<Matrix<double, Dynamic, Dynamic, ColMajor> >(
				reinterpret_cast<double*>(PyArray_DATA(array)),
				PyArray_DIM(array, 0), 1);

		else if(PyArray_FLAGS(array) & NPY_C_CONTIGUOUS)
			return Map<Matrix<double, Dynamic, Dynamic, RowMajor> >(
				reinterpret_cast<double*>(PyArray_DATA(array)),
				PyArray_DIM(array, 0), 1);

		else
			throw Exception("Data must be stored in contiguous memory.");

	} else if(PyArray_NDIM(array) == 2) {
		if(PyArray_FLAGS(array) & NPY_F_CONTIGUOUS)
			return Map<Matrix<double, Dynamic, Dynamic, ColMajor> >(
				reinterpret_cast<double*>(PyArray_DATA(array)),
				PyArray_DIM(array, 0),
				PyArray_DIM(array, 1));

		else if(PyArray_FLAGS(array) & NPY_C_CONTIGUOUS)
			return Map<Matrix<double, Dynamic, Dynamic, RowMajor> >(
				reinterpret_cast<double*>(PyArray_DATA(array)),
				PyArray_DIM(array, 0),
				PyArray_DIM(array, 1));

		else
			throw Exception("Data must be stored in contiguous memory.");

	} else {
		throw Exception("Can only handle one- and two-dimensional arrays.");
	}
}
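The reverse mapping, Eigen to NumPy, is not part of this example. A minimal sketch under the same conventions (the name PyArray_FromMatrixXd and the choice of a C-contiguous result are assumptions):

PyObject* PyArray_FromMatrixXd(const MatrixXd& matrix) {
	npy_intp dims[2] = {matrix.rows(), matrix.cols()};

	// allocate a C-contiguous (row-major) array of doubles
	PyObject* array = PyArray_SimpleNew(2, dims, NPY_DOUBLE);

	// map the NumPy buffer as a row-major Eigen matrix and copy into it
	Map<Matrix<double, Dynamic, Dynamic, RowMajor> >(
		reinterpret_cast<double*>(PyArray_DATA(array)),
		matrix.rows(), matrix.cols()) = matrix;

	return array;
}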
Example #7
Tuples PyList_AsTuples(PyObject* list) {
	if(!PyList_Check(list))
		throw Exception("Indices should be given in a list.");

	Tuples tuples;

	// convert list of tuples
	for(int i = 0; i < PyList_Size(list); ++i) {
		PyObject* tuple = PyList_GetItem(list, i);

		if(!PyTuple_Check(tuple) || PyTuple_Size(tuple) != 2)
			throw Exception("Indices should be stored in a list of 2-tuples.");

		int m, n;

		if(!PyArg_ParseTuple(tuple, "ii", &m, &n))
			throw Exception("Indices should be integers.");

		tuples.push_back(make_pair(m, n));
	}

	return tuples;
}
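Assuming Tuples is a vector of integer pairs, as the push_back above suggests, the conversion back to a Python list could look like this (PyList_FromTuples is a hypothetical name):

PyObject* PyList_FromTuples(const Tuples& tuples) {
	PyObject* list = PyList_New(tuples.size());

	for(size_t i = 0; i < tuples.size(); ++i)
		// PyList_SetItem steals the reference to the freshly built tuple
		PyList_SetItem(list, i, Py_BuildValue("(ii)",
			tuples[i].first, tuples[i].second));

	return list;
}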
Example #8
Regularizer PyObject_ToRegularizer(PyObject* regularizer) {
	if(PyFloat_Check(regularizer))
		return Regularizer(PyFloat_AsDouble(regularizer));

	if(PyInt_Check(regularizer))
		return Regularizer(static_cast<double>(PyInt_AsLong(regularizer)));

	if(PyDict_Check(regularizer)) {
		PyObject* r_strength = PyDict_GetItemString(regularizer, "strength");
		PyObject* r_transform = PyDict_GetItemString(regularizer, "transform");
		PyObject* r_norm = PyDict_GetItemString(regularizer, "norm");

		if(r_transform == Py_None)
			r_transform = 0;

		Regularizer::Norm norm = Regularizer::L2;

		if(r_norm) {
			if(!PyString_Check(r_norm) || PyString_Size(r_norm) != 2
				|| PyString_AsString(r_norm)[0] != 'L')
				throw Exception("Regularizer norm should be 'L1' or 'L2'.");

			switch(PyString_AsString(r_norm)[1]) {
				default:
					throw Exception("Regularizer norm should be 'L1' or 'L2'.");

				case '1':
					norm = Regularizer::L1;
					break;

				case '2':
					norm = Regularizer::L2;
					break;
			}
		}

		double strength = r_transform ? 1. : 0.;

		if(r_strength) {
			if(PyInt_Check(r_strength)) {
				strength = static_cast<double>(PyInt_AsLong(r_strength));
			} else {
				if(!PyFloat_Check(r_strength))
					throw Exception("Regularizer strength should be of type `float`.");
				strength = PyFloat_AsDouble(r_strength);
			}
		}

		if(r_transform) {
			PyObject* matrix = PyArray_FROM_OTF(r_transform, NPY_DOUBLE, NPY_IN_ARRAY);

			if(!matrix)
				throw Exception("Regularizer transform should be of type `ndarray`.");

			return Regularizer(PyArray_ToMatrixXd(matrix), norm, strength);
		} else {
			return Regularizer(strength, norm);
		}
	}

	PyObject* matrix = PyArray_FROM_OTF(regularizer, NPY_DOUBLE, NPY_IN_ARRAY);

	if(matrix)
		return Regularizer(PyArray_ToMatrixXd(matrix));

	throw Exception("Regularizer should be of type `dict`, `float` or `ndarray`.");
}
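For reference, the inputs this parser accepts, reconstructed from the branches above:

// 0.1                              -> Regularizer(0.1)
// {"strength": 0.1, "norm": "L1"}  -> L1 regularizer of strength 0.1
// {"transform": A}                 -> transform A; strength defaults to 1.
// {"norm": "L1"}                   -> L1 norm; strength defaults to 0.
// numpy.ndarray A                  -> Regularizer(A)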
Example #9
    bool updateModule()
    {
        ImageOf<PixelBgr> *img=imgInPort.read();
        if (img==NULL)
            return true;

        mutex.lock();

        ImageOf<PixelMono> imgMono;
        imgMono.resize(img->width(),img->height());

        cv::Mat imgMat=cv::cvarrToMat((IplImage*)img->getIplImage());
        cv::Mat imgMonoMat=cv::cvarrToMat((IplImage*)imgMono.getIplImage());
        cv::cvtColor(imgMat,imgMonoMat,CV_BGR2GRAY);

        if (initBoundingBox)
        {
            tracker->initialise(imgMonoMat,tl,br);
            initBoundingBox=false;
        }

        if (doCMT)
        {
            if (tracker->processFrame(imgMonoMat))
            {
                if (dataOutPort.getOutputCount()>0)
                {
                    Bottle &data=dataOutPort.prepare();
                    data.clear();

                    data.addInt((int)tracker->topLeft.x);
                    data.addInt((int)tracker->topLeft.y);
                    data.addInt((int)tracker->topRight.x);
                    data.addInt((int)tracker->topRight.y);
                    data.addInt((int)tracker->bottomRight.x);
                    data.addInt((int)tracker->bottomRight.y);
                    data.addInt((int)tracker->bottomLeft.x);
                    data.addInt((int)tracker->bottomLeft.y);

                    dataOutPort.write();
                }

                if (imgOutPort.getOutputCount()>0)
                {
                    cv::line(imgMat,tracker->topLeft,tracker->topRight,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->topRight,tracker->bottomRight,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->bottomRight,tracker->bottomLeft,cv::Scalar(255,0,0),2);
                    cv::line(imgMat,tracker->bottomLeft,tracker->topLeft,cv::Scalar(255,0,0),2);

                    for (size_t i=0; i<tracker->trackedKeypoints.size(); i++) 
                        cv::circle(imgMat,tracker->trackedKeypoints[i].first.pt,3,cv::Scalar(0,255,0));
                }
            }            
        }

        if (imgOutPort.getOutputCount()>0)
        {
            imgOutPort.prepare()=*img;
            imgOutPort.write();
        }

        mutex.unlock();
        return true;
    }
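updateModule is a method of a larger YARP module whose declaration is not shown. A sketch of the members it relies on, inferred from the usage above (the class itself is an assumption):

class CMTModule : public RFModule {
    BufferedPort<ImageOf<PixelBgr> > imgInPort;   // camera input
    BufferedPort<ImageOf<PixelBgr> > imgOutPort;  // annotated image output
    BufferedPort<Bottle> dataOutPort;             // bounding box corners
    Mutex mutex;

    CMT *tracker;          // the CMT tracker instance
    cv::Point2f tl, br;    // initial bounding box corners
    bool initBoundingBox;  // set when a new bounding box is requested
    bool doCMT;            // tracking enabled flag

public:
    bool updateModule();   // the method shown above
};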
Example #10
int main(int argc, char **argv)
{
    CMT cmt;
    Rect rect;
    
    FILELog::ReportingLevel() = logINFO; // = logDEBUG;
    Output2FILE::Stream() = stdout; //Log to stdout

    // openCV window
    namedWindow(WIN_NAME);

    // libuvc variables
    uvc_context *ctx = NULL;
    uvc_device_t *dev = NULL;
    uvc_device_handle_t *handle = NULL;
    uvc_stream_ctrl_t ctrl;
    uvc_error_t res;

    // create a UVC context
    res = uvc_init(&ctx, NULL);
    uvc_error(res, "init");

    // search for the Intel camera specifically
    res = uvc_find_device(ctx, &dev, 0x8086, 0x0a66, NULL);
    uvc_error(res, "find_device");

    // open the camera device
    res = uvc_open(dev, &handle);
    uvc_error(res, "open2");

    //uvc_print_diag(handle, stderr);

    // configure the stream format to use
    res = uvc_get_stream_ctrl_format_size(handle, &ctrl, UVC_FRAME_FORMAT_YUYV, W, H, FPS);
    uvc_perror(res, "get_stream");

    //uvc_print_stream_ctrl(&ctrl, stderr);

    // OpenCV matrix for storing the captured image data, CV_8UC3
    // means 8-bit Unsigned Char, 3 channels per pixel.
    Mat im;
    im.create( H, W, CV_8UC3);

    image_data = malloc(W*H*3);

    // start streaming data from the camera
    printf("Start streaming...\n");
    res = uvc_start_streaming(handle, &ctrl, uvc_callback, (void*)&cmt, 0);
    uvc_error(res, "start_streaming");
    
    // wait until the first frame has arrived so it can be used for previewing
    printf("Waiting for first frame...\n");
    while(!got_first_frame)
        usleep(10000);

    // grab the data for the captured frame
    memcpy(im.data, image_data, W*H*3);

    // display it in OpenCV and select a region to track
    rect = getRect(im, WIN_NAME);
    printf("Bounding box: %d,%d+%dx%d\n", rect.x, rect.y, rect.width, rect.height);

    // convert to suitable format for CMT if needed
    Mat im0_gray;
    if (im.channels() > 1) 
        cvtColor(im, im0_gray, CV_BGR2GRAY);
    else 
        im0_gray = im;

    cmt.initialize(im0_gray, rect);
    printf("Main loop starting\n");

    int frame = 0;
    while (true)
    {
        frame++;

        // grab the data for the captured frame
        memcpy(im.data, image_data, W*H*3);

        Mat im_gray;
        if (im.channels() > 1) 
            cvtColor(im, im_gray, CV_BGR2GRAY);
        else
            im_gray = im;

        cmt.processFrame(im_gray);
       
        printf("#%d, active: %lu\n", frame, cmt.points_active.size());
        char key = display(im, cmt);
        if(key == 'q')
            break;
    }

    uvc_stop_streaming(handle);

    free(image_data);

    uvc_close(handle);
    uvc_unref_device(dev);
    uvc_exit(ctx);

    return 0;
}
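uvc_callback, image_data and got_first_frame are defined elsewhere. A minimal sketch of what the callback might do, converting each incoming frame to BGR with standard libuvc calls (the internals are an assumption based on how main consumes the buffer):

void uvc_callback(uvc_frame_t *frame, void *ptr)
{
    // allocate a scratch frame and convert the incoming frame to BGR
    uvc_frame_t *bgr = uvc_allocate_frame(frame->width * frame->height * 3);
    if (!bgr)
        return;

    if (uvc_any2bgr(frame, bgr) == UVC_SUCCESS)
    {
        // hand the pixels to the main thread through the shared buffer
        memcpy(image_data, bgr->data, frame->width * frame->height * 3);
        got_first_frame = true;
    }

    uvc_free_frame(bgr);
}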
Example #11
Mixture::Component::Parameters* PyObject_ToMixtureComponentParameters(PyObject* parameters) {
	Mixture::Component::Parameters* params = new Mixture::Component::Parameters;

	if(parameters && parameters != Py_None) {
		PyObject* verbosity = PyDict_GetItemString(parameters, "verbosity");
		if(verbosity)
			if(PyInt_Check(verbosity))
				params->verbosity = PyInt_AsLong(verbosity);
			else if(PyFloat_Check(verbosity))
				params->verbosity = static_cast<int>(PyFloat_AsDouble(verbosity));
			else
				throw Exception("verbosity should be of type `int`.");

		PyObject* max_iter = PyDict_GetItemString(parameters, "max_iter");
		if(max_iter)
			if(PyInt_Check(max_iter))
				params->maxIter = PyInt_AsLong(max_iter);
			else if(PyFloat_Check(max_iter))
				params->maxIter = static_cast<int>(PyFloat_AsDouble(max_iter));
			else
				throw Exception("max_iter should be of type `int`.");

		PyObject* threshold = PyDict_GetItemString(parameters, "threshold");
		if(threshold)
			if(PyFloat_Check(threshold))
				params->threshold = PyFloat_AsDouble(threshold);
			else if(PyInt_Check(threshold))
				params->threshold = static_cast<double>(PyInt_AsLong(threshold));
			else
				throw Exception("threshold should be of type `float`.");

		PyObject* train_priors = PyDict_GetItemString(parameters, "train_priors");
		if(train_priors)
			if(PyBool_Check(train_priors))
				params->trainPriors = (train_priors == Py_True);
			else
				throw Exception("train_priors should be of type `bool`.");

		PyObject* train_covariance = PyDict_GetItemString(parameters, "train_covariance");
		if(train_covariance)
			if(PyBool_Check(train_covariance))
				params->trainCovariance = (train_covariance == Py_True);
			else
				throw Exception("train_covariance should be of type `bool`.");

		PyObject* train_scales = PyDict_GetItemString(parameters, "train_scales");
		if(train_scales)
			if(PyBool_Check(train_scales))
				params->trainScales = (train_scales == Py_True);
			else
				throw Exception("train_scales should be of type `bool`.");

		PyObject* train_mean = PyDict_GetItemString(parameters, "train_mean");
		if(train_mean)
			if(PyBool_Check(train_mean))
				params->trainMean = (train_mean == Py_True);
			else
				throw Exception("train_mean should be of type `bool`.");

		PyObject* regularize_priors = PyDict_GetItemString(parameters, "regularize_priors");
		if(regularize_priors)
			if(PyFloat_Check(regularize_priors))
				params->regularizePriors = PyFloat_AsDouble(regularize_priors);
			else if(PyInt_Check(regularize_priors))
				params->regularizePriors = static_cast<double>(PyInt_AsLong(regularize_priors));
			else
				throw Exception("regularize_priors should be of type `float`.");

		PyObject* regularize_covariance = PyDict_GetItemString(parameters, "regularize_covariance");
		if(regularize_covariance)
			if(PyFloat_Check(regularize_covariance))
				params->regularizeCovariance = PyFloat_AsDouble(regularize_covariance);
			else if(PyInt_Check(regularize_covariance))
				params->regularizeCovariance = static_cast<double>(PyInt_AsLong(regularize_covariance));
			else
				throw Exception("regularize_covariance should be of type `float`.");

		PyObject* regularize_scales = PyDict_GetItemString(parameters, "regularize_scales");
		if(regularize_scales)
			if(PyFloat_Check(regularize_scales))
				params->regularizeScales = PyFloat_AsDouble(regularize_scales);
			else if(PyInt_Check(regularize_scales))
				params->regularizeScales = static_cast<double>(PyInt_AsLong(regularize_scales));
			else
				throw Exception("regularize_scales should be of type `float`.");

		PyObject* regularize_mean = PyDict_GetItemString(parameters, "regularize_mean");
		if(regularize_mean)
			if(PyFloat_Check(regularize_mean))
				params->regularizeMean = PyFloat_AsDouble(regularize_mean);
			else if(PyInt_Check(regularize_mean))
				params->regularizeMean = static_cast<double>(PyInt_AsLong(regularize_mean));
			else
				throw Exception("regularize_mean should be of type `float`.");
	}

	return params;
}
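Every block above repeats the same float-or-int check. The original keeps them inline; a small helper like the following (hypothetical, not part of the source) would collapse each block into one call such as PyDict_ToDouble(parameters, "threshold", params->threshold):

static bool PyDict_ToDouble(PyObject* dict, const char* key, double& value) {
	PyObject* obj = PyDict_GetItemString(dict, key);

	if(!obj)
		return false;

	if(PyFloat_Check(obj))
		value = PyFloat_AsDouble(obj);
	else if(PyInt_Check(obj))
		value = static_cast<double>(PyInt_AsLong(obj));
	else
		throw Exception("Parameter should be of type `float`.");

	return true;
}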
Example #12
Mixture::Parameters* PyObject_ToMixtureParameters(PyObject* parameters) {
	Mixture::Parameters* params = new Mixture::Parameters;

	if(parameters && parameters != Py_None) {
		PyObject* verbosity = PyDict_GetItemString(parameters, "verbosity");
		if(verbosity)
			if(PyInt_Check(verbosity))
				params->verbosity = PyInt_AsLong(verbosity);
			else if(PyFloat_Check(verbosity))
				params->verbosity = static_cast<int>(PyFloat_AsDouble(verbosity));
			else
				throw Exception("verbosity should be of type `int`.");

		PyObject* max_iter = PyDict_GetItemString(parameters, "max_iter");
		if(max_iter)
			if(PyInt_Check(max_iter))
				params->maxIter = PyInt_AsLong(max_iter);
			else if(PyFloat_Check(max_iter))
				params->maxIter = static_cast<int>(PyFloat_AsDouble(max_iter));
			else
				throw Exception("max_iter should be of type `int`.");

		PyObject* threshold = PyDict_GetItemString(parameters, "threshold");
		if(threshold)
			if(PyFloat_Check(threshold))
				params->threshold = PyFloat_AsDouble(threshold);
			else if(PyInt_Check(threshold))
				params->threshold = static_cast<double>(PyInt_AsLong(threshold));
			else
				throw Exception("threshold should be of type `float`.");

		PyObject* val_iter = PyDict_GetItemString(parameters, "val_iter");
		if(val_iter)
			if(PyInt_Check(val_iter))
				params->valIter = PyInt_AsLong(val_iter);
			else if(PyFloat_Check(val_iter))
				params->valIter = static_cast<int>(PyFloat_AsDouble(val_iter));
			else
				throw Exception("val_iter should be of type `int`.");

		PyObject* val_look_ahead = PyDict_GetItemString(parameters, "val_look_ahead");
		if(val_look_ahead)
			if(PyInt_Check(val_look_ahead))
				params->valLookAhead = PyInt_AsLong(val_look_ahead);
			else if(PyFloat_Check(val_look_ahead))
				params->valLookAhead = static_cast<int>(PyFloat_AsDouble(val_look_ahead));
			else
				throw Exception("val_look_ahead should be of type `int`.");

		PyObject* initialize = PyDict_GetItemString(parameters, "initialize");
		if(initialize)
			if(PyBool_Check(initialize))
				params->initialize = (initialize == Py_True);
			else
				throw Exception("initialize should be of type `bool`.");

		PyObject* train_priors = PyDict_GetItemString(parameters, "train_priors");
		if(train_priors)
			if(PyBool_Check(train_priors))
				params->trainPriors = (train_priors == Py_True);
			else
				throw Exception("train_priors should be of type `bool`.");

		PyObject* train_components = PyDict_GetItemString(parameters, "train_components");
		if(train_components)
			if(PyBool_Check(train_components))
				params->trainComponents = (train_components == Py_True);
			else
				throw Exception("train_components should be of type `bool`.");

		PyObject* regularize_priors = PyDict_GetItemString(parameters, "regularize_priors");
		if(regularize_priors)
			if(PyFloat_Check(regularize_priors))
				params->regularizePriors = PyFloat_AsDouble(regularize_priors);
			else if(PyInt_Check(regularize_priors))
				params->regularizePriors = static_cast<double>(PyInt_AsLong(regularize_priors));
			else
				throw Exception("regularize_priors should be of type `float`.");
	}

	return params;
}
Example #13
int main(int argc, char **argv){
	/**
	 * CMT*  : the tracker itself
	 * int   : lifetime of the tracker (number of frames)
	 * Point : initial position of the CMT
	 **/
	vector< tuple<CMT*, int, Point> > cmt_list;
	CMT* cmt;
	CascadeClassifier logo_cascade;
	String logo_cascade_name;
	VideoCapture capture;
	Mat frame;
	const int nb_frame_threshold = 10;

	if(argc < 2) {
		usage(argv[0]);
		return 0;
	}
	namedWindow(window_name);
	FILELog::ReportingLevel() = logINFO;
	DetectAndDisplay d(window_name);
	logo_cascade_name = argv[1];

  //-- 1. Load the cascades
	if( !logo_cascade.load( logo_cascade_name ) ){ printf("--(!)Error loading cascade\n"); return -1; }
	
	//-- Get video
	capture.open( -1 );
	if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
	// capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	// capture.set(CV_CAP_PROP_FRAME_HEIGHT, 320);

	//process images
	while(capture.read(frame)) {
		if( frame.empty() ){
			printf(" --(!) No captured frame -- Break!");
			break;
		}
		Mat im_gray;
		if (frame.channels() > 1) {
			cvtColor(frame, im_gray, CV_BGR2GRAY);
		} else {
			im_gray = frame;
		}
		vector<Rect> logos;
		//-- 3. Apply the classifier to the frame
		d.detectAndMark(frame, logo_cascade, logos);

		//detect and track new objects
		for(uint i= 0; i<logos.size(); i++){ 
			Point2f a(logos[i].x,logos[i].y);
			bool isNew = true;
			for(uint j = 0; j < cmt_list.size(); j++){
				Point2f b(std::get<0>(cmt_list[j])->bb_rot.center);
				double res = cv::norm(cv::Mat(a),cv::Mat(b));
				double sizee = sqrt(logos[i].width*logos[i].width + logos[i].height*logos[i].height);
				if(res < sizee){
					std::get<1>(cmt_list[j]) = nb_frame_threshold;
					isNew = false;
					break;
				} 
			}
			if(isNew || cmt_list.size() == 0) {
				cmt = new CMT();
				cmt->initialize(im_gray, logos[i]);
				cmt_list.push_back(tuple<CMT*,int, Point>(cmt,nb_frame_threshold,cmt->bb_rot.center));
			}
		}
				
		//don't track an object that has not been detected for a long time
		for(uint i = 0; i<cmt_list.size(); i++){
			Point2f b(std::get<0>(cmt_list[i])->bb_rot.center);
			for(uint j = 0; j<logos.size(); j++) {
				Point2f a(logos[j].x,logos[j].y);
				RotatedRect r = std::get<0>(cmt_list[i])->bb_rot;
				double res = cv::norm(cv::Mat(a),cv::Mat(b));
				double sizee = sqrt(r.size.width * r.size.width + r.size.height * r.size.height);
				if(res<sizee){
					std::get<1>(cmt_list[i])++;
					break;
				}
			}
		std::get<1>(cmt_list[i])--;
		if(std::get<1>(cmt_list[i]) <= 0) {
			cmt_list.erase(cmt_list.begin()+i);
			--i; // stay on the same index; unsigned wrap-around is undone by i++
			continue;
		}
			for(uint j = 0; j < i; j++){ // each unordered pair is checked once
				Point2f a(std::get<0>(cmt_list[j])->bb_rot.center);
				RotatedRect r = std::get<0>(cmt_list[j])->bb_rot;
				double res = cv::norm(cv::Mat(a),cv::Mat(b));
				double sizee = sqrt(r.size.width * r.size.width + r.size.height * r.size.height);
				if(res<sizee){
					cmt_list.erase(cmt_list.begin()+j);
					break;
				}
			}
		}
		
		d.displayObject(logos, frame);
		for(uint i = 0; i<cmt_list.size(); i++) {
			std::get<0>(cmt_list[i])->processFrame(im_gray);
			char key = display(frame, *(std::get<0>(cmt_list[i])));
			if(key == 'q') break;
		}
		process(frame);

		vector<Point> initials;
		vector<Point> currents;
		for(uint i = 0; i < cmt_list.size(); i++) {
			initials.push_back(std::get<2>(cmt_list[i]));
			currents.push_back(std::get<0>(cmt_list[i])->bb_rot.center);
		}
		
		for(uint i = 0; i < counters.size(); i++) {
			counters[i].setInitials(initials);
			counters[i].setCurrents(currents);
		}
		
		for(uint i = 0; i < counters.size(); i++) {
			tuple<int,int,int, vector<Point> > tmp = counters[i].getSituation();
			putText(frame, to_string(std::get<0>(tmp)) + to_string(std::get<1>(tmp)) + to_string(std::get<2>(tmp)), Point(5,15*i+15), CV_FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255,255,0));
		}
		imshow(window_name, frame);
		waitKey(0);
	}
	return EXIT_SUCCESS;
}
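display(frame, cmt) is an external helper shared by several of these examples. A plausible version, assuming it draws the active points and the rotated bounding box and returns the pressed key:

char display(Mat im, CMT & cmt)
{
	//draw the tracked keypoints
	for(size_t i = 0; i < cmt.points_active.size(); i++)
		circle(im, cmt.points_active[i], 2, Scalar(255,0,0));

	//draw the rotated bounding box
	Point2f vertices[4];
	cmt.bb_rot.points(vertices);
	for(int i = 0; i < 4; i++)
		line(im, vertices[i], vertices[(i+1) % 4], Scalar(255,0,0));

	imshow(window_name, im);
	return (char) waitKey(5);
}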
Example #14
int main(int argc, char **argv)
{
    //Create a CMT object
    CMT cmt;

    //Initialization bounding box
    Rect rect;

    //Parse args
    int challenge_flag = 0;
    int loop_flag = 0;
    int verbose_flag = 0;
    int bbox_flag = 0;
    int skip_frames = 0;
    int skip_msecs = 0;
    int output_flag = 0;
    string input_path;
    string output_path;

    const int detector_cmd = 1000;
    const int descriptor_cmd = 1001;
    const int bbox_cmd = 1002;
    const int no_scale_cmd = 1003;
    const int with_rotation_cmd = 1004;
    const int skip_cmd = 1005;
    const int skip_msecs_cmd = 1006;
    const int output_file_cmd = 1007;

    struct option longopts[] =
    {
        //No-argument options
        {"challenge", no_argument, &challenge_flag, 1},
        {"loop", no_argument, &loop_flag, 1},
        {"verbose", no_argument, &verbose_flag, 1},
        {"no-scale", no_argument, 0, no_scale_cmd},
        {"with-rotation", no_argument, 0, with_rotation_cmd},
        //Argument options
        {"bbox", required_argument, 0, bbox_cmd},
        {"detector", required_argument, 0, detector_cmd},
        {"descriptor", required_argument, 0, descriptor_cmd},
        {"output-file", required_argument, 0, output_file_cmd},
        {"skip", required_argument, 0, skip_cmd},
        {"skip-msecs", required_argument, 0, skip_msecs_cmd},
        {0, 0, 0, 0}
    };

    int index = 0;
    int c;
    while((c = getopt_long(argc, argv, "v", longopts, &index)) != -1)
    {
        switch (c)
        {
            case 'v':
                verbose_flag = true;
                break;
            case bbox_cmd:
                {
                    //TODO: The following also accepts strings of the form %f,%f,%f,%fxyz...
                    string bbox_format = "%f,%f,%f,%f";
                    float x,y,w,h;
                    int ret = sscanf(optarg, bbox_format.c_str(), &x, &y, &w, &h);
                    if (ret != 4)
                    {
                        cerr << "bounding box must be given in format " << bbox_format << endl;
                        return 1;
                    }

                    bbox_flag = 1;
                    rect = Rect(x,y,w,h);
                }
                break;
            case detector_cmd:
                cmt.str_detector = optarg;
                break;
            case descriptor_cmd:
                cmt.str_descriptor = optarg;
                break;
            case output_file_cmd:
                output_path = optarg;
                output_flag = 1;
                break;
            case skip_cmd:
                {
                    int ret = sscanf(optarg, "%d", &skip_frames);
                    if (ret != 1)
                    {
                      skip_frames = 0;
                    }
                }
                break;
            case skip_msecs_cmd:
                {
                    int ret = sscanf(optarg, "%d", &skip_msecs);
                    if (ret != 1)
                    {
                      skip_msecs = 0;
                    }
                }
                break;
            case no_scale_cmd:
                cmt.consensus.estimate_scale = false;
                break;
            case with_rotation_cmd:
                cmt.consensus.estimate_rotation = true;
                break;
            case '?':
                return 1;
        }

    }

    // Can only skip frames or milliseconds, not both.
    if (skip_frames > 0 && skip_msecs > 0)
    {
      cerr << "You can only skip frames, or milliseconds, not both." << endl;
      return 1;
    }

    //One argument remains
    if (optind == argc - 1)
    {
        input_path = argv[optind];
    }

    else if (optind < argc - 1)
    {
        cerr << "Only one argument is allowed." << endl;
        return 1;
    }

    //Set up logging
    FILELog::ReportingLevel() = verbose_flag ? logDEBUG : logINFO;
    Output2FILE::Stream() = stdout; //Log to stdout

    //Challenge mode
    if (challenge_flag)
    {
        //Read list of images
        ifstream im_file("images.txt");
        vector<string> files;
        string line;
        while(getline(im_file, line ))
        {
            files.push_back(line);
        }

        //Read region
        ifstream region_file("region.txt");
        vector<float> coords = getNextLineAndSplitIntoFloats(region_file);

        if (coords.size() == 4) {
            rect = Rect(coords[0], coords[1], coords[2], coords[3]);
        }

        else if (coords.size() == 8)
        {
            //Split into x and y coordinates
            vector<float> xcoords;
            vector<float> ycoords;

            for (size_t i = 0; i < coords.size(); i++)
            {
                if (i % 2 == 0) xcoords.push_back(coords[i]);
                else ycoords.push_back(coords[i]);
            }

            float xmin = *min_element(xcoords.begin(), xcoords.end());
            float xmax = *max_element(xcoords.begin(), xcoords.end());
            float ymin = *min_element(ycoords.begin(), ycoords.end());
            float ymax = *max_element(ycoords.begin(), ycoords.end());

            rect = Rect(xmin, ymin, xmax-xmin, ymax-ymin);
            cout << "Found bounding box" << xmin << " " << ymin << " " <<  xmax-xmin << " " << ymax-ymin << endl;
        }

        else {
            cerr << "Invalid bounding box format" << endl;
            return 1;
        }

        //Read first image
        Mat im0 = imread(files[0]);
        Mat im0_gray;
        cvtColor(im0, im0_gray, CV_BGR2GRAY);

        //Initialize cmt
        cmt.initialize(im0_gray, rect);

        //Write init region to output file
        ofstream output_file("output.txt");
        output_file << rect.x << ',' << rect.y << ',' << rect.width << ',' << rect.height << std::endl;

        //Process images, write output to file
        for (size_t i = 1; i < files.size(); i++)
        {
            FILE_LOG(logINFO) << "Processing frame " << i << "/" << files.size();
            Mat im = imread(files[i]);
            Mat im_gray;
            cvtColor(im, im_gray, CV_BGR2GRAY);
            cmt.processFrame(im_gray);
            if (verbose_flag)
            {
                display(im, cmt);
            }
            rect = cmt.bb_rot.boundingRect();
            output_file << rect.x << ',' << rect.y << ',' << rect.width << ',' << rect.height << std::endl;
        }

        output_file.close();

        return 0;
    }

    //Normal mode

    //Create window
    namedWindow(WIN_NAME);

    VideoCapture cap;

    bool show_preview = true;

    //If no input was specified
    if (input_path.length() == 0)
    {
        cap.open(0); //Open default camera device
    }

    //Else open the video specified by input_path
    else
    {
        cap.open(input_path);

        if (skip_frames > 0)
        {
          cap.set(CV_CAP_PROP_POS_FRAMES, skip_frames);
        }

        if (skip_msecs > 0)
        {
          cap.set(CV_CAP_PROP_POS_MSEC, skip_msecs);

          // Now which frame are we on?
          skip_frames = (int) cap.get(CV_CAP_PROP_POS_FRAMES);
        }

        show_preview = false;
    }

    //If it doesn't work, stop
    if(!cap.isOpened())
    {
        cerr << "Unable to open video capture." << endl;
        return -1;
    }

    //Show preview until key is pressed
    while (show_preview)
    {
        Mat preview;
        cap >> preview;

        screenLog(preview, "Press a key to start selecting an object.");
        imshow(WIN_NAME, preview);

        char k = waitKey(10);
        if (k != -1) {
            show_preview = false;
        }
    }

    //Get initial image
    Mat im0;
    cap >> im0;

    //If no bounding was specified, get it from user
    if (!bbox_flag)
    {
        rect = getRect(im0, WIN_NAME);
    }

    FILE_LOG(logINFO) << "Using " << rect.x << "," << rect.y << "," << rect.width << "," << rect.height
        << " as initial bounding box.";

    //Convert im0 to grayscale
    Mat im0_gray;
    if (im0.channels() > 1) {
        cvtColor(im0, im0_gray, CV_BGR2GRAY);
    } else {
        im0_gray = im0;
    }

    //Initialize CMT
    cmt.initialize(im0_gray, rect);

    int frame = skip_frames;

    //Open output file.
    ofstream output_file;

    if (output_flag)
    {
        int msecs = (int) cap.get(CV_CAP_PROP_POS_MSEC);

        output_file.open(output_path.c_str());
        output_file << OUT_FILE_COL_HEADERS << endl;
        output_file << frame << "," << msecs << ",";
        output_file << cmt.points_active.size() << ",";
        output_file << write_rotated_rect(cmt.bb_rot) << endl;
    }

    //Main loop
    while (true)
    {
        frame++;

        Mat im;

        //If loop flag is set, reuse initial image (for debugging purposes)
        if (loop_flag) im0.copyTo(im);
        else cap >> im; //Else use next image in stream

        if (im.empty()) break; //Exit at end of video stream

        Mat im_gray;
        if (im.channels() > 1) {
            cvtColor(im, im_gray, CV_BGR2GRAY);
        } else {
            im_gray = im;
        }

        //Let CMT process the frame
        cmt.processFrame(im_gray);

        //Output.
        if (output_flag)
        {
            int msecs = (int) cap.get(CV_CAP_PROP_POS_MSEC);
            output_file << frame << "," << msecs << ",";
            output_file << cmt.points_active.size() << ",";
            output_file << write_rotated_rect(cmt.bb_rot) << endl;
        }
        else
        {
            //TODO: Provide meaningful output
            FILE_LOG(logINFO) << "#" << frame << " active: " << cmt.points_active.size();
            FILE_LOG(logINFO) << "confidence: " << cmt.confidence;
        }

        //Display image and then quit if requested.
        char key = display(im, cmt);
        if(key == 'q') break;
    }

    //Close output file.
    if (output_flag) output_file.close();

    return 0;
}
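getNextLineAndSplitIntoFloats reads one line of region.txt; it is defined elsewhere in the project. A sketch consistent with its use above, assuming comma-separated floats on a single line:

vector<float> getNextLineAndSplitIntoFloats(istream &input)
{
    vector<float> result;
    string line;

    if (getline(input, line))
    {
        istringstream ss(line);
        string item;

        // split on commas and parse each field as a float
        while (getline(ss, item, ','))
            result.push_back(atof(item.c_str()));
    }

    return result;
}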
Example #15
Trainable::Parameters* PyObject_ToMCGSMParameters(PyObject* parameters) {
	MCGSM::Parameters* params = dynamic_cast<MCGSM::Parameters*>(
		PyObject_ToParameters(parameters, new MCGSM::Parameters));

	// read parameters from dictionary
	if(parameters && parameters != Py_None) {
		PyObject* callback = PyDict_GetItemString(parameters, "callback");
		if(callback)
			if(PyCallable_Check(callback))
				params->callback = new CallbackInterface(&MCGSM_type, callback);
			else if(callback != Py_None)
				throw Exception("callback should be a function or callable object.");

		PyObject* train_priors = PyDict_GetItemString(parameters, "train_priors");
		if(train_priors)
			if(PyBool_Check(train_priors))
				params->trainPriors = (train_priors == Py_True);
			else
				throw Exception("train_priors should be of type `bool`.");

		PyObject* train_scales = PyDict_GetItemString(parameters, "train_scales");
		if(train_scales)
			if(PyBool_Check(train_scales))
				params->trainScales = (train_scales == Py_True);
			else
				throw Exception("train_scales should be of type `bool`.");

		PyObject* train_weights = PyDict_GetItemString(parameters, "train_weights");
		if(train_weights)
			if(PyBool_Check(train_weights))
				params->trainWeights = (train_weights == Py_True);
			else
				throw Exception("train_weights should be of type `bool`.");

		PyObject* train_features = PyDict_GetItemString(parameters, "train_features");
		if(train_features)
			if(PyBool_Check(train_features))
				params->trainFeatures = (train_features == Py_True);
			else
				throw Exception("train_features should be of type `bool`.");

		PyObject* train_cholesky_factors = PyDict_GetItemString(parameters, "train_cholesky_factors");
		if(train_cholesky_factors)
			if(PyBool_Check(train_cholesky_factors))
				params->trainCholeskyFactors = (train_cholesky_factors == Py_True);
			else
				throw Exception("train_cholesky_factors should be of type `bool`.");

		PyObject* train_predictors = PyDict_GetItemString(parameters, "train_predictors");
		if(train_predictors)
			if(PyBool_Check(train_predictors))
				params->trainPredictors = (train_predictors == Py_True);
			else
				throw Exception("train_predictors should be of type `bool`.");

		PyObject* train_linear_features = PyDict_GetItemString(parameters, "train_linear_features");
		if(train_linear_features)
			if(PyBool_Check(train_linear_features))
				params->trainLinearFeatures = (train_linear_features == Py_True);
			else
				throw Exception("train_linear_features should be of type `bool`.");

		PyObject* train_means = PyDict_GetItemString(parameters, "train_means");
		if(train_means)
			if(PyBool_Check(train_means))
				params->trainMeans = (train_means == Py_True);
			else
				throw Exception("train_means should be of type `bool`.");

		PyObject* regularize_features = PyDict_GetItemString(parameters, "regularize_features");
		if(regularize_features)
			params->regularizeFeatures = PyObject_ToRegularizer(regularize_features);

		PyObject* regularize_predictors = PyDict_GetItemString(parameters, "regularize_predictors");
		if(regularize_predictors)
			params->regularizePredictors = PyObject_ToRegularizer(regularize_predictors);

		PyObject* regularize_weights = PyDict_GetItemString(parameters, "regularize_weights");
		if(regularize_weights)
			params->regularizeWeights = PyObject_ToRegularizer(regularize_weights);

		PyObject* regularize_linear_features = PyDict_GetItemString(parameters, "regularize_linear_features");
		if(regularize_linear_features)
			params->regularizeLinearFeatures = PyObject_ToRegularizer(regularize_linear_features);

		PyObject* regularize_means = PyDict_GetItemString(parameters, "regularize_means");
		if(regularize_means)
			params->regularizeMeans = PyObject_ToRegularizer(regularize_means);
	}

	return params;
}
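PyObject_ToParameters, used here and in Example #5, fills in the generic optimization settings before the model-specific ones are read. A minimal sketch of what it might do (the exact fields it handles are an assumption):

Trainable::Parameters* PyObject_ToParameters(PyObject* parameters, Trainable::Parameters* params) {
	if(parameters && parameters != Py_None) {
		PyObject* verbosity = PyDict_GetItemString(parameters, "verbosity");
		if(verbosity && PyInt_Check(verbosity))
			params->verbosity = PyInt_AsLong(verbosity);

		PyObject* max_iter = PyDict_GetItemString(parameters, "max_iter");
		if(max_iter && PyInt_Check(max_iter))
			params->maxIter = PyInt_AsLong(max_iter);
	}

	return params;
}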
Example #16
int main(int argc, char *argv[])
{
    ros::init(argc, argv, "Vision");
    ROS_INFO("node creation");
    ros::NodeHandle nh_v;
    ros::Rate loop_rate(10);

    int start = 0;
    bool SelectROI = false; //For bounding box
    cout << "Do you want to give the ROI?" << endl << "Press y for YES and n for NO" << endl;
    char input = 'n';
    cin >> input;
    if(input != 'n')
        SelectROI = true;

    cout << "SelectROI : " << SelectROI << endl;
	
    CMT cmt;
    
    //Code for normal camera using OpenCV
    /*VideoCapture cap(0);
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); 
    bool open=true;
    if (!cap.isOpened())  // if not success, exit program
    {
        cout << "Cannot open the video cam" << endl;
        return -1;
        open=false;
    }
    cvNamedWindow("Initial frame",CV_WINDOW_AUTOSIZE );*/
 

    //Code for using Kinect input using OpenCV
    bool open=true;
    VideoCapture cap;
    cap.open(CV_CAP_OPENNI);
    cap.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
    if( !cap.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }
    cout << " Device Open " << endl;
 
    if( !cap.grab() )
    {
        cout << "Can not grab images." << endl;
        return -1;
    }

    cvNamedWindow("Initial frame", CV_WINDOW_AUTOSIZE);

    int Keyhit = -1; //Used to get Enter input from the keyboard for ROI selection
    
    while(Keyhit == -1){
        //cap.read(img); //used for normal cameras
        cap.grab();
        cap.retrieve(img, CV_CAP_OPENNI_BGR_IMAGE);

        if(SelectROI == true)
            putText(img, "Press enter when the object is in the field of view ", cvPoint(20,30), FONT_HERSHEY_COMPLEX_SMALL, 0.6, cvScalar(255,255,255), 1, CV_AA);
        else {
            putText(img, "Using default bounding box at the center of frame ", cvPoint(20,30), FONT_HERSHEY_COMPLEX_SMALL, 0.6, cvScalar(255,255,255), 1, CV_AA);
            putText(img, "Press enter to start tracking ", cvPoint(20,60), FONT_HERSHEY_COMPLEX_SMALL, 0.6, cvScalar(255,255,255), 1, CV_AA);
        }

        imshow("Initial frame", img);
        Keyhit = cv::waitKey(1);
    }

    while(open && ros::ok())
    {
        //Capturing images from the Kinect via OpenCV
        //cap.read(img); //used for normal cameras
        cap.grab();
        cap.retrieve(img, CV_CAP_OPENNI_BGR_IMAGE);
        cv::cvtColor(img, im_gray, CV_BGR2GRAY); //frames are retrieved as BGR, so convert with CV_BGR2GRAY

        while(start == 0 && SelectROI == true){
            cout << "Inside ROI selection" << endl;

            //set the callback function for any mouse event
            setMouseCallback("Initial frame", getRect, NULL); //mouse callback function with image as param

            putText(img, "Use mouse to select the rectangle around object to track ", cvPoint(20,30), FONT_HERSHEY_COMPLEX_SMALL, 0.6, cvScalar(255,255,255), 1, CV_AA);
            putText(img, "You can reselect till you press Enter ", cvPoint(20,50), FONT_HERSHEY_COMPLEX_SMALL, 0.6, cvScalar(255,255,255), 1, CV_AA);
            putText(img, "After selecting PRESS ENTER", cvPoint(20,70), FONT_HERSHEY_COMPLEX_SMALL, 0.6, cvScalar(255,255,255), 1, CV_AA);

            imshow("Initial frame", img);
            cv::waitKey(0);
            start = 1;
            cout << "ROI selected manually" << endl;
            cout << "Rectangular edge points : " << initBottomDown << "," << initTopLeft << endl;
            cout << "CMT initialising" << endl;
            cmt.initialise(im_gray, initTopLeft, initBottomDown); //initialise with the user-selected bounding box
            setMouseCallback("Initial frame", NULL, NULL); //disable the callback function
        }

        if(start == 0 && SelectROI == false){ //if no manual ROI is selected, use the default

            initTopLeft.x = 270;
            initTopLeft.y = 190; //default bounding box position - frame center
            initBottomDown.x = 370;
            initBottomDown.y = 290;

            cout << "Rectangular edge points : " << initBottomDown << "," << initTopLeft << endl;
            cout << "CMT initialising" << endl;
            cmt.initialise(im_gray, initTopLeft, initBottomDown); //initialise with the default bounding box
            start = 1;
        }


        cmt.processFrame(im_gray); //using the processFrame method

        cout << "The center is : " << cmt.CENTER << endl;
        cout << "The scaling is : " << cmt.SCALE << endl;
        cout << "The orientation is : " << cmt.ROTATION << endl;

        for(size_t i = 0; i < cmt.trackedKeypoints.size(); i++)
            cv::circle(img, cmt.trackedKeypoints[i].first.pt, 3, cv::Scalar(255,255,255)); //draw circles around tracked points

        cv::line(img, cmt.topLeft, cmt.topRight, cv::Scalar(255,255,255));
        cv::line(img, cmt.topRight, cmt.bottomRight, cv::Scalar(255,255,255));
        cv::line(img, cmt.bottomRight, cmt.bottomLeft, cv::Scalar(255,255,255));
        cv::line(img, cmt.bottomLeft, cmt.topLeft, cv::Scalar(255,255,255));

        putText(img, "Tracking ", cvPoint(20,30), FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(255,255,255), 1, CV_AA);
        imshow("Initial frame", img); //show the frame with keypoint circles
        cv::waitKey(1);
    } //close the open while loop

   
    return 0;
}
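getRect, img, im_gray, initTopLeft and initBottomDown are globals shared between main and the mouse callback in this example. A possible callback consistent with the setMouseCallback usage above (the drag handling is an assumption):

void getRect(int event, int x, int y, int flags, void* param)
{
    if(event == CV_EVENT_LBUTTONDOWN) {
        //start of the drag: record the top-left corner
        initTopLeft = cv::Point2f(x, y);
    }
    else if(event == CV_EVENT_LBUTTONUP) {
        //end of the drag: record the bottom-right corner and preview it
        initBottomDown = cv::Point2f(x, y);
        cv::rectangle(img, initTopLeft, initBottomDown, cv::Scalar(255,255,255));
        imshow("Initial frame", img);
    }
}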