void initTracking(cv::LatentSvmDetector::ObjectDetection object, std::vector<kstate>& kstates, cv::LatentSvmDetector::ObjectDetection detection, cv::Mat& image, std::vector<cv::Scalar> colors, float range) { kstate new_state; //cv::KalmanFilter KF(4, 2, 0);//XY Only cv::KalmanFilter KF(8, 4, 0); /*cv::Mat_<float> measurement = (cv::Mat_<float>(2, 1) << object.rect.x,//XY Only object.rect.y);*/ cv::Mat_<float> measurement = (cv::Mat_<float>(4, 1) << object.rect.x, object.rect.y, object.rect.width, object.rect.height); /*KF.transitioncv::Matrix = (cv::Mat_<float>(4, 4) << 1, 0, 1, 0,//XY Only 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1);*/ KF.transitionMatrix = (cv::Mat_<float>(8, 8) << 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1); //init pre KF.statePre.at<float>(0) = object.rect.x; KF.statePre.at<float>(1) = object.rect.y; KF.statePre.at<float>(2) = object.rect.width;//XY Only KF.statePre.at<float>(3) = object.rect.height;//XY Only //init post KF.statePost.at<float>(0) = object.rect.x; KF.statePost.at<float>(1) = object.rect.y; KF.statePost.at<float>(2) = object.rect.width;//XY Only KF.statePost.at<float>(3) = object.rect.height;//XY Only cv::setIdentity(KF.measurementMatrix); cv::setIdentity(KF.processNoiseCov, cv:: Scalar::all(NOISE_COV));//1e-4 cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(MEAS_NOISE_COV));//1e-3 cv::setIdentity(KF.errorCovPost, cv::Scalar::all(ERROR_ESTIMATE_COV));//100 //clip detection //check that predicted positions are inside the image if (detection.rect.x < 0) detection.rect.x = 0; if (detection.rect.x > image.cols) detection.rect.x = image.cols - 1; if (detection.rect.y < 0) detection.rect.y = 0; if (detection.rect.height > image.rows) detection.rect.height = image.rows - 1; if (detection.rect.width + detection.rect.x > image.cols) detection.rect.width = image.cols - detection.rect.x; if 
(detection.rect.height + detection.rect.y > image.rows) detection.rect.height = image.rows - detection.rect.y; //save data to kstate new_state.active = true; new_state.image = image(cv::Rect(detection.rect.x, detection.rect.y, detection.rect.width, detection.rect.height)).clone();//Crop image and obtain only object (ROI) new_state.KF = KF; new_state.lifespan = INITIAL_LIFESPAN;//start only with 1 new_state.pos = object.rect; new_state.score = object.score; new_state.id = getAvailableIndex(kstates); new_state.color = colors[new_state.id]; new_state.real_data = 1; new_state.range = range; //extractOrbFeatures(new_state.image, new_state.orbKeypoints, new_state.orbDescriptors, ORB_NUM_FEATURES); kstates.push_back(new_state); }
/* XXX unfortunately API function gnk of which pyk.gk is based is a vararg
   function and therefore cannot be portably exported to Python. It would be
   better if libk20 supplied a function gnk_(I, K*) in addition to gnk(I,...)
   which would take an array of K objects as the second argument */

/* Build a K list from the K objects passed as positional arguments.
 *
 * If every argument has the same simple type t in 1..4, the result is the
 * corresponding homogeneous vector (type -t) and the scalar payloads are
 * copied; otherwise the result is a general list (type 0) that shares the
 * argument objects, taking a reference on each via ci().  With no arguments
 * an empty general list is returned.
 *
 * Errors: MemoryError if the scratch array cannot be allocated, TypeError if
 * gtn() returns NULL, bad-argument error if any argument is not a K object.
 */
static PyObject* _gk(PyObject* self, PyObject* args)
{
    int n = PyTuple_Size(args);
    if (!n) {
        return _mk_K(gtn(0, 0)); /* no args -> empty general list */
    }
    int i, type = INT_MAX;       /* INT_MAX == "no element type seen yet" */
    K* ks = (K*)malloc(n * sizeof(K));
    if (!ks) {                   /* BUGFIX: malloc result was never checked */
        return PyErr_NoMemory();
    }
    K kobj;
    for (i = 0; i < n; i++) {
        K ki;
        int t;
        PyObject* argi = PyTuple_GET_ITEM(args, i);
        if (!IS_K(argi)) {
            goto fail;
        }
        ks[i] = ki = ((_K*)argi)->kobj;
        t = ki->t;
        if (INT_MAX == type) {
            type = t;            /* first element proposes the vector type */
        } else if (t > 4 || t < 1 || t != type) {
            type = 0;            /* mixed or non-simple -> general list */
        }
    }
    /* Homogeneous simple type -> typed vector (-type); otherwise list (0). */
    kobj = gtn((type > 0 && type < 5) ? -type : 0, n);
    if (!kobj) {
        free(ks);
        return PyErr_Format(PyExc_TypeError, "gtn(%d,%d) returned null", -type, n);
    }
    switch (type) {
    case 1: for (i = 0; i < n; i++) { KI(kobj)[i] = Ki(ks[i]); } break;
    case 2: for (i = 0; i < n; i++) { KF(kobj)[i] = Kf(ks[i]); } break;
    case 3: for (i = 0; i < n; i++) { KC(kobj)[i] = Kc(ks[i]); } break;
    case 4: for (i = 0; i < n; i++) { KS(kobj)[i] = Ks(ks[i]); } break;
    default:
        /* General list shares the element objects: bump each refcount. */
        memcpy(KK(kobj), ks, n * sizeof(K));
        for (i = 0; i < n; i++) { ci(ks[i]); }
        break;
    }
    free(ks);
    return _mk_K(kobj);
fail:
    free(ks);
    PyErr_BadArgument();
    return NULL;
}