void Game_loop() {
    /* TODO: Your code goes here
     *
     * Any code that modifies the screen
     * which needs to run once per frame
     * goes here */
    screen.text(font5x7_font,   // <-- the font to use
        Point_t(10, 10),        // <-- where to put the upper-left hand corner of the text
        "Test message!\n"       // <-- the string to print
        "You can also print things on multiple\n"
        "lines by using the \\n character sequence\n"
        "in your strings (newline).",
        MODE_BLEND_INVERT       // <-- the drawing mode (Invert the source bitmap, then blend)
    );

    screen.text_plus_offset(font5x7_font,   // <-- the font to use
        Point_t(10, 64),        // <-- where to put the upper-left hand corner of the text
        "Wavy text!\n"          // <-- the string to print
        "You can also print wavy text and apply other effects\n"
        "by calling screen.text_plus_offset and passing a\n"
        "position modification function as an argument!\n"
        "WHEEEE!",
        modpos,
        MODE_BLEND_INVERT       // <-- the drawing mode (Invert the source bitmap, then blend)
    );

    // Swap the buffer (present the drawing to the screen)
    TVOut_Swap();

    // Wait for the frame to complete drawing to the screen
    TVOut_WaitFrames(1);
}
void Game_loop() {
    /* TODO: Your code goes here
     *
     * Any code that modifies the screen
     * which needs to run once per frame
     * goes here */

    // Draw the bitmap without scaling in the upper-left hand corner
    screen.bitmap(&smiley_bitmap, &src_rect, &dest_rect, MODE_OVERWRITE);
    // Throw a label on there
    screen.text(font5x7_font, dest_rect.lowerLeft() + Point_t(0,2), "OW 64x64", MODE_OVERWRITE_INVERT);

    // Draw the bitmap with scaling in the lower-left hand corner
    screen.bitmap_scaled(&smiley_bitmap, &src_rect, &dest_rect_scaled, MODE_OVERWRITE);
    // Throw a label on there
    screen.text(font5x7_font, dest_rect_scaled.lowerLeft() + Point_t(0,2), "OW sc. 72x72", MODE_OVERWRITE_INVERT);

    // For 2 rows
    for(int j = 0; j < 2; j++)
        // For 4 columns
        for(int i = 0; i < 4; i++) {
            // The relative position of the top rectangle (drawn first), 50% scaling
            Rect_t overlap_top_rect(0, 0, 32, 32);
            // The relative position of the bottom rectangle (drawn second), 50% scaling
            Rect_t overlap_bottom_rect(16, 16, 32, 32);
            // The temporary destination rectangle, used to store the
            // absolute position before drawing
            Rect_t overlap_dest_rect;
            // The absolute offset; relative positions are added
            // to this to produce the output positions
            Point_t offset((i + 1) * (64 + 10), 10 + (10 + 64) * j);

            // Compute the absolute destination rectangles and draw the bitmap scaled to 50%
            overlap_dest_rect = overlap_top_rect + offset;
            screen.bitmap_scaled(&smiley_bitmap, &src_rect, &overlap_dest_rect, MODE_OVERWRITE);

            overlap_dest_rect = overlap_bottom_rect + offset;
            screen.bitmap_scaled(&smiley_bitmap, &src_rect, &overlap_dest_rect, modes[i + 4*j]);

            // Throw a label on there
            screen.text(font5x7_font, overlap_dest_rect.lowerLeft() + Point_t(0,2), mode_strings[i + 4*j], MODE_OVERWRITE_INVERT);
        }

    // Swap the buffer (present the drawing to the screen)
    TVOut_Swap();

    // Wait for the frame to complete drawing to the screen
    TVOut_WaitFrames(1);
}
//---------------------------------------------------------------------------
Point_t TKalmanFilter::GetPointPrediction()
{
    if (m_initialized)
    {
        cv::Mat prediction;

        switch (m_type)
        {
        case TypeLinear:
            prediction = m_linearKalman->predict();
            break;

        case TypeUnscented:
        case TypeAugmentedUnscented:
#if USE_OCV_UKF
            prediction = m_uncsentedKalman->predict();
#else
            prediction = m_linearKalman->predict();
            std::cerr << "UnscentedKalmanFilter was disabled in CMAKE! Set KalmanLinear in constructor." << std::endl;
#endif
            break;
        }

        m_lastPointResult = Point_t(prediction.at<track_t>(0), prediction.at<track_t>(1));
    }
    return m_lastPointResult;
}
PlayerSObj::PlayerSObj(uint id) : ServerObject(id)
{
    // Configuration options
    jumpDist = CM::get()->find_config_as_float("JUMP_DIST");
    movDamp = CM::get()->find_config_as_int("MOV_DAMP");

    if(SOM::get()->debugFlag) DC::get()->print("Reinitialized PlayerSObj %d\n", this->getId());

    Point_t pos = Point_t(0, 5, 10);
    Box bxVol = CM::get()->find_config_as_box("BOX_CUBE"); //Box(-10, 0, -10, 20, 20, 20);
    //pm = new PhysicsModel(Point_t(-50,0,150), Rot_t(), 5);
    pm = new PhysicsModel(pos, Rot_t(), CM::get()->find_config_as_float("PLAYER_MASS"));
    pm->addBox(bxVol);
    lastCollision = pos;

    this->health = CM::get()->find_config_as_int("INIT_HEALTH");

    // Initialize input status
    istat.attack = false;
    istat.jump = false;
    istat.quit = false;
    istat.start = false;
    istat.specialPower = false;
    istat.rotAngle = 0.0;
    istat.rotHoriz = 0.0;
    istat.rotVert = 0.0;
    istat.rightDist = 0.0;
    istat.forwardDist = 0.0;

    newJump = true;   // any jump at this point is a new jump
    newAttack = true; // same here
    appliedJumpForce = false;
    bool firedeath = false;
    attacking = false;
    gravityTimer = 0;
}
void singleObjectTracker::MissUpdate(int frameNum)
{
    KF.GetPrediction();
    prediction = KF.Update(Point_t(0,0), false);

    if(AvoidUpdate){
        AvoidUpdate = false;
        return;
    }

    status = MISSING_STATUS;
    vec_status.push_back(status);
    frames.push_back(frameNum);
    skipped_frames++;

    Rect_t lastRect = rects.back();
    Point_t diff = prediction - feature->pos; // may be outside the image range!
    rects.push_back(Rect_t(lastRect.x + diff.x, lastRect.y + diff.y, lastRect.width, lastRect.height));

    trace.push_back(prediction);
    lifetime++;
    // lastSeePos = of.pos;

    assert(lifetime == 1 + frameNum - frames[0]);
    assert(lifetime == (catch_frames + skipped_frames));
}
//---------------------------------------------------------------------------
Point_t TKalmanFilter::GetPrediction()
{
    cv::Mat prediction = kalman->predict();
    LastResult = Point_t(prediction.at<track_t>(0), prediction.at<track_t>(1));
    return LastResult;
}
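For context, a minimal sketch of how this predict/update pair is typically driven each frame. The `Update(Point_t, bool)` overload and the predict-before-update ordering are taken from the `MissUpdate` snippet above; the helper function itself and its parameter names are hypothetical.

```cpp
// Hypothetical per-frame driver for the point-based filter above; only
// GetPrediction() and Update(Point_t, bool) come from the snippets,
// everything else is illustrative.
Point_t StepPointFilter(TKalmanFilter& KF, const Point_t& measured, bool detected)
{
    KF.GetPrediction();                       // time update (a-priori estimate)
    return detected
        ? KF.Update(measured, true)           // correct with the real measurement
        : KF.Update(Point_t(0, 0), false);    // no detection: coast on the prediction
}
```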
int main(int ac, char** av)
{
    std::string inFile("..\\..\\data\\TrackingBugs.mp4");
    if (ac > 1)
    {
        inFile = av[1];
    }

#if ExampleNum
    cv::Scalar Colors[] = { cv::Scalar(255, 0, 0), cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                            cv::Scalar(255, 255, 0), cv::Scalar(0, 255, 255), cv::Scalar(255, 0, 255),
                            cv::Scalar(255, 127, 255), cv::Scalar(127, 0, 255), cv::Scalar(127, 0, 127) };

    cv::VideoCapture capture(inFile);
    if (!capture.isOpened())
    {
        return 1;
    }
    cv::namedWindow("Video");
    cv::Mat frame;
    cv::Mat gray;

    CTracker tracker(0.2f, 0.1f, 60.0f, 5, 10);

    capture >> frame;
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
    CDetector* detector = new CDetector(gray);

    int k = 0;
    double freq = cv::getTickFrequency();
    int64 allTime = 0;

    while (k != 27)
    {
        capture >> frame;
        if (frame.empty())
        {
            //capture.set(cv::CAP_PROP_POS_FRAMES, 0);
            //continue;
            break;
        }
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);

        int64 t1 = cv::getTickCount();

        const std::vector<Point_t>& centers = detector->Detect(gray);
        tracker.Update(centers);

        int64 t2 = cv::getTickCount();
        allTime += t2 - t1;

        for (size_t i = 0; i < centers.size(); i++)
        {
            cv::circle(frame, centers[i], 3, cv::Scalar(0, 255, 0), 1, CV_AA);
        }

        std::cout << tracker.tracks.size() << std::endl;

        for (size_t i = 0; i < tracker.tracks.size(); i++)
        {
            if (tracker.tracks[i]->trace.size() > 1)
            {
                for (size_t j = 0; j < tracker.tracks[i]->trace.size() - 1; j++)
                {
                    cv::line(frame, tracker.tracks[i]->trace[j], tracker.tracks[i]->trace[j + 1],
                             Colors[tracker.tracks[i]->track_id % 9], 2, CV_AA);
                }
            }
        }

        cv::imshow("Video", frame);

        k = cv::waitKey(20);
    }

    std::cout << "work time = " << (allTime / freq) << std::endl;

    delete detector;
    cv::destroyAllWindows();
    return 0;
#else
    int k = 0;
    cv::Scalar Colors[] = { cv::Scalar(255, 0, 0), cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                            cv::Scalar(255, 255, 0), cv::Scalar(0, 255, 255), cv::Scalar(255, 255, 255) };
    cv::namedWindow("Video");
    cv::Mat frame = cv::Mat(800, 800, CV_8UC3);
    cv::VideoWriter vw("output.mpeg", CV_FOURCC('P', 'I', 'M', '1'), 20, frame.size());

    // Set mouse callback
    cv::Point2f pointXY;
    cv::setMouseCallback("Video", mv_MouseCallback, (void*)&pointXY);

    CTracker tracker(0.3f, 0.5f, 60.0f, 25, 25);
    track_t alpha = 0;
    cv::RNG rng;

    while (k != 27)
    {
        frame = cv::Scalar::all(0);

        // Noise addition (simulates noisy measurements/detections)
        float Xmeasured = pointXY.x + static_cast<float>(rng.gaussian(2.0));
        float Ymeasured = pointXY.y + static_cast<float>(rng.gaussian(2.0));

        // Append points circulating around the mouse position (frequently intersecting)
        std::vector<Point_t> pts;
        pts.push_back(Point_t(Xmeasured + 100.0f * sin(-alpha), Ymeasured + 100.0f * cos(-alpha)));
        pts.push_back(Point_t(Xmeasured + 100.0f * sin(alpha), Ymeasured + 100.0f * cos(alpha)));
        pts.push_back(Point_t(Xmeasured + 100.0f * sin(alpha / 2.0f), Ymeasured + 100.0f * cos(alpha / 2.0f)));
        pts.push_back(Point_t(Xmeasured + 100.0f * sin(alpha / 3.0f), Ymeasured + 100.0f * cos(alpha / 1.0f)));
        alpha += 0.05f;

        for (size_t i = 0; i < pts.size(); i++)
        {
            cv::circle(frame, pts[i], 3, cv::Scalar(0, 255, 0), 1, CV_AA);
        }

        tracker.Update(pts);

        std::cout << tracker.tracks.size() << std::endl;

        for (size_t i = 0; i < tracker.tracks.size(); i++)
        {
            const auto& track = tracker.tracks[i];
            if (track->trace.size() > 1)
            {
                for (size_t j = 0; j < track->trace.size() - 1; j++)
                {
                    cv::line(frame, track->trace[j], track->trace[j + 1], Colors[i % 6], 2, CV_AA);
                }
            }
        }

        cv::imshow("Video", frame);
        vw << frame;

        k = cv::waitKey(10);
    }
    vw.release();
    cv::destroyAllWindows();
    return 0;
#endif
}
TentacleCObj::TentacleCObj(uint id, char *data) : ClientObject(id)
{
    if (COM::get()->debugFlag) DC::get()->print("Created new TentacleCObj %d\n", id);

    TentacleState *state = (TentacleState*)data;
    rm = new RenderModel(Point_t(), Rot_t(), state->modelNum, Vec3f(1.f, 1.f, 1.f));
}
// ---------------------------------------------------------------------------
// \param rects - input bounding boxes (one per detection)
// ---------------------------------------------------------------------------
void CTracker::Update(
        const std::vector<Point_t>& detections,
        const std::vector<cv::Rect>& rects,
        DistType distType
        )
{
    assert(detections.size() == rects.size());

    SWRI_PROFILE("Tracking");

    // -----------------------------------
    // If there are no tracks yet, then every detection begins its own track.
    // -----------------------------------
    if (tracks.size() == 0)
    {
        // If no tracks yet
        for (size_t i = 0; i < detections.size(); ++i)
        {
            tracks.push_back(std::make_shared<CTrack>(detections[i], rects[i], dt, Accel_noise_mag, NextTrackID++));
        }
    }

    size_t N = tracks.size();     // number of tracks
    size_t M = detections.size(); // number of detections

    assignments_t assignment;     // track-to-detection assignments

    if (!tracks.empty())
    {
        // Distance matrix between the N tracks and the M detections.
        distMatrix_t Cost(N * M);

        // -----------------------------------
        // Tracks already exist, so build the distance matrix
        // -----------------------------------
        switch (distType)
        {
        case CentersDist:
            for (size_t i = 0; i < tracks.size(); i++)
            {
                for (size_t j = 0; j < detections.size(); j++)
                {
                    Cost[i + j * N] = tracks[i]->CalcDist(detections[j]);
                }
            }
            break;

        case RectsDist:
            for (size_t i = 0; i < tracks.size(); i++)
            {
                for (size_t j = 0; j < detections.size(); j++)
                {
                    Cost[i + j * N] = tracks[i]->CalcDist(rects[j]);
                }
            }
            break;
        }

        // -----------------------------------
        // Solving assignment problem (tracks and predictions of Kalman filter)
        // -----------------------------------
        AssignmentProblemSolver APS;
        APS.Solve(Cost, N, M, assignment, AssignmentProblemSolver::optimal);

        // -----------------------------------
        // Remove assignment pairs with too large a distance
        // -----------------------------------
        for (size_t i = 0; i < assignment.size(); i++)
        {
            if (assignment[i] != -1)
            {
                //std::cout << "Kalman cost: " << Cost[i + assignment[i] * N] << std::endl;
                if (Cost[i + assignment[i] * N] > dist_thres)
                {
                    assignment[i] = -1;
                    tracks[i]->skipped_frames++;
                }
            }
            else
            {
                // If the track has no assigned detection, increment its skipped frames counter.
                tracks[i]->skipped_frames++;
            }
        }

        // -----------------------------------
        // If a track has not received detections for a long time, remove it.
        // -----------------------------------
        for (int i = 0; i < static_cast<int>(tracks.size()); i++)
        {
            if (tracks[i]->skipped_frames > maximum_allowed_skipped_frames)
            {
                tracks.erase(tracks.begin() + i);
                assignment.erase(assignment.begin() + i);
                i--;
            }
        }
    }

    // -----------------------------------
    // Search for unassigned detections and start new tracks for them.
    // -----------------------------------
    for (size_t i = 0; i < detections.size(); ++i)
    {
        if (find(assignment.begin(), assignment.end(), i) == assignment.end())
        {
            tracks.push_back(std::make_shared<CTrack>(detections[i], rects[i], dt, Accel_noise_mag, NextTrackID++));
        }
    }

    // Update the Kalman filter state of each track.
    // A track that has never been assigned a detection does not yet have a reliable filter state.
    for (size_t i = 0; i < assignment.size(); i++)
    {
        if (assignment[i] != -1) // If the track has an assigned detection, update using its coordinates,
        {
            tracks[i]->skipped_frames = 0;
            tracks[i]->seen_frames++;
            tracks[i]->Update(detections[assignment[i]], rects[assignment[i]], true, max_trace_length);
        }
        else                     // otherwise, continue using predictions
        {
            tracks[i]->Update(Point_t(), cv::Rect(), false, max_trace_length);
        }
    }
}
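A minimal caller sketch for this rect-aware overload. Only the signature above is relied on; the center computation, the function name, and the unqualified `RectsDist` (which may need scoping, e.g. `CTracker::RectsDist`, depending on where `DistType` is declared) are assumptions for illustration.

```cpp
// Hypothetical per-frame caller of the rect-aware CTracker::Update above.
void TrackFrame(CTracker& tracker, const std::vector<cv::Rect>& boxes)
{
    std::vector<Point_t> centers;
    centers.reserve(boxes.size());
    for (const auto& r : boxes)
    {
        // One center per bounding box; detections.size() must equal rects.size()
        centers.push_back(Point_t(r.x + 0.5f * r.width, r.y + 0.5f * r.height));
    }
    tracker.Update(centers, boxes, RectsDist); // RectsDist: compare boxes, not centers
}
```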
//---------------------------------------------------------------------------
cv::Rect TKalmanFilter::Update(cv::Rect rect, bool dataCorrect)
{
    if (!m_initialized)
    {
        if (m_initialRects.size() < MIN_INIT_VALS)
        {
            if (dataCorrect)
            {
                m_initialRects.push_back(rect);
            }
        }
        if (m_initialRects.size() == MIN_INIT_VALS)
        {
            std::vector<Point_t> initialPoints;
            Point_t averageSize(0, 0);
            for (const auto& r : m_initialRects)
            {
                initialPoints.push_back(Point_t(r.x, r.y));
                averageSize.x += r.width;
                averageSize.y += r.height;
            }
            averageSize.x /= MIN_INIT_VALS;
            averageSize.y /= MIN_INIT_VALS;

            track_t kx = 0;
            track_t bx = 0;
            track_t ky = 0;
            track_t by = 0;
            get_lin_regress_params(initialPoints, 0, MIN_INIT_VALS, kx, bx, ky, by);
            cv::Rect_<track_t> rect0(kx * (MIN_INIT_VALS - 1) + bx, ky * (MIN_INIT_VALS - 1) + by, averageSize.x, averageSize.y);
            Point_t rectv0(kx, ky);

            switch (m_type)
            {
            case TypeLinear:
                CreateLinear(rect0, rectv0);
                break;

            case TypeUnscented:
#if USE_OCV_UKF
                CreateUnscented(rect0, rectv0);
#else
                CreateLinear(rect0, rectv0);
                std::cerr << "UnscentedKalmanFilter was disabled in CMAKE! Set KalmanLinear in constructor." << std::endl;
#endif
                break;

            case TypeAugmentedUnscented:
                CreateAugmentedUnscented(rect0, rectv0);
                break;
            }
        }
    }

    if (m_initialized)
    {
        cv::Mat measurement(4, 1, Mat_t(1));
        if (!dataCorrect)
        {
            measurement.at<track_t>(0) = m_lastRectResult.x;  // update using prediction
            measurement.at<track_t>(1) = m_lastRectResult.y;
            measurement.at<track_t>(2) = m_lastRectResult.width;
            measurement.at<track_t>(3) = m_lastRectResult.height;
        }
        else
        {
            measurement.at<track_t>(0) = static_cast<track_t>(rect.x);  // update using measurements
            measurement.at<track_t>(1) = static_cast<track_t>(rect.y);
            measurement.at<track_t>(2) = static_cast<track_t>(rect.width);
            measurement.at<track_t>(3) = static_cast<track_t>(rect.height);
        }

        // Correction
        cv::Mat estimated;
        switch (m_type)
        {
        case TypeLinear:
            estimated = m_linearKalman->correct(measurement);

            m_lastRectResult.x = estimated.at<track_t>(0);   // update using measurements
            m_lastRectResult.y = estimated.at<track_t>(1);
            m_lastRectResult.width = estimated.at<track_t>(2);
            m_lastRectResult.height = estimated.at<track_t>(3);
            break;

        case TypeUnscented:
        case TypeAugmentedUnscented:
#if USE_OCV_UKF
            estimated = m_uncsentedKalman->correct(measurement);

            m_lastRectResult.x = estimated.at<track_t>(0);   // update using measurements
            m_lastRectResult.y = estimated.at<track_t>(1);
            m_lastRectResult.width = estimated.at<track_t>(6);
            m_lastRectResult.height = estimated.at<track_t>(7);
#else
            estimated = m_linearKalman->correct(measurement);

            m_lastRectResult.x = estimated.at<track_t>(0);   // update using measurements
            m_lastRectResult.y = estimated.at<track_t>(1);
            m_lastRectResult.width = estimated.at<track_t>(2);
            m_lastRectResult.height = estimated.at<track_t>(3);

            std::cerr << "UnscentedKalmanFilter was disabled in CMAKE! Set KalmanLinear in constructor." << std::endl;
#endif
            break;
        }
    }
    else
    {
        if (dataCorrect)
        {
            m_lastRectResult.x = static_cast<track_t>(rect.x);
            m_lastRectResult.y = static_cast<track_t>(rect.y);
            m_lastRectResult.width = static_cast<track_t>(rect.width);
            m_lastRectResult.height = static_cast<track_t>(rect.height);
        }
    }
    return cv::Rect(static_cast<int>(m_lastRectResult.x), static_cast<int>(m_lastRectResult.y),
                    static_cast<int>(m_lastRectResult.width), static_cast<int>(m_lastRectResult.height));
}
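The `dataCorrect` flag mirrors the way `CTracker::Update` drives its tracks: true when a detection was assigned, false to coast on the last result. A tiny wrapper sketch, purely illustrative and assuming nothing beyond the overload above:

```cpp
// Hypothetical per-frame step for the rect filter above.
cv::Rect StepRectFilter(TKalmanFilter& kf, const cv::Rect& detection, bool detected)
{
    // Before MIN_INIT_VALS correct rects have been seen, Update() only buffers
    // the measurement and echoes it back; afterwards it runs the correction step.
    return detected ? kf.Update(detection, true)
                    : kf.Update(cv::Rect(), false);   // no detection: reuse the last result
}
```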
Point_t operator- (const Point_t &a, const Point_t &b)
{
    return Point_t(a.x - b.x, a.y - b.y);
}
Point_t operator+ (const Point_t &a, const Point_t &b)
{
    return Point_t(a.x + b.x, a.y + b.y);
}
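A small usage sketch combining the two overloads above; it assumes only the two-argument `Point_t(x, y)` constructor and the public `x`/`y` members already used throughout these snippets. The helper name is hypothetical.

```cpp
// Illustrative only: shift a point by the offset between two reference points,
// the same pattern MissUpdate uses with "prediction - feature->pos" above.
Point_t ApplyOffset(const Point_t& point, const Point_t& from, const Point_t& to)
{
    Point_t delta = to - from;   // operator- : component-wise difference
    return point + delta;        // operator+ : component-wise sum
}
```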