void set_buffer_loader(EASYRPG_SHARED_PTR<buffer_loader> const& l) {
  SET_CONTEXT(ctx_);
  alSourceStop(src_);
  alSourcei(src_, AL_BUFFER, AL_NONE);

  if (not l) {
    loader_.reset();
    return;
  }

  // Drain any buffers still queued from the previous loader. Guard against
  // a zero count: &unqueued.front() on an empty vector is undefined behavior.
  ALint unqueuing_count;
  alGetSourceiv(src_, AL_BUFFERS_QUEUED, &unqueuing_count);
  if (unqueuing_count > 0) {
    std::vector<ALuint> unqueued(unqueuing_count);
    alSourceUnqueueBuffers(src_, unqueuing_count, &unqueued.front());
  }

  loader_ = l;
  int queuing_count = 0;
  BOOST_ASSERT(not l->is_end());
  ticks_.push_back(0);
  for (; queuing_count < BUFFER_NUMBER; ++queuing_count) {
    buf_sizes_.push_back(loader_->load_buffer(buffers_[queuing_count]));
    ticks_.push_back(loader_->midi_ticks());
    if (loader_->is_end()) {
      queuing_count++;
      break;
    }
  }
  alSourceQueueBuffers(src_, queuing_count, buffers_.data());
  alSourcePlay(src_);
}
void WaypointVelocityVisualizer::controlCallback(const geometry_msgs::PoseStamped::ConstPtr& current_pose_msg,
                                                 const geometry_msgs::TwistStamped::ConstPtr& current_twist_msg,
                                                 const geometry_msgs::TwistStamped::ConstPtr& command_twist_msg)
{
  // buffers are reset when time goes back, e.g. playback rosbag
  ros::Time current_time = ros::Time::now();
  if (previous_time_ > current_time)
  {
    ROS_WARN("Detected jump back in time of %.3fs. Clearing markers and buffers.",
             (previous_time_ - current_time).toSec());
    deleteMarkers();  // call 'DELETEALL'
    resetBuffers();   // clear circular buffers
  }
  previous_time_ = current_time;

  // if plot_metric_interval <= 0, velocity is plotted by each callback.
  if (plot_metric_interval_ > 0 && current_pose_buf_.size() > 0)
  {
    tf::Vector3 p1, p2;
    tf::pointMsgToTF(current_pose_buf_.back().pose.position, p1);
    tf::pointMsgToTF(current_pose_msg->pose.position, p2);
    if (!(p1.distance(p2) > plot_metric_interval_))
      return;  // skipping plot
  }

  current_pose_buf_.push_back(*current_pose_msg);
  current_twist_buf_.push_back(*current_twist_msg);
  command_twist_buf_.push_back(*command_twist_msg);

  current_twist_marker_array_.markers.clear();
  command_twist_marker_array_.markers.clear();
  createVelocityMarker(current_pose_buf_, current_twist_buf_, "current_velocity", current_twist_color_,
                       current_twist_marker_array_);
  createVelocityMarker(current_pose_buf_, command_twist_buf_, "twist_cmd", command_twist_color_,
                       command_twist_marker_array_);
  publishVelocityMarker();
}
void update() {
  ALint processed;
  alGetSourceiv(src_, AL_BUFFERS_PROCESSED, &processed);
  // Guard against processed == 0 (empty-vector access) and against a loader
  // that was already reset once the stream ended.
  if (processed > 0 && loader_) {
    std::vector<ALuint> unqueued(processed);
    alSourceUnqueueBuffers(src_, processed, &unqueued.front());

    int queuing_count = 0;
    for (; queuing_count < processed; ++queuing_count) {
      if (not loop_play_ and loader_->is_end()) {
        loader_.reset();
        ++queuing_count;
        break;
      }
      if (loader_->is_end()) {
        ticks_.push_back(0);
      }
      buf_sizes_.push_back(loader_->load_buffer(unqueued[queuing_count]));
      ticks_.push_back(loader_->midi_ticks());
    }
    alSourceQueueBuffers(src_, queuing_count, &unqueued.front());
  }

  if (fade_milli_ != 0) {
    SET_CONTEXT(ctx_);
    loop_count_++;

    if (fade_ended()) {
      alSourceStop(src_);
    } else {
      alSourcef(src_, AL_GAIN, current_volume());
    }
  }
}
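The two OpenAL functions above share one streaming pattern: unqueue the buffers the source has finished playing, refill them from a decoder, and queue them back. A minimal self-contained sketch of just that refill step follows; fill_pcm() is a hypothetical decoder hook standing in for buffer_loader::load_buffer(), not part of the code above.

#include <AL/al.h>
#include <vector>

// fill_pcm fills one OpenAL buffer with decoded PCM and returns the number
// of bytes written (0 on end of stream) -- an assumed callback signature.
void refill_stream(ALuint src, ALsizei (*fill_pcm)(ALuint buffer)) {
  ALint processed = 0;
  alGetSourcei(src, AL_BUFFERS_PROCESSED, &processed);
  if (processed <= 0)
    return;  // nothing finished yet; also avoids an empty-vector access

  std::vector<ALuint> bufs(processed);
  alSourceUnqueueBuffers(src, processed, bufs.data());

  ALsizei refilled = 0;
  for (; refilled < processed; ++refilled)
    if (fill_pcm(bufs[refilled]) == 0)
      break;  // decoder exhausted; requeue only what was actually refilled

  alSourceQueueBuffers(src, refilled, bufs.data());
}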
/** Try to write a value to the pipe

    \param[in] value is what we want to write

    \param[in] blocking specifies whether the call waits for the
    operation to succeed

    \return true on success

    \todo provide a && version
*/
bool write(const T& value, bool blocking = false) {
  // Lock the pipe to avoid being disturbed
  std::unique_lock<std::mutex> ul { cb_mutex };
  TRISYCL_DUMP_T("Write pipe full = " << full() << " value = " << value);

  if (blocking)
    /* If in blocking mode, wait for the not full condition, that may
       be changed when a read is done */
    read_done.wait(ul, [&] { return !full(); });
  else if (full())
    return false;

  cb.push_back(value);
  TRISYCL_DUMP_T("Write pipe front = " << cb.front()
                 << " back = " << cb.back()
                 << " cb.begin() = " << (void*)&*cb.begin()
                 << " cb.size() = " << cb.size()
                 << " cb.end() = " << (void*)&*cb.end()
                 << " reserved_for_reading() = " << reserved_for_reading()
                 << " reserved_for_writing() = " << reserved_for_writing());
  // Notify the clients waiting to read something from the pipe
  write_done.notify_all();
  return true;
}
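The blocking write() above only terminates when a reader frees a slot and signals read_done; that read side is not shown. A minimal, self-contained sketch of the whole protocol over boost::circular_buffer may help — the class and member names are illustrative, not triSYCL's, and the read() body is an assumption about what unblocks the write.

#include <boost/circular_buffer.hpp>
#include <condition_variable>
#include <mutex>

template <typename T>
class bounded_pipe {
  boost::circular_buffer<T> cb;
  std::mutex cb_mutex;
  std::condition_variable read_done;   // signaled after a read frees a slot
  std::condition_variable write_done;  // signaled after a write adds a value

public:
  explicit bounded_pipe(std::size_t capacity) : cb{capacity} {}

  // Mirrors write() above: wait for room when blocking, bail out when full.
  bool write(const T& value, bool blocking = false) {
    std::unique_lock<std::mutex> ul{cb_mutex};
    if (blocking)
      read_done.wait(ul, [&] { return !cb.full(); });
    else if (cb.full())
      return false;
    cb.push_back(value);
    write_done.notify_all();
    return true;
  }

  // The matching read side, which is what makes a blocking write terminate.
  bool read(T& value, bool blocking = false) {
    std::unique_lock<std::mutex> ul{cb_mutex};
    if (blocking)
      write_done.wait(ul, [&] { return !cb.empty(); });
    else if (cb.empty())
      return false;
    value = cb.front();
    cb.pop_front();
    read_done.notify_all();
    return true;
  }
};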
bool PythonServer::LoadChatHistory(boost::circular_buffer<ChatHistoryEntity>& chat_history) {
  boost::python::object chat_provider = m_python_module_chat.attr("__dict__")["chat_history_provider"];
  if (!chat_provider) {
    ErrorLogger() << "Unable to get Python object chat_history_provider";
    return false;
  }
  boost::python::object f = chat_provider.attr("load_history");
  if (!f) {
    ErrorLogger() << "Unable to call Python method load_history";
    return false;
  }
  boost::python::object r = f();
  boost::python::extract<boost::python::list> py_history(r);
  if (py_history.check()) {
    boost::python::stl_input_iterator<boost::python::tuple> entity_begin(py_history), entity_end;
    for (auto& it = entity_begin; it != entity_end; ++it) {
      ChatHistoryEntity e;
      e.m_timestamp = boost::posix_time::from_time_t(boost::python::extract<time_t>((*it)[0]));
      e.m_player_name = boost::python::extract<std::string>((*it)[1]);
      e.m_text = boost::python::extract<std::string>((*it)[2]);
      e.m_text_color = boost::python::extract<GG::Clr>((*it)[3]);
      chat_history.push_back(e);
    }
  }
  return true;
}
void render(sf::RenderTarget& target) {
  float dx = 1.0f / static_cast<float>(line_data_.capacity());
  float x = static_cast<float>(line_data_.capacity() - line_data_.size()) * dx;

  line_data_.push_back(std::make_pair(tick_data_, tick_tag_));
  tick_tag_ = false;

  glBegin(GL_LINE_STRIP);
  auto c = color(color_);
  glColor4f(std::get<0>(c), std::get<1>(c), std::get<2>(c), 0.8f);
  for (size_t n = 0; n < line_data_.size(); ++n)
    if (line_data_[n].first > -0.5)
      glVertex3d(x + n * dx,
                 std::max(0.05, std::min(0.95, (1.0f - line_data_[n].first) * 0.8 + 0.1f)),
                 0.0);
  glEnd();

  glEnable(GL_LINE_STIPPLE);
  glLineStipple(3, 0xAAAA);
  for (size_t n = 0; n < line_data_.size(); ++n) {
    if (line_data_[n].second) {
      glBegin(GL_LINE_STRIP);
      glVertex3f(x + n * dx, 0.0f, 0.0f);
      glVertex3f(x + n * dx, 1.0f, 0.0f);
      glEnd();
    }
  }
  glDisable(GL_LINE_STIPPLE);
}
void odomCallback(nav_msgs::Odometry odomPoseMsg) {
  pair<geometry_msgs::Pose, ros::Time> next;
  next.first = odomPoseMsg.pose.pose;
  next.second = odomPoseMsg.header.stamp;
  cb.push_back(next);
  current = next;
}
line(size_t res = 1200)
  : line_data_(res) {
  tick_data_ = -1.0f;
  color_ = 0xFFFFFFFF;
  tick_tag_ = false;
  line_data_.push_back(std::make_pair(-1.0f, false));
}
bool MapsBuffer::pushBack(boost::shared_ptr<const MapsRgb> maps_rgb) {
  bool retVal = false;
  {
    boost::mutex::scoped_lock buff_lock (bmutex_);
    if (!buffer_.full ())
      retVal = true;
    // push_back happens even when full: the circular buffer silently
    // overwrites the oldest entry, and retVal reports the overflow.
    buffer_.push_back (maps_rgb);
  }
  buff_empty_.notify_one ();
  return (retVal);
}
void scan(std::istream& stream, std::function<void (const std::string&, int, size_t)> fun) {
  auto line_number = 0;
  for (std::string line; std::getline(stream, line);) {
    line_number++;
    metrics.lines_scanned++;
    if (line.length() > 0) {
      buffer.push_back(line);
      if (buffer.full()) {
        emit_block(line_number, fun);
      }
    }
  }
}
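scan() delegates to emit_block(), which is not shown. A plausible sketch, under the assumption that the full window is joined into one string, handed to the callback, and then slid forward by one line — the body is a guess for illustration, not the original code.

void emit_block(int line_number, std::function<void (const std::string&, int, size_t)> fun) {
  std::string block;
  for (const auto& buffered_line : buffer)
    block += buffered_line + '\n';          // join the window into one block
  fun(block, line_number, buffer.size());
  buffer.pop_front();  // slide the window: the next line completes a new block
}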
void put(int x) {
  for (auto i = 0; i < x; i++) {
    unique_lock<mutex> locker(m_mutex);
    while (Q.full())
      empty.wait(locker);
    assert(!Q.full());
    Q.push_back(i);
    cout << "@ " << i << endl;
    full.notify_all();
  }
  flag = false;
}
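put() signals `full` after every push but only clears `flag` when it finishes, so a consumer must watch both. A sketch of a matching get(), assuming the same shared state (Q, m_mutex, empty, full, flag) and additionally assuming the producer calls full.notify_all() after clearing flag so a blocked consumer wakes up:

void get() {
  for (;;) {
    unique_lock<mutex> locker(m_mutex);
    while (Q.empty() && flag)
      full.wait(locker);     // wait for an item or for the producer to finish
    if (Q.empty())
      break;                 // producer done and queue drained
    int v = Q.front();
    Q.pop_front();
    cout << "- " << v << endl;
    empty.notify_all();      // wake a producer waiting for free space
  }
}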
bool PCDBuffer::pushBack (pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr cloud) {
  bool retVal = false;
  {
    boost::mutex::scoped_lock buff_lock (bmutex_);
    if (!buffer_.full ())
      retVal = true;
    buffer_.push_back (cloud);
  }
  buff_empty_.notify_one ();
  return (retVal);
}
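Both pushBack() variants above notify buff_empty_ after releasing the lock, but the consumer side is not shown. A plausible counterpart that blocks until a producer pushes, assuming the same members (buffer_, bmutex_, buff_empty_) — an illustrative sketch, not the original code:

pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr
PCDBuffer::getFront ()
{
  pcl::PointCloud<pcl::PointXYZRGBA>::ConstPtr cloud;
  {
    boost::mutex::scoped_lock buff_lock (bmutex_);
    while (buffer_.empty ())
      buff_empty_.wait (buff_lock);  // lock released while waiting
    cloud = buffer_.front ();
    buffer_.pop_front ();
  }
  return (cloud);
}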
bool fillOne() {
  int ch = super_t::read();
  if (ch < 0) {
    // EOF reached --> cannot be filled
    //m_bReady = false;
    m_bEOF = true;
    return false;
  }
  if (!checkMatch(ch))
    return false;
  m_cbuf.push_back((unsigned char)ch);
  return true;
}
int main(int argc, char* argv[]) {
  v_circular_buffer_2.push_back(1);
  v_circular_buffer_2.push_back(4);
  MyClass tmp(x);
  v_intrusive_set_2.insert(tmp);
  MyClass_list tmp_list(x);
  v_intrusive_list_2.push_front(tmp_list);
  int r = done();  // break here
  r += argc + (char)argv[0][0];
  return r % 2;
}
//TODO: change this a lot!
void updateGyroState(sensor_msgs::Imu& imuMsg, packet_t rxPkt, boost::circular_buffer<float>& calibration) {
  uint8_t gyro_adc = rxPkt.payload[0];
  double current_time = ros::Time::now().toSec();
  double last_time = imuMsg.header.stamp.toSec();

  if (!isMoving) {
    // While stationary, accumulate raw readings; the bias offset is the
    // mean of the (capacity-bounded) calibration window.
    calibration.push_back(float(gyro_adc));
    double total = 0;
    BOOST_FOREACH(float reading, calibration) {
      total += reading;
    }
    cal_offset = total / calibration.size();
  }
}
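The stationary branch above recomputes the window mean with BOOST_FOREACH on every sample. An equivalent helper using std::accumulate; because the circular buffer's capacity bounds the window, the offset tracks only recent readings.

#include <boost/circular_buffer.hpp>
#include <numeric>

float rolling_mean(const boost::circular_buffer<float>& window) {
  if (window.empty())
    return 0.0f;  // callers should not trust the offset before any samples
  float total = std::accumulate(window.begin(), window.end(), 0.0f);
  return total / static_cast<float>(window.size());
}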
virtual boost::unique_future<bool> send(const safe_ptr<read_frame>& frame) override {
  if (audio_cadence_.size() == 1)
    return consumer_->send(frame);

  boost::unique_future<bool> result = caspar::wrap_as_future(true);

  if (boost::range::equal(sync_buffer_, audio_cadence_) &&
      audio_cadence_.front() * frame->num_channels() == static_cast<size_t>(frame->audio_data().size())) {
    // Audio sent so far is in sync, now we can send the next chunk.
    result = consumer_->send(frame);
    boost::range::rotate(audio_cadence_, std::begin(audio_cadence_) + 1);
  }
  else
    CASPAR_LOG(trace) << print() << L" Syncing audio.";

  sync_buffer_.push_back(static_cast<size_t>(frame->audio_data().size() / frame->num_channels()));

  return std::move(result);
}
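The cadence logic in send() is easier to see in isolation: the circular buffer holds the most recent per-frame sample counts, "in sync" means they spell out the expected cadence exactly, and the cadence is rotated one step after each match so the comparison tracks the 1601/1602 alternation of 1001 modes. A standalone demonstration with made-up NTSC-style values:

#include <boost/circular_buffer.hpp>
#include <boost/range/algorithm/equal.hpp>
#include <boost/range/algorithm/rotate.hpp>
#include <deque>
#include <iostream>

int main() {
  std::deque<std::size_t> audio_cadence { 1602, 1601, 1602, 1601, 1602 };
  boost::circular_buffer<std::size_t> sync_buffer(audio_cadence.size());

  for (std::size_t frame_samples : { 1602, 1601, 1602, 1601, 1602, 1602 }) {
    sync_buffer.push_back(frame_samples);
    if (boost::range::equal(sync_buffer, audio_cadence)) {
      std::cout << "in sync\n";
      // Rotate so the next comparison expects the cadence shifted by one.
      boost::range::rotate(audio_cadence, std::begin(audio_cadence) + 1);
    } else {
      std::cout << "syncing\n";  // too few samples yet, or counts off-cadence
    }
  }
}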
virtual bool OnGetData(sf::SoundStream::Chunk& data) override {
  win32_exception::ensure_handler_installed_for_thread("sfml-audio-thread");
  std::pair<std::shared_ptr<core::read_frame>, std::shared_ptr<audio_buffer_16>> audio_data;

  input_.pop(audio_data);  // Block until available

  graph_->set_value("tick-time", perf_timer_.elapsed() * format_desc_.fps * 0.5);
  perf_timer_.restart();

  container_.push_back(std::move(*audio_data.second));
  data.Samples = container_.back().data();
  data.NbSamples = container_.back().size();

  if (audio_data.first)
    presentation_age_ = audio_data.first->get_age_millis();

  return is_running_;
}
/** Reserve some part of the pipe for writing

    \param[in] s is the number of elements to reserve

    \param[out] rid is an iterator to a description of the reservation
    that has been done if successful

    \param[in] blocking specifies whether the call waits for the
    operation to succeed

    \return true if the reservation was successful
*/
bool reserve_write(std::size_t s, rid_iterator& rid, bool blocking = false) {
  // Lock the pipe to avoid being disturbed
  std::unique_lock<std::mutex> ul { cb_mutex };
  TRISYCL_DUMP_T("Before write reservation cb.size() = " << cb.size()
                 << " size() = " << size());
  if (s == 0)
    // Empty reservation requested, so nothing to do
    return false;

  if (blocking)
    /* If in blocking mode, wait for enough room in the pipe, that may
       be changed when a read is done. Do not use a difference here
       because it is only about unsigned values */
    read_done.wait(ul, [&] { return cb.size() + s <= capacity(); });
  else if (cb.size() + s > capacity())
    // Not enough room in the pipe for the reservation
    return false;

  /* If there is enough room in the pipe, just create default values
     in it to do the reservation */
  for (std::size_t i = 0; i != s; ++i)
    cb.push_back();
  /* Compute the location of the first element a posteriori since it
     may not exist a priori if cb was empty before */
  auto first = cb.end() - s;
  /* Add a description of the reservation at the end of the
     reservation queue */
  w_rid_q.emplace_back(first, s);
  // Return the iterator to the last reservation descriptor
  rid = w_rid_q.end() - 1;
  TRISYCL_DUMP_T("After reservation cb.size() = " << cb.size()
                 << " size() = " << size());
  return true;
}
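The core trick in reserve_write() — append default-constructed placeholders, then locate the reservation a posteriori with end() - s — works on a bare boost::circular_buffer. A minimal standalone illustration, stripped of the locking and the reservation queue:

#include <boost/circular_buffer.hpp>
#include <cassert>

int main() {
  boost::circular_buffer<int> cb(8);
  cb.push_back(1);
  cb.push_back(2);

  const std::size_t s = 3;                 // size of the reservation
  assert(cb.size() + s <= cb.capacity());  // the non-blocking room check
  for (std::size_t i = 0; i != s; ++i)
    cb.push_back();                        // append a default-constructed slot

  // The first reserved slot only exists now, hence the a-posteriori lookup.
  auto first = cb.end() - s;
  for (std::size_t i = 0; i != s; ++i)
    first[i] = 100 + static_cast<int>(i);  // fill the reservation in place

  assert(cb[2] == 100 && cb[4] == 102);
}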
virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio) {
  if (!video)
    return S_OK;

  try {
    graph_->set_value("tick-time", tick_timer_.elapsed() * format_desc_.fps * 0.5);
    tick_timer_.restart();

    frame_timer_.restart();

    // PUSH

    void* bytes = nullptr;
    if (FAILED(video->GetBytes(&bytes)) || !bytes)
      return S_OK;

    safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);
    avcodec_get_frame_defaults(av_frame.get());

    av_frame->data[0] = reinterpret_cast<uint8_t*>(bytes);
    av_frame->linesize[0] = video->GetRowBytes();
    av_frame->format = PIX_FMT_UYVY422;
    av_frame->width = video->GetWidth();
    av_frame->height = video->GetHeight();
    av_frame->interlaced_frame = format_desc_.field_mode != core::field_mode::progressive;
    av_frame->top_field_first = format_desc_.field_mode == core::field_mode::upper ? 1 : 0;

    std::shared_ptr<core::audio_buffer> audio_buffer;

    // It is assumed that audio is always equal or ahead of video.
    if (audio && SUCCEEDED(audio->GetBytes(&bytes)) && bytes) {
      auto sample_frame_count = audio->GetSampleFrameCount();
      auto audio_data = reinterpret_cast<int32_t*>(bytes);
      audio_buffer = std::make_shared<core::audio_buffer>(audio_data,
                                                          audio_data + sample_frame_count * format_desc_.audio_channels);
    }
    else
      audio_buffer = std::make_shared<core::audio_buffer>(audio_cadence_.front(), 0);

    // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601).
    // This cadence fills the audio mixer most optimally.

    sync_buffer_.push_back(audio_buffer->size());
    if (!boost::range::equal(sync_buffer_, audio_cadence_)) {
      CASPAR_LOG(trace) << print() << L" Syncing audio.";
      return S_OK;
    }

    muxer_.push(audio_buffer);
    muxer_.push(av_frame, hints_);

    boost::range::rotate(audio_cadence_, std::begin(audio_cadence_) + 1);

    // POLL

    for (auto frame = muxer_.poll(); frame; frame = muxer_.poll()) {
      if (!frame_buffer_.try_push(make_safe_ptr(frame))) {
        auto dummy = core::basic_frame::empty();
        frame_buffer_.try_pop(dummy);
        frame_buffer_.try_push(make_safe_ptr(frame));
        graph_->set_tag("dropped-frame");
      }
    }

    graph_->set_value("frame-time", frame_timer_.elapsed() * format_desc_.fps * 0.5);
    graph_->set_value("output-buffer",
                      static_cast<float>(frame_buffer_.size()) / static_cast<float>(frame_buffer_.capacity()));
  }
  catch (...) {
    exception_ = std::current_exception();
    return E_FAIL;
  }

  return S_OK;
}
void pointsCallback(const sensor_msgs::PointCloud2ConstPtr& input) {
  if (count >= 0) {
    counter++;
    pcl::PointCloud<pcl::PointXYZRGB> input_cloud;
    pcl::fromROSMsg(*input, input_cloud);
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr input_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
    *input_cloud_ptr = input_cloud;
    count = 0;

    if (firstTime == true) {
      // Create a standard map object
      m->setVerbose(true);                           // Set the map to give text output
      m->loadCalibrationWords(bow_path, "orb", 500); // set bag of words to orb, 500 orb features from bow_path
      m->setFeatureExtractor(new OrbExtractor());    // Use orb features
      int max_points = 300;             // Number of keypoints used by matcher
      int nr_iter = 8;                  // Number of iterations the matcher will run
      float shrinking = 0.7;            // The rate of convergence for the matcher
      float bow_threshold = 0.15;       // Bag of words threshold to avoid investigating bad matches
      float distance_threshold = 0.015; // Distance threshold to discard bad matches using euclidean information.
      float feature_threshold = 0.15;   // Feature threshold to discard bad matches using feature information.
      m->setMatcher(new BowAICK(max_points, nr_iter, shrinking, bow_threshold,
                                distance_threshold, feature_threshold)); // Create a new matcher
      firstTime = false;
      vector<RGBDFrame*> frames;
      m->addFrame(input_cloud_ptr);
    }
    else {
      printf("----------------------%i-------------------\nadding a new frame\n", counter);
      // Add frame to map
      m->addFrame(input_cloud_ptr);
      nrMatches = m->numberOfMatchesInLastFrame();
      int hej = m->numberOfFrames();

      /*float time = (input->header.stamp - lastCloudTime).toSec();
      xSpeed = (lastTransformationMatrix.front()(0,3) - transformationMatrix.front()(0,3))/time;
      ySpeed = (lastTransformationMatrix.front()(1,3) - transformationMatrix.front()(1,3))/time;
      zSpeed = (lastTransformationMatrix.front()(2,3) - transformationMatrix.front()(2,3))/time;
      //cout << time << " HEJ" << endl;*/

      if ((nrMatches < 40 and hej > 2)) { // or xSpeed > 1.3 or ySpeed > 1.3 or zSpeed > 1.3
        cout << "BAD MATCH" << endl << endl << endl;
        m->removeLastFrame();
        badcount++;

        // Dead-reckon one step: extrapolate the last two buffered poses.
        //cout << lastPoses.front().pose.position.x << endl;
        pose.header.stamp = ros::Time::now();
        pose.header.frame_id = "local_origin";
        pose.pose.position.x = lastPoses.at(2).pose.position.x + (lastPoses.at(2).pose.position.x - lastPoses.at(1).pose.position.x);
        pose.pose.position.y = lastPoses.at(2).pose.position.y + (lastPoses.at(2).pose.position.y - lastPoses.at(1).pose.position.y);
        pose.pose.position.z = lastPoses.at(2).pose.position.z + (lastPoses.at(2).pose.position.z - lastPoses.at(1).pose.position.z);
        pose.pose.orientation.x = lastPoses.at(2).pose.orientation.x; //lastLocalPose.pose.orientation.x; //q.x();
        pose.pose.orientation.y = lastPoses.at(2).pose.orientation.y; //lastLocalPose.pose.orientation.y; //q.y();
        pose.pose.orientation.z = lastPoses.at(2).pose.orientation.z; //lastLocalPose.pose.orientation.z; //q.z();
        pose.pose.orientation.w = lastPoses.at(2).pose.orientation.w; //lastLocalPose.pose.orientation.w; //q.w();
        pub_Pose.publish(pose);
      }
      else {
        transformationMatrix = m->estimateCurrentPose(lastTransformationMatrix);
        //cout << transformationMatrix.front() << endl << endl;
        lastTransformationMatrix = transformationMatrix;
        transformationMatrix.front() = frameConversionMat * transformationMatrix.front();

        // Convert rotation matrix to quaternion
        tf::Matrix3x3 rotationMatrix;
        rotationMatrix.setValue(transformationMatrix.front()(0,0), transformationMatrix.front()(0,1), transformationMatrix.front()(0,2),
                                transformationMatrix.front()(1,0), transformationMatrix.front()(1,1), transformationMatrix.front()(1,2),
                                transformationMatrix.front()(2,0), transformationMatrix.front()(2,1), transformationMatrix.front()(2,2));
        tf::Quaternion q;
        rotationMatrix.getRotation(q);

        //tf::Transform transform;
        //transform.setOrigin(tf::Vector3(transformationMatrix.front()(0,3), transformationMatrix.front()(1,3), transformationMatrix.front()(2,3)));

        // publish pose
        //geometry_msgs::PoseStamped pose;
        pose.header.stamp = input->header.stamp;
        pose.header.frame_id = "local_origin";
        pose.pose.position.x = transformationMatrix.front()(0,3);
        pose.pose.position.y = transformationMatrix.front()(1,3);
        pose.pose.position.z = transformationMatrix.front()(2,3);
        pose.pose.orientation.x = q.x();
        pose.pose.orientation.y = q.y();
        pose.pose.orientation.z = q.z();
        pose.pose.orientation.w = q.w();
        pub_Pose.publish(pose);
      }

      //pub_transform.sendTransform(tf::StampedTransform(transform, now, "map", "robot"));
      //ros::Time now(0);
      /*while (!tfl.waitForTransform("local_origin", "camera_link", now, ros::Duration(1)))
        ROS_ERROR("Couldn't find transform from 'camera_link' to 'local_origin', retrying...");
      geometry_msgs::PoseStamped local_origin_pose;
      tfl.transformPose("local_origin", now, pose, "camera_link", local_origin_pose);*/
      //pub_Pose.publish(pose);
      //pub_Pose_test.publish(pose);

      lastMatches = nrMatches;
      lastPoses.push_back(pose);
      lastCloudTime = input->header.stamp;
    }
  }
  else {
    count++;
  }
}
void targetsCallback(const mtt::TargetList& list) {
  // will print information that should be stored in a file
  // file format: id, good/bad tag, time, pos x, pos y, vel, theta,
  //              position_diff, heading_diff, angle_to_robot, velocity_diff
  static ros::Time start_time = ros::Time::now();
  time_elapsed = ros::Time::now() - start_time;

  /// ROBOT PART ///
  // use transformations to extract robot features
  try {
    p_listener->lookupTransform("/map", "/base_link", ros::Time(0), transform);
    p_listener->lookupTwist("/map", "/base_link", ros::Time(0), ros::Duration(0.5), twist);
  }
  catch (tf::TransformException ex) {
    ROS_ERROR("%s", ex.what());
  }

  robot_x = transform.getOrigin().x();
  robot_posex_buffer.push_back(robot_x);
  robot_y = transform.getOrigin().y();
  robot_theta = tf::getYaw(transform.getRotation());
  robot_vel = sqrt(pow(twist.linear.x, 2) + pow(twist.linear.y, 2));

  // robot output line
  /// uncomment the following for training!
  printf("%d,%d,%.10f,%.10f,%.10f,%.10f,%.10f,0,0,0,0\n",
         -1, leader_tag, time_elapsed.toSec(), robot_x, robot_y, robot_vel, robot_theta);

  // testing new features extraction
  // accumulator_set<double, stats<tag::variance> > acc;
  // for_each(robot_posex_buffer.begin(), robot_posex_buffer.end(), boost::bind<void>(boost::ref(acc), _1));
  // printf("%f,%f,%f\n", robot_x, mean(acc), sqrt(variance(acc)));

  /// TARGETS PART ///
  // sweeps target list and extracts features
  for (uint i = 0; i < list.Targets.size(); i++) {
    target_id = list.Targets[i].id;
    target_x = list.Targets[i].pose.position.x;
    target_y = list.Targets[i].pose.position.y;
    target_theta = tf::getYaw(list.Targets[i].pose.orientation);
    target_vel = sqrt(pow(list.Targets[i].velocity.linear.x, 2) +
                      pow(list.Targets[i].velocity.linear.y, 2));
    position_diff = sqrt(pow(robot_x - target_x, 2) + pow(robot_y - target_y, 2));
    heading_diff = robot_theta - target_theta;
    angle_to_robot = -robot_theta + atan2(target_y - robot_y, target_x - robot_x);
    velocity_diff = robot_vel - target_vel;

    // target output (to be used in adaboost training)
    // output file format:
    // % 1: id
    // % 2: good/bad tag
    // % 3: time
    // % 4: pos x
    // % 5: pos y
    // % 6: vel
    // % 7: theta
    // % 8: pos diff
    // % 9: head diff
    // %10: angle 2 robot
    // %11: velocity diff
    /// uncomment the following to generate training file!
    printf("%d,%d,%.10f,%.10f,%.10f,%.10f,%.10f,%.10f,%.10f,%.10f,%.10f\n",
           target_id, leader_tag, time_elapsed.toSec(),
           target_x, target_y, target_vel, target_theta,
           position_diff, heading_diff, angle_to_robot, velocity_diff);

    // if(position_diff < 6.0 && target_vel > 0.5){
    //   // if inside boundaries (in meters),
    //   // store features in a covariance struct to send to matlab
    //   nfeatures.pose.position.x = target_x;
    //   nfeatures.pose.position.y = target_y;
    //   nfeatures.pose.position.z = target_id;
    //
    //   nfeatures.covariance[0] = target_vel;
    //   nfeatures.covariance[1] = velocity_diff;
    //   nfeatures.covariance[2] = heading_diff;
    //   nfeatures.covariance[3] = angle_to_robot;
    //   nfeatures.covariance[4] = position_diff;
    //
    //   matlab_list.push_back(nfeatures);
    //
    //   counter++;
    // }
  }

  // printf("targets within range: %d\n", counter);
  // counter = 0;

  // check if enough time has passed and send batch of msgs to matlab
  // duration_btw_msg = ros::Time::now() - time_last_msg;
  //
  // if(duration_btw_msg.toSec() > 0.01){
  //   while(!matlab_list.empty()){
  //     nfeatures_pub.publish(matlab_list.front());
  //     matlab_list.pop_front();
  //     usleep(0.01e6); // has to sleep, otherwise matlab does not get the msg
  //   }
  //   time_last_msg = ros::Time::now();
  // }
}