// Advance the animation clock by `dt` seconds while playing.
// Steps forward at most one frame per call, wraps at the end of the
// loop window, and halts on the final frame when not looping.
void Animation::update(float dt) {
    if (!playing_)
        return;

    const int frame_before = current_frame_;
    timeline_ += dt;

    // Step to the next frame once the clock passes its boundary.
    if (timeline_ >= frame_to_time(current_frame_ + 1))
        ++current_frame_;

    // Wrap back to the start of the loop window when we run past it.
    const int loop_end = loop_start_ + loop_duration_;
    if (current_frame_ >= loop_end) {
        current_frame_ = loop_start_;
        timeline_ = frame_to_time(loop_start_);
    }

    // Only touch the sprite when the visible frame actually changed.
    if (frame_before != current_frame_)
        update_sprite_to_frame();

    // A one-shot animation stops once it reaches its last frame.
    if (!looping_ && current_frame_ == loop_end - 1)
        playing_ = false;
}
static void sndfile_stream_decode(struct decoder *decoder, struct input_stream *is) { GError *error = NULL; SNDFILE *sf; SF_INFO info; struct audio_format audio_format; size_t frame_size; sf_count_t read_frames, num_frames; int buffer[4096]; enum decoder_command cmd; info.format = 0; sf = sf_open_virtual(&vio, SFM_READ, &info, is); if (sf == NULL) { g_warning("sf_open_virtual() failed"); return; } /* for now, always read 32 bit samples. Later, we could lower MPD's CPU usage by reading 16 bit samples with sf_readf_short() on low-quality source files. */ if (!audio_format_init_checked(&audio_format, info.samplerate, SAMPLE_FORMAT_S32, info.channels, &error)) { g_warning("%s", error->message); g_error_free(error); return; } decoder_initialized(decoder, &audio_format, info.seekable, frame_to_time(info.frames, &audio_format)); frame_size = audio_format_frame_size(&audio_format); read_frames = sizeof(buffer) / frame_size; do { num_frames = sf_readf_int(sf, buffer, read_frames); if (num_frames <= 0) break; cmd = decoder_data(decoder, is, buffer, num_frames * frame_size, 0); if (cmd == DECODE_COMMAND_SEEK) { sf_count_t c = time_to_frame(decoder_seek_where(decoder), &audio_format); c = sf_seek(sf, c, SEEK_SET); if (c < 0) decoder_seek_error(decoder); else decoder_command_finished(decoder); cmd = DECODE_COMMAND_NONE; } } while (cmd == DECODE_COMMAND_NONE); sf_close(sf); }
// Freeze playback on `frame` and display that frame immediately.
void Animation::hold(int frame) {
    playing_ = false;
    timeline_ = frame_to_time(frame);
    current_frame_ = frame;
    update_sprite_to_frame();
}
// Start playback from `frame`; `looping` chooses repeat vs one-shot.
void Animation::play(int frame, bool looping) {
    set_looping(looping);
    timeline_ = frame_to_time(frame);
    current_frame_ = frame;
    playing_ = true;
    update_sprite_to_frame();
}
int vrpn_Tracker_PhaseSpace::get_report(void) { if(!owlRunning) return 0; int maxiter = 1; if(readMostRecent) maxiter = 1024; // not technically most recent, but if the client is slow, avoids infinite loop. int ret = 1; int oldframe = frame; for(int i = 0; i < maxiter && ret; i++) { int cframe = frame; while(ret && cframe == frame) { ret = read_frame(); } } // no new data? abort. if(oldframe == frame) return 0; #ifdef DEBUG char buffer[1024]; owlGetString(OWL_FRAME_BUFFER_SIZE, buffer); printf("%s\n", buffer); #endif for(int i = 0; i < markers.size(); i++) { if(markers[i].cond <= 0) continue; //set the sensor d_sensor = markers[i].id; pos[0] = markers[i].x; pos[1] = markers[i].y; pos[2] = markers[i].z; //raw positions have no rotation d_quat[0] = 0; d_quat[1] = 0; d_quat[2] = 0; d_quat[3] = 1; // send time out in OWL time if(frequency) frame_to_time(frame, frequency, timestamp); else memset(×tamp, 0, sizeof(timestamp)); //send the report send_report(); } for(int j = 0; j < rigids.size(); j++) { if(rigids[j].cond <= 0) continue; //set the sensor d_sensor = r2s_map[rigids[j].id]; if(slave && d_sensor == 0) { // rigid bodies aren't allowed to be sensor zero in slave mode r2s_map[rigids[j].id] = rigids[j].id; } //set the position pos[0] = rigids[j].pose[0]; pos[1] = rigids[j].pose[1]; pos[2] = rigids[j].pose[2]; //set the orientation quaternion //OWL has the scale factor first, whereas VRPN has it last. d_quat[0] = rigids[j].pose[4]; d_quat[1] = rigids[j].pose[5];; d_quat[2] = rigids[j].pose[6];; d_quat[3] = rigids[j].pose[3];; // send time out in OWL time if(frequency) frame_to_time(frame, frequency, timestamp); else memset(×tamp, 0, sizeof(timestamp)); //send the report send_report(); } return markers.size() || rigids.size() > 0 ? 1 : 0; }