/* Called once per frame from the decode thread (moved here from queue_frame).
 * Decides WHEN the current video frame should be shown: computes the nominal
 * inter-frame delay from the video clock, compares video to the master clock,
 * and either doubles the delay (video ahead), zeroes it and flags a skip
 * (video badly behind), or leaves it alone.  The result is published in
 * movie->dest_showtime, which the decode loop polls; 0 means "skip/no frame
 * pending". */
static void video_refresh_timer(FFMovie* movie)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;
    int skipframe = 0;

    if (movie->video_st) {
        /* shouldn't ever even get this far if no video_st */

        /* update current video pts */
        movie->video_current_pts = movie->video_clock;

        /* compute nominal delay since the previous frame */
        delay = movie->video_clock - movie->frame_last_pts;
        if (delay <= 0 || delay >= 1.0) {
            /* if incorrect delay (non-monotonic or >1s gap), use previous one */
            delay = movie->frame_last_delay;
        }
        movie->frame_last_delay = delay;
        movie->frame_last_pts = movie->video_clock;

        /* we try to correct big delays by duplicating or deleting a frame */
        ref_clock = get_master_clock(movie);
        diff = movie->video_clock - ref_clock;

        /* skip or repeat frame. We take into account the delay to compute the
           threshold: a slow frame rate tolerates more drift before acting. */
        sync_threshold = AV_SYNC_THRESHOLD;
        if (delay > sync_threshold)
            sync_threshold = delay;
        /* only correct while drift is plausibly recoverable */
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold) {
                /* video lags the master clock: drop this frame */
                skipframe = 1;
                delay = 0;
            } else if (diff >= sync_threshold) {
                /* video ahead of master clock: hold the frame twice as long */
                delay = 2 * delay;
            }
        }

        /* frame_timer accumulates ideal show times; actual_delay is how far
           in the future that is relative to the master clock right now */
        movie->frame_timer += delay;
        actual_delay = movie->frame_timer - get_master_clock(movie);
        if (actual_delay > 0.010) {
            movie->dest_showtime = movie->frame_timer;
        }
        if (skipframe) {
            movie->dest_showtime = 0;
            /* movie->dest_showtime = get_master_clock(movie); this shows every frame */
        }
    }
}
bool VideoLayer::relative_seek(double increment) { int ret=0; double current_time=get_master_clock(); // printf("master_clock(): %f\n",current_time); current_time += increment; /** * Check the seek time is correct! * It should not be before or after the beginning and the end of the movie */ if (current_time < 0) // beginning current_time = 0; /** * Forward in video as a loop */ else { // beginning while(current_time > (avformat_context -> duration / AV_TIME_BASE)) { current_time = current_time - (avformat_context->duration / AV_TIME_BASE); } } // printf("VideoLayer::seeking to: %f\n",current_time); ret = seek ((int64_t) current_time * AV_TIME_BASE); if (ret < 0) { error ("Can't seek file: %s", get_filename()); return false; } else notice("seek to %.1f\%",current_time); return true; }
double ffplayer::compute_target_delay(double delay) { double sync_threshold, diff = 0; /* update delay to follow master synchronisation source */ if (m_master_sync_type != AV_SYNC_VIDEO_MASTER) { /* if video is slave, we try to correct big delays by duplicating or deleting a frame */ diff = get_clock(&m_vidclk) - get_master_clock(); /* skip or repeat frame. We take into account the delay to compute the threshold. I still don't know if it is the best guess */ sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay)); if (!isnan(diff) && fabs(diff) <max_frame_duration) { if (diff <= -sync_threshold) delay = FFMAX(0, delay + diff); else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD) delay = delay + diff; else if (diff >= sync_threshold) delay = 2 * delay; } } av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n", delay, -diff); return delay; }
/* Add or subtract samples to get a better sync, return new audio buffer size.
 *
 * is           player state (clocks, running diff average).
 * samples      audio buffer (s16 interleaved); assumed to have headroom past
 *              samples_size for added samples -- TODO confirm against caller.
 * samples_size size in bytes of valid audio in the buffer.
 * pts          presentation timestamp of the buffer (unused here).
 *
 * When audio is not the master clock, a running exponentially-weighted
 * average of (audio clock - master clock) is kept; once enough measurements
 * exist and the average exceeds the threshold, the buffer is shrunk or
 * grown (by repeating the final sample) within a bounded percentage. */
int synchronize_audio(VideoState *is, short *samples,
                      int samples_size, double pts) {
  int n;
  double ref_clock;
  n = 2 * is->audio_st->codec->channels;  /* bytes per sample frame (s16) */
  if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
    double diff, avg_diff;
    int wanted_size, min_size, max_size;
    ref_clock = get_master_clock(is);
    diff = get_audio_clock(is) - ref_clock;
    /* BUGFIX: compare |diff|; the old "diff < threshold" let a large
     * NEGATIVE drift fall through instead of being treated as in-sync
     * territory vs. reset territory symmetrically. */
    if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
      // accumulate the diffs
      is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
      if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
        is->audio_diff_avg_count++;  /* not enough samples to estimate yet */
      } else {
        avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
        if(fabs(avg_diff) >= is->audio_diff_threshold) {
          wanted_size = samples_size +
            ((int)(diff * is->audio_st->codec->sample_rate) * n);
          /* BUGFIX: divide LAST.  The old ((100 - PCT) / 100) was integer
           * division, collapsing min_size to 0 and max_size to
           * samples_size, so the clamp was meaningless. */
          min_size = samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
          max_size = samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
          if(wanted_size < min_size) {
            wanted_size = min_size;
          } else if (wanted_size > max_size) {
            wanted_size = max_size;
          }
          if(wanted_size < samples_size) {
            /* remove samples */
            samples_size = wanted_size;
          } else if(wanted_size > samples_size) {
            uint8_t *samples_end, *q;
            int nb;
            /* add samples by copying the final sample.
             * BUGFIX: nb must be wanted_size - samples_size; the old
             * (samples_size - wanted_size) was negative here, so the
             * copy loop never executed and samples were never added. */
            nb = wanted_size - samples_size;
            samples_end = (uint8_t *)samples + samples_size - n;
            q = samples_end + n;
            while(nb > 0) {
              memcpy(q, samples_end, n);
              q += n;
              nb -= n;
            }
            samples_size = wanted_size;
          }
        }
      }
    } else {
      /* difference is TOO big; reset diff stuff */
      is->audio_diff_avg_count = 0;
      is->audio_diff_cum = 0;
    }
  }
  return samples_size;
}
/* Fired by the SDL refresh timer event.  Pops the next picture from the
 * queue, computes how long to wait before the one after it (syncing video
 * to the master clock when video is not master), reschedules itself, and
 * displays the frame. */
void video_refresh_timer(void *userdata) {
  VideoState *is = (VideoState *)userdata;
  VideoPicture *vp;
  double actual_delay, delay, sync_threshold, ref_clock, diff;
  if(is->video_st) {
    if(is->pictq_size == 0) {
      /* nothing decoded yet: poll again very soon */
      schedule_refresh(is, 1);
    } else {
      vp = &is->pictq[is->pictq_rindex];
      is->video_current_pts = vp->pts;
      is->video_current_pts_time = av_gettime();
      delay = vp->pts - is->frame_last_pts; /* the pts from last time */
      if(delay <= 0 || delay >= 1.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
      }
      /* save for next time */
      is->frame_last_delay = delay;
      is->frame_last_pts = vp->pts;
      /* update delay to sync to audio if not master source */
      if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
        ref_clock = get_master_clock(is);
        diff = vp->pts - ref_clock;
        /* Skip or repeat the frame. Take delay into account.
           FFPlay still doesn't "know if this is the best guess." */
        sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
        if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
          if(diff <= -sync_threshold) {
            delay = 0;          /* video behind: show next frame at once */
          } else if(diff >= sync_threshold) {
            delay = 2 * delay;  /* video ahead: hold twice as long */
          }
        }
      }
      is->frame_timer += delay;
      /* compute the REAL delay: ideal show time minus wall clock */
      actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
      if(actual_delay < 0.010) {
        /* Really it should skip the picture instead */
        actual_delay = 0.010;
      }
      schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
      /* show the picture! */
      video_display(is);
      /* update queue for next picture! (ring buffer advance) */
      if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
        is->pictq_rindex = 0;
      }
      /* wake the decoder waiting for a free queue slot */
      SDL_LockMutex(is->pictq_mutex);
      is->pictq_size--;
      SDL_CondSignal(is->pictq_cond);
      SDL_UnlockMutex(is->pictq_mutex);
    }
  } else {
    /* no video stream yet: retry later */
    schedule_refresh(is, 100);
  }
}
/* this thread gets the stream from the disk or the network */
/* Demux + display loop for one FFMovie: reads packets while queues have
 * room, routes them to the audio queue or the video decoder, shows the
 * pending frame when its scheduled time (dest_showtime) arrives, and
 * handles pause by offsetting the movie's time base.  Cleans up on exit. */
static int decode_thread(void *arg) { /* DECODE THREAD */
    FFMovie *movie = arg;
    int status;
    AVPacket pkt1, *pkt = &pkt1;

    while(!movie->abort_request && !Global_abort_all) {
        /* read only if the queues have room AND no frame is waiting to show */
        if (movie->audioq.size < MAX_AUDIOQ_SIZE && !movie->dest_showtime) {
            if (av_read_packet(movie->context, pkt) < 0) {
                break;  /* EOF or read error ends the thread */
            }
            if (movie->audio_st &&
                        pkt->stream_index == movie->audio_st->index) {
                packet_queue_put(&movie->audioq, pkt);
            } else if (movie->video_st &&
                    pkt->stream_index == movie->video_st->index) {
                /* video is decoded inline on this thread */
                status = video_read_packet(movie, pkt);
                av_free_packet(pkt);
                if(status < 0) {
                    break;
                }
            } else {
                av_free_packet(pkt);  /* unknown stream: discard */
            }
        }

        if(movie->dest_showtime) {
            double now = get_master_clock(movie);
            if(now >= movie->dest_showtime) {
                video_display(movie);
                movie->dest_showtime = 0;
            } else {
                /* not due yet; sleep briefly instead of busy-waiting */
                SDL_Delay(10);
            }
        }

        if(movie->paused) {
            /* block here while paused, then shift time_offset so clocks
               resume where they left off */
            double endpause, startpause = SDL_GetTicks() / 1000.0;
            while(movie->paused && !movie->abort_request && !Global_abort_all) {
                SDL_Delay(100);
            }
            endpause = SDL_GetTicks() / 1000.0;
            movie->dest_showtime = 0;
            movie->time_offset += endpause - startpause;
        }
    }

    ffmovie_cleanup(movie);
    return 0;
}
/**
 * Toggle the out-point marker.
 * First call stamps the current master-clock time; a second call clears it.
 * @return always true.
 */
bool VideoLayer::set_mark_out() {
  const bool already_set = (mark_out != NO_MARK);
  if (already_set) {
    mark_out = NO_MARK;
    notice("mark_out deleted");
    return true;
  }
  mark_out = get_master_clock();
  notice("mark_out: %f", mark_out);
  return true;
}
// default: // break; // } // return true; // } bool VideoLayer::set_mark_in() { if (mark_in == NO_MARK) { mark_in = get_master_clock(); notice("mark_in: %f", mark_in); } else { mark_in = NO_MARK; notice("mark_in deleted"); } return true; }
double FFVideo::cur_clock() const { VideoState* _vs = (VideoState*)_ctx; if (_vs) { double pos; pos = get_master_clock(_vs); if (isnan(pos)) pos = (double)_vs->seek_pos / AV_TIME_BASE; return pos; } return -1; }
/* Resize the audio buffer so the audio clock converges on the master clock.
 *
 * audio        media state carrying the stream and the running diff average.
 * samples      s16 interleaved audio; assumed to have headroom past
 *              samples_size for appended samples -- TODO confirm in caller.
 * samples_size valid bytes in the buffer.
 * pts          presentation timestamp (unused).
 *
 * Returns the (possibly adjusted) buffer size in bytes. */
int sync_audio(Media *audio, short *samples, int samples_size, double pts) {
  int n = 2 * audio->stream->codec->channels;  /* bytes per sample frame */
  double diff, avg_diff;
  int wanted_size, min_size, max_size;
  (void)pts;

  diff = get_audio_clock(audio) - get_master_clock();
  /* BUGFIX: use |diff|; the old "diff < threshold" treated any large
   * negative drift as in-range instead of resetting the average. */
  if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
    /* exponentially-weighted accumulation of the drift */
    audio->audio_diff_cum = diff +
        audio->audio_diff_avg_coef * audio->audio_diff_cum;
    if (audio->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
      audio->audio_diff_avg_count++;  /* still warming up the estimate */
    } else {
      avg_diff = audio->audio_diff_cum * (1.0 - audio->audio_diff_avg_coef);
      if (fabs(avg_diff) >= audio->audio_diff_threshold) {
        wanted_size = samples_size +
            ((int)(diff * audio->stream->codec->sample_rate) * n);
        /* BUGFIX: divide LAST -- the old ((100-PCT)/100) was integer
         * division, making min_size 0 and max_size samples_size. */
        min_size = samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
        max_size = samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
        if (wanted_size < min_size) {
          wanted_size = min_size;
        } else if (wanted_size > max_size) {
          wanted_size = max_size;
        }
        if (wanted_size < samples_size) {
          /* remove samples */
          samples_size = wanted_size;
        } else if (wanted_size > samples_size) {
          uint8_t *samples_end, *q;
          /* BUGFIX: nb was (samples_size - wanted_size), i.e. negative in
           * this branch, so the append loop never ran. */
          int nb = wanted_size - samples_size;
          /* append copies of the final sample frame */
          samples_end = (uint8_t *)samples + samples_size - n;
          q = samples_end + n;
          while (nb > 0) {
            memcpy(q, samples_end, n);
            q += n;
            nb -= n;
          }
          samples_size = wanted_size;
        }
      }
    }
  } else {
    /* drift too large to be believable: reset the filter */
    audio->audio_diff_avg_count = 0;
    audio->audio_diff_cum = 0;
  }
  return samples_size;
}
/* Partial audio-sync step: when audio is NOT the master clock, maintain the
 * exponentially-weighted average of (audio clock - master clock).  No buffer
 * adjustment is applied yet -- samples_size is always returned unchanged.
 * (The estimate feeds the full correction in a later revision.) */
int synchronize_audio(VideoState *is, short *samples,
                      int samples_size, double pts) {
  (void)samples;  /* untouched in this variant */
  (void)pts;

  if (is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
    double diff = get_audio_clock(is) - get_master_clock(is);
    /* |diff| keeps large negative drift out of the accumulate branch
     * (the old "diff < threshold" test missed it). */
    if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
      /* accumulate the diffs */
      is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
      if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
        is->audio_diff_avg_count++;
      } else {
        double avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
        (void)avg_diff;  /* TODO: apply the size correction (see the full
                          * synchronize_audio implementation) */
      }
    } else {
      /* drift too large: reset the filter */
      is->audio_diff_avg_count = 0;
      is->audio_diff_cum = 0;
    }
  }
  return samples_size;
}
/* return the new audio buffer size (samples can be added or deleted to get
   better sync if video or external master clock) */
/* movie          player state (SDL AUDIO THREAD).
 * samples        s16 interleaved audio; assumed to have headroom past
 *                samples_size1 for appended samples -- TODO confirm caller.
 * samples_size1  valid bytes in the buffer.
 * pts            presentation timestamp (unused).
 * Returns the adjusted size in bytes. */
static int synchronize_audio(FFMovie *movie, short *samples,
                             int samples_size1, double pts)
{
    /*SDL AUDIO THREAD*/
    int n, samples_size;
    double ref_clock;
    double diff, avg_diff;
    int wanted_size, min_size, max_size, nb_samples;

    n = 2 * movie->audio_st->codec.channels;  /* bytes per sample frame */
    samples_size = samples_size1;

    /* try to remove or add samples to correct the clock */
    ref_clock = get_master_clock(movie);
    diff = get_audio_clock(movie) - ref_clock;

    /* BUGFIX: compare |diff|; a large NEGATIVE drift must also reset the
     * running average instead of being fed into it. */
    if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
        movie->audio_diff_cum = diff +
            movie->audio_diff_avg_coef * movie->audio_diff_cum;
        if (movie->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
            /* not enough measures to have a correct estimate */
            movie->audio_diff_avg_count++;
        } else {
            /* estimate the A-V difference */
            avg_diff = movie->audio_diff_cum * (1.0 - movie->audio_diff_avg_coef);
            if (fabs(avg_diff) >= movie->audio_diff_threshold) {
                wanted_size = samples_size +
                    ((int)(diff * movie->audio_st->codec.sample_rate) * n);
                nb_samples = samples_size / n;
                min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX))
                            / 100) * n;
                max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX))
                            / 100) * n;
                if (wanted_size < min_size)
                    wanted_size = min_size;
                else if (wanted_size > max_size)
                    wanted_size = max_size;
                /* add or remove samples to correct the synchro */
                if (wanted_size < samples_size) {
                    /* remove samples */
                    samples_size = wanted_size;
                } else if (wanted_size > samples_size) {
                    uint8_t *samples_end, *q;
                    int nb;
                    /* add samples by repeating the final one.
                     * BUGFIX: nb was (samples_size - wanted_size), which is
                     * negative in this branch, so the loop never ran and no
                     * samples were ever added. */
                    nb = wanted_size - samples_size;
                    samples_end = (uint8_t *)samples + samples_size - n;
                    q = samples_end + n;
                    while (nb > 0) {
                        memcpy(q, samples_end, n);
                        q += n;
                        nb -= n;
                    }
                    samples_size = wanted_size;
                }
            }
        }
    } else {
        /* too big difference : may be initial PTS errors, so
           reset A-V filter */
        movie->audio_diff_avg_count = 0;
        movie->audio_diff_cum = 0;
    }
    return samples_size;
}
int main(int argc, char *argv[]) { SDL_Event event; VideoState *is; int i; puts("start"); global_mutex_lock = SDL_CreateMutex(); is = av_mallocz(sizeof(VideoState)); if (argc < 2){ fprintf(stderr, "Usage: test <file>\n"); exit(1); } av_register_all(); // Register all formats and codecs puts("avregister"); //if (av_register_protocol(&e2URLProtocol) < 0){ // printf("Error - URL protocol \n"); // exit(-1) //} if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)){ fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } for(i=0; i<MAX_CHANNELS;i++){ // Make a screen to put our video #ifndef __DARWIN__ screen[i] = SDL_SetVideoMode(640, 480, 0, 0); #else screen[i] = SDL_SetVideoMode(640, 480, 24, 0); #endif if (!screen[i]){ fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } } for(i=0; i<MAX_CHANNELS;i++){ global_video_state[i] = av_mallocz(sizeof(VideoState)); global_video_state[i]->videoIndex = i; puts("screen created"); printf("i is: %d\n",i); av_strlcpy(global_video_state[i]->filename, argv[i+1], sizeof(global_video_state[i]->filename)); puts("avstrlcpy"); global_video_state[i]->pictq_mutex = SDL_CreateMutex(); global_video_state[i]->pictq_cond = SDL_CreateCond(); schedule_refresh(global_video_state[i], 40); global_video_state[i]->av_sync_type = DEFAULT_AV_SYNC_TYPE; global_video_state[i]->parse_tid = SDL_CreateThread(decode_thread, global_video_state[i]); puts("main var created"); if (!global_video_state[i]->parse_tid) { av_free(global_video_state[i]); return -1; } } av_init_packet(&f_pkt); puts("av_init_packet"); f_pkt.data = (unsigned char*)"FLUSH"; for (;;) { double inc , pos; SDL_WaitEvent(&event); switch (event.type) { case SDL_KEYDOWN: switch (event.key.keysym.sym) { case SDLK_LEFT: inc = -10.0; goto do_seek; case SDLK_RIGHT: inc = 10.0; goto do_seek; case SDLK_UP: inc = 60.0; goto do_seek; case SDLK_DOWN: inc = -60.0; goto do_seek; do_seek: SDL_LockMutex(global_mutex_lock); if 
(global_video_state[global_videoIndex]){ pos = get_master_clock(global_video_state[global_videoIndex]); pos += inc; stream_seek(global_video_state[global_videoIndex],(int64_t)(pos *AV_TIME_BASE),inc); } SDL_UnlockMutex(global_mutex_lock); break; case SDLK_b: global_video_state[global_videoIndex]->color_req = 'b'; break; case SDLK_r: global_video_state[global_videoIndex]->color_req = 'r'; break; case SDLK_g: global_video_state[global_videoIndex]->color_req = 'g'; break; case SDLK_w: global_video_state[global_videoIndex]->color_req = 'w'; break; case SDLK_n: global_video_state[global_videoIndex]->color_req = 'n'; break; case SDLK_1: change_channel(1); break; case SDLK_2: change_channel(2); break; case SDLK_3: change_channel(3); break; case SDLK_4: change_vidchannel(1); break; case SDLK_5: change_vidchannel(2); break; case SDLK_6: change_vidchannel(3); break; case SDLK_7: change_audchannel(1); break; case SDLK_8: change_audchannel(2); break; case SDLK_9: change_audchannel(3); break; default: break; } break; case FF_QUIT_EVENT: case SDL_QUIT: for(i=0; i<MAX_CHANNELS; i++){ global_video_state[i]->quit = 1; SDL_CondSignal(global_video_state[i]->audioq.cond); SDL_CondSignal(global_video_state[i]->videoq.cond); } SDL_Quit(); exit(0); break; case FF_ALLOC_EVENT: alloc_picture(event.user.data1); break; case FF_REFRESH_EVENT: video_refresh_timer(event.user.data1); break; default: break; } } return 0; }
/* Demux thread: opens the input, selects and opens the first audio and video
 * streams, optionally configures an S16 resampler, then loops pulling packets
 * into the per-stream queues, handling seek requests and queue back-pressure,
 * until EOF/error/quit.  Posts FF_QUIT_EVENT on the way out. */
int decode_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;
    AVDictionary *io_dict = NULL;
    AVIOInterruptCB callback;
    int video_index = -1;
    int audio_index = -1;
    int i;
    is->videoStream = -1;
    is->audioStream = -1;
    is->audio_need_resample = 0;
    global_video_state = is;
    // will interrupt blocking functions if we quit!
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;
    if(avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }
    // Open video file
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
        return -1; // Couldn't open file
    }
    is->pFormatCtx = pFormatCtx;
    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1; // Couldn't find stream information
    }
    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->filename, 0);
    // Find the first video stream (and first audio stream)
    for(i = 0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
                && video_index < 0) {
            video_index = i;
        }
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                && audio_index < 0) {
            audio_index = i;
        }
    }
    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }
    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }
    if(is->videoStream < 0 && is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }
#ifdef __RESAMPLER__
    /* Non-S16 audio must be resampled to S16/44100/stereo for SDL output. */
    if(audio_index >= 0
            && pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
        is->audio_need_resample = 1;
        is->pResampledOut = NULL;
        is->pSwrCtx = NULL;
        printf("Configure resampler: ");
#ifdef __LIBAVRESAMPLE__
        printf("libAvResample\n");
        is->pSwrCtx = avresample_alloc_context();
#endif
#ifdef __LIBSWRESAMPLE__
        printf("libSwResample\n");
        is->pSwrCtx = swr_alloc();
#endif
        // Some MP3/WAV don't tell this so make assumption that
        // they are stereo, not 5.1
        if(pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 2) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        } else if(pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 1) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;
        } else if(pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 0) {
            /* neither layout nor channel count known: assume stereo */
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            pFormatCtx->streams[audio_index]->codec->channels = 2;
        }
        av_opt_set_int(is->pSwrCtx, "in_channel_layout",
                       pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_fmt",
                       pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_rate",
                       pFormatCtx->streams[audio_index]->codec->sample_rate, 0);
        av_opt_set_int(is->pSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_rate", 44100, 0);
#ifdef __LIBAVRESAMPLE__
        if (avresample_open(is->pSwrCtx) < 0) {
#else
        if (swr_init(is->pSwrCtx) < 0) {
#endif
            /* resampler init failed: fall back to raw audio */
            fprintf(stderr, " ERROR!! From Samplert: %d Hz Sample format: %s\n",
                    pFormatCtx->streams[audio_index]->codec->sample_rate,
                    av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
            fprintf(stderr, " To 44100 Sample format: s16\n");
            is->audio_need_resample = 0;
            is->pSwrCtx = NULL;;
        }
    }
#endif
    // main decode loop
    for(;;) {
        if(is->quit) {
            break;
        }
        // seek stuff goes here
        if(is->seek_req) {
            int stream_index = -1;
            int64_t seek_target = is->seek_pos;
            if(is->videoStream >= 0) {
                stream_index = is->videoStream;
            } else if(is->audioStream >= 0) {
                stream_index = is->audioStream;
            }
            if(stream_index >= 0) {
                /* convert from AV_TIME_BASE units to the stream's time base */
                seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
                                           pFormatCtx->streams[stream_index]->time_base);
            }
            if(av_seek_frame(is->pFormatCtx, stream_index, seek_target,
                             is->seek_flags) < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
            } else {
                /* drop stale packets and mark the boundary with a flush packet */
                if(is->audioStream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if(is->videoStream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }
        /* back-pressure: stop reading while the queues are full */
        if(is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }
        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;
            } else {
                break;
            }
        }
        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);
        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);
        } else {
            av_free_packet(packet);
        }
    }
    /* all done - wait for it */
    while(!is->quit) {
        SDL_Delay(100);
    }
fail: {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

/* Record a seek request for the decode thread to act on; ignored if a
 * previous request is still pending.  rel < 0 means seek backwards. */
void stream_seek(VideoState *is, int64_t pos, int rel) {
    if(!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        is->seek_req = 1;
    }
}

/* Entry point: initializes SDL and the screen, starts the decode thread for
 * the single input file, then runs the event loop (arrow-key seeks, quit,
 * picture alloc/refresh events). */
int main(int argc, char *argv[]) {
    SDL_Event event;
    //double pts;
    VideoState *is;
    is = av_mallocz(sizeof(VideoState));
    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();
    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }
    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
    screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif
    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }
    av_strlcpy(is->filename, argv[1], 1024);
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();
    schedule_refresh(is, 40);
    is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if(!is->parse_tid) {
        av_free(is);
        return -1;
    }
    av_init_packet(&flush_pkt);
    flush_pkt.data = (unsigned char *)"FLUSH"; /* sentinel flush packet */
    for(;;) {
        double incr, pos;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
                goto do_seek;
            do_seek:
                if(global_video_state) {
                    pos = get_master_clock(global_video_state);
                    pos += incr;
                    stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr);
                }
                break;
            default:
                break;
            }
            break;
        case FF_QUIT_EVENT:
        case SDL_QUIT:
            is->quit = 1;
            /*
             * If the video has finished playing, then both the picture and
             * audio queues are waiting for more data.  Make them stop
             * waiting and terminate normally.
             */
            SDL_CondSignal(is->audioq.cond);
            SDL_CondSignal(is->videoq.cond);
            SDL_Quit();
            exit(0);
            break;
        case FF_ALLOC_EVENT:
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
    return 0;
}
/* Allegro demux thread: opens the pre-selected A/V streams, then loops
 * handling seek requests, queue back-pressure, and packet routing until
 * EOF/error/quit. */
static void *decode_thread(ALLEGRO_THREAD *t, void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVFormatContext *format_context = is->format_context;
    AVPacket pkt1, *packet = &pkt1;

    is->videoStream = -1;
    is->audioStream = -1;

    if (is->audio_index >= 0) {
        stream_component_open(is, is->audio_index);
    }
    if (is->video_index >= 0) {
        stream_component_open(is, is->video_index);
    }

    if (is->videoStream < 0 && is->audioStream < 0) {
        ALLEGRO_ERROR("%s: could not open codecs\n", is->filename);
        goto fail;
    }

    for (;;) {
        if (is->quit) {
            break;
        }

        if (is->seek_req) {
            int stream_index = -1;
            int64_t seek_target = is->seek_pos;

            if (is->videoStream >= 0)
                stream_index = is->videoStream;
            else if (is->audioStream >= 0)
                stream_index = is->audioStream;

            if (stream_index >= 0) {
                /* convert AV_TIME_BASE units to the stream's own time base */
                seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
                    format_context->streams[stream_index]->time_base);
            }

            if (av_seek_frame(is->format_context, stream_index, seek_target,
                    is->seek_flags) < 0) {
                ALLEGRO_WARN("%s: error while seeking (%d, %lu)\n",
                    is->format_context->filename, stream_index, seek_target);
            }
            else {
                /* drop stale packets; flush_pkt tells decoders to reset */
                if (is->audioStream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->videoStream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            is->after_seek_sync = true;  /* audio resync handled downstream */
        }

        /* back-pressure: pause reading while queues are full */
        if (is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) {
            al_rest(0.01);
            continue;
        }

        if (av_read_frame(is->format_context, packet) < 0) {
            /* distinguish transient EAGAIN-ish states from real EOF/error */
#ifdef FFMPEG_0_8
            if (!format_context->pb->eof_reached && !format_context->pb->error) {
#else
            if (url_ferror((void *)&format_context->pb) == 0) {
#endif
                al_rest(0.01);
                continue;
            }
            else {
                break;
            }
        }

        // Is this a packet from the video stream?
        if (packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);
        }
        else if (packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);
        }
        else {
            av_free_packet(packet);
        }
    }

    /* all done - wait for it */
    while (!is->quit) {
        al_rest(0.1);
    }

fail:
    return t;
}

/* We want to be able to send an event to the user exactly at the time
 * a new video frame should be displayed. */
/* Sleeps until is->show_next (as signalled via timer_cond), then emits an
 * ALLEGRO_EVENT_VIDEO_FRAME_SHOW user event.  Exists so the user doesn't
 * have to do the precise waiting themselves. */
static void *timer_thread(ALLEGRO_THREAD *t, void *arg)
{
    VideoState *is = (VideoState *) arg;
    double ot = 0, nt = 0;

    while (!is->quit) {
        ALLEGRO_EVENT event;
        double d;

        /* Wait here until someone signals to us when a new frame was
         * scheduled at is->show_next. */
        al_lock_mutex(is->timer_mutex);
        al_wait_cond(is->timer_cond, is->timer_mutex);
        al_unlock_mutex(is->timer_mutex);

        if (is->quit)
            break;

        /* Wait until that time. This wait is why we have our own thread
         * here so the user doesn't need to do it. */
        while (1) {
            d = is->show_next - get_master_clock(is);
            if (d <= 0)
                break;
            //printf("waiting %4.1f ms\n", d * 1000);
            al_rest(d);
        }

        nt = get_master_clock(is);
        //printf("event after %4.1f ms\n", (nt - ot) * 1000);
        ot = nt;
        /* Now is the time. */
        event.type = ALLEGRO_EVENT_VIDEO_FRAME_SHOW;
        event.user.data1 = (intptr_t)is->video;
        al_emit_user_event(&is->video->es, &event, NULL);
    }
    return t;
}
/* Shows the next queued picture when its time has come: computes the
 * audio-synced delay, swaps the decoded bitmap into video->current_frame,
 * schedules the following frame via show_next/timer_cond, and advances the
 * picture ring buffer.  Tail-recurses when late to catch up. */
static void video_refresh_timer(void *userdata)
{
    VideoState *is = (VideoState *) userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* guards: not started, no video, nothing queued, paused, or not due */
    if (!is->first)
        return;
    if (!is->video_st)
        return;
    if (is->pictq_size == 0)
        return;
    if (is->paused)
        return;
    if (get_master_clock(is) < is->show_next)
        return;

    vp = &is->pictq[is->pictq_rindex];

    is->video_current_pts = vp->pts;
    is->video_current_pts_time = av_gettime();

    delay = vp->pts - is->frame_last_pts;       /* the pts from last time */
    if (delay <= 0 || delay >= 1.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    }
    /* save for next time */
    is->frame_last_delay = delay;
    is->frame_last_pts = vp->pts;

    /* update delay to sync to audio if not master source */
    if (is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
        ref_clock = get_master_clock(is);
        diff = vp->pts - ref_clock;

        /* Skip or repeat the frame. Take delay into account */
        sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold) {
                delay = 0;          /* video lags: show ASAP */
            }
            else if (diff >= sync_threshold) {
                delay = 2 * delay;  /* video ahead: hold longer */
            }
        }
    }

    is->frame_timer += delay;
    /* computer the REAL delay */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);

    if (!vp->dropped && vp->bmp) {
        /* Rotate bitmaps: the decoded frame becomes current_frame, and the
         * previously shown one becomes the decoder's scratch target so the
         * visible picture is never overwritten mid-display. */
        is->video->current_frame = vp->bmp;
        /* Can be NULL or wrong size, will be (re-)allocated as needed. */
        vp->bmp = is->shown.bmp;
        /* That way it won't be overwritten. */
        is->shown.bmp = is->video->current_frame;

        is->video->position = get_master_clock(is);
        is->video->video_position = get_video_clock(is);
        is->video->audio_position = get_audio_clock(is);

        /* hand the next show time to timer_thread */
        is->show_next = is->video->position + actual_delay;
        al_signal_cond(is->timer_cond);

        /* derive display aspect ratio from the stream's SAR, falling back
         * to the raw pixel dimensions */
        if (is->video_st->codec->sample_aspect_ratio.num == 0) {
            is->video->aspect_ratio = 0;
        }
        else {
            is->video->aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
                is->video_st->codec->width / is->video_st->codec->height;
        }
        if (is->video->aspect_ratio <= 0.0) {
            is->video->aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;
        }
    }
    else
        is->dropped_count++;

    //printf("[%d] %f %s\n", is->pictq_rindex,
    //   actual_delay, vp->dropped ? "dropped" : "shown");

    /* update queue for next picture! */
    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
        is->pictq_rindex = 0;
    }
    al_lock_mutex(is->pictq_mutex);
    is->pictq_size--;
    al_signal_cond(is->pictq_cond);
    al_unlock_mutex(is->pictq_mutex);

    /* We skipped a frame... let's grab more until we catch up.
     * NOTE(review): recursion depth is bounded only by how far behind we
     * are and the queue size -- confirm this cannot run away. */
    if (actual_delay < 0)
        video_refresh_timer(userdata);
}
/* Add or subtract samples to get a better sync, return new audio buffer size.
 *
 * is           player state (clocks, diff average, seek flag).
 * samples      s16 interleaved audio; assumed to have headroom past
 *              samples_size for appended samples -- TODO confirm in caller.
 * samples_size valid bytes in the buffer.
 * pts          presentation timestamp (unused).
 */
static int synchronize_audio(VideoState * is, short *samples,
                             int samples_size, double pts)
{
    int n;
    double ref_clock;
    (void)pts;
    double diff;

    if (is->after_seek_sync) {
        /* Audio seems to be off for me after seeking, but skipping
         * video is less annoying than audio noise after the seek
         * when synching to the external clock. */
        is->external_clock_start = av_gettime() - get_audio_clock(is) * 1000000.0;
        is->after_seek_sync = false;
    }

    n = 2 * is->audio_st->codec->channels;  /* bytes per sample frame */

    if (is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
        double avg_diff;
        int wanted_size, min_size, max_size;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;
        //printf("%f, %f\n", diff, get_audio_clock(is));

        /* BUGFIX: compare |diff| -- the old "diff < threshold" fed large
         * NEGATIVE drift into the average instead of resetting it. */
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            // accumulate the diffs
            is->audio_diff_cum = diff + is->audio_diff_avg_coef *
                is->audio_diff_cum;

            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                is->audio_diff_avg_count++;  /* estimate still warming up */
            }
            else {
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    //printf("AV_NOSYNC_THRESHOLD %f\n", avg_diff);
                    wanted_size = samples_size +
                        ((int)(avg_diff * is->audio_st->codec->sample_rate) * n);
                    /* clamp correction; keep sizes 4-byte aligned */
                    min_size = samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
                    min_size &= ~3;
                    max_size = samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
                    max_size &= ~3;

                    if (wanted_size < min_size) {
                        wanted_size = min_size;
                    }
                    else if (wanted_size > max_size) {
                        wanted_size = max_size;
                    }

                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    }
                    else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;
                        /* add samples by copying the final sample.
                         * BUGFIX: nb was (samples_size - wanted_size), which
                         * is negative here, so the loop never executed and
                         * no samples were ever added. */
                        nb = wanted_size - samples_size;
                        samples_end = (uint8_t *) samples + samples_size - n;
                        q = samples_end + n;

                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
            }
        }
        else {
            /* difference is TOO big; reset diff stuff */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
int main(int argc, char *argv[]) { SDL_Event event; double pts; VideoState *is; is = av_mallocz(sizeof(VideoState)); if(argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } // Register all formats and codecs av_register_all(); if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); } // Make a screen to put our video #ifndef __DARWIN__ screen = SDL_SetVideoMode(640, 480, 0, 0); #else screen = SDL_SetVideoMode(640, 480, 24, 0); #endif if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); } pstrcpy(is->filename, sizeof(is->filename), argv[1]); is->pictq_mutex = SDL_CreateMutex(); is->pictq_cond = SDL_CreateCond(); schedule_refresh(is, 40); is->av_sync_type = DEFAULT_AV_SYNC_TYPE; is->parse_tid = SDL_CreateThread(decode_thread, is); if(!is->parse_tid) { av_free(is); return -1; } av_init_packet(&flush_pkt); flush_pkt.data = "FLUSH"; for(;;) { double incr, pos; SDL_WaitEvent(&event); switch(event.type) { case SDL_KEYDOWN: switch(event.key.keysym.sym) { case SDLK_LEFT: incr = -10.0; goto do_seek; case SDLK_RIGHT: incr = 10.0; goto do_seek; case SDLK_UP: incr = 60.0; goto do_seek; case SDLK_DOWN: incr = -60.0; goto do_seek; do_seek: if(global_video_state) { pos = get_master_clock(global_video_state); pos += incr; stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr); } break; default: break; } break; case FF_QUIT_EVENT: case SDL_QUIT: is->quit = 1; SDL_Quit(); exit(0); break; case FF_ALLOC_EVENT: alloc_picture(event.user.data1); break; case FF_REFRESH_EVENT: video_refresh_timer(event.user.data1); break; default: break; } } return 0; }
/* Blit the picture at the queue read index to the screen, letterboxed to
 * preserve the stream's aspect ratio. Uses a YUV overlay or a software
 * stretch depending on USE_SWS_YUV_CONVERT. */
void video_display(VideoState *is) {
    SDLMOD_Rect rect;
    VideoPicture *vp;
    AVPicture pict;       /* unused here; left over from the tutorial code */
    float aspect_ratio;
    int w, h, x, y;
    int i;                /* unused */
    vp = &is->pictq[is->pictq_rindex];
#if !USE_SWS_YUV_CONVERT
    if(vp->bmp) {
#else
    if(vp->sfc) {
#endif
        /* sample_aspect_ratio.num == 0 means "unknown"; fall back below. */
        if(is->video_st->codec->sample_aspect_ratio.num == 0) {
            aspect_ratio = 0;
        } else {
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
                is->video_st->codec->width / is->video_st->codec->height;
        }
        if(aspect_ratio <= 0.0)
            aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;
        /* Fit to the screen height first; `& -3` rounds the size down to a
         * multiple of 4. If that is too wide, fit to the width instead. */
        h = screen->h;
        w = ((int)(h * aspect_ratio)) & -3;
        if(w > screen->w) {
            w = screen->w;
            h = ((int)(w / aspect_ratio)) & -3;
        }
        /* Center the picture on screen. */
        x = (screen->w - w) / 2;
        y = (screen->h - h) / 2;
        rect.x = x;
        rect.y = y;
        rect.w = w;
        rect.h = h;
#if !USE_SWS_YUV_CONVERT
        SDLMOD_DisplayYUVOverlay(vp->bmp, &rect);
#else
        /* Only stretch when both source and destination are non-empty. */
        if (vp->sfc->w > 0 && vp->sfc->h > 0 && rect.w > 0 && rect.h > 0) {
            SDLMOD_Rect srcrect;
            srcrect.x = 0;
            srcrect.y = 0;
            srcrect.w = vp->sfc->w;
            srcrect.h = vp->sfc->h;
            SDLMOD_LockSurface(screen);
            //FIXME: SoftStretch doesn't support empty rect (dstrect->h == 0), will crash.
            SDLMOD_SoftStretch(vp->sfc, &srcrect, screen, &rect);
            SDLMOD_UnlockSurface(screen);
        }
#endif
#if USE_GL
        glutPostRedisplay();
#endif
    }
}

/* Request an asynchronous seek to `pos` (stream time base units).
 * `rel` is the direction of the user's seek increment: negative selects
 * AVSEEK_FLAG_BACKWARD. Ignored if a seek is already pending. */
void stream_seek(VideoState *is, int64_t pos, int rel) {
    if(!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        is->seek_req = 1;
    }
}

/* Refresh-timer callback: show the next queued picture at the right time.
 * Computes the inter-frame delay from successive PTS values, nudges it
 * toward the master clock (skip = zero delay, repeat = double delay),
 * displays the frame, and reschedules itself. */
void video_refresh_timer(void *userdata) {
    VideoState *is = (VideoState*)userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;
    if(is->video_st) {
        if(is->pictq_size == 0) {
            /* Nothing queued yet: poll again very soon. */
            schedule_refresh(is, 1);
        } else {
            vp = &is->pictq[is->pictq_rindex];
            /* Remember what is on screen and when, for clock queries. */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();
            /* Nominal delay = PTS difference between this and the last frame;
             * fall back to the previous delay when it looks bogus. */
            delay = vp->pts - is->frame_last_pts;
            if(delay <= 0 || delay >= 1.0)
                delay = is->frame_last_delay;
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;
            if(is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;
                /* Threshold grows with the frame duration. */
                sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
                if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if(diff <= -sync_threshold) {
                        delay = 0;           /* we are late: show immediately */
                    } else if(diff >= sync_threshold) {
                        delay = 2 * delay;   /* we are early: hold the frame */
                    }
                }
            }
            is->frame_timer += delay;
            /* Convert the absolute target time into a relative delay. */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if(actual_delay < 0.010)
                actual_delay = 0.010;   /* minimum refresh granularity */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
            video_display(is);
            /* Advance the read index and release one queue slot. */
            if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;
            pthread_mutex_lock(is->pictq_mutex);
            is->pictq_size--;
            //printf("video_refresh_timer signal %d\n", is->pictq_size);
            pthread_cond_signal(is->pictq_cond);
            pthread_mutex_unlock(is->pictq_mutex);
        }
    } else
        /* No video stream (yet): retry later. */
        schedule_refresh(is, 100);
}
/* Main event loop (thread entry point): dispatches SDLMOD events.
 * Handles quit, picture-allocation and refresh events; the resize/seek
 * key handling is currently disabled (#if 0). Never returns normally —
 * exits the process on quit. `data` is the player's VideoState. */
void *event_loop(void *data) {
    SDLMOD_Event event;
    VideoState *is;
    is = (VideoState *)data;
    for(;;) {
        double incr, pos;
        SDLMOD_WaitEvent(&event);
        switch(event.type) {
#if 0
        case SDL_VIDEORESIZE:
            g_video_width = event.resize.w;
            g_video_height = event.resize.h;
            g_video_resized = 1;
            break;
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_LEFT:
                incr = -1.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 1.0;
                goto do_seek;
            case SDLK_UP:
                incr = 6.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -6.0;
                goto do_seek;
            do_seek:
                if(global_video_state) {
                    /* get the current playback position */
                    pos = get_master_clock(global_video_state);
                    pos += incr;
                    stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr);
                }
            default:
                break;
            }
            break;
#endif
        case FF_QUIT_EVENT:
        case SDLMOD_QUIT:
            /* Shut down timers and the event loop, then exit the process. */
            SDLMOD_TimerQuit();
            //FIXME: no refresh op
            SDLMOD_StopEventLoop();
            is->quit = 1;
            exit(0);
            break;
        case FF_ALLOC_EVENT:
            /* Picture buffers are (re)allocated on this thread. */
            //printf("FF_ALLOC_EVENT begin\n");
            alloc_picture(event.user.data1);
            //printf("FF_ALLOC_EVENT end\n");
            break;
        case FF_REFRESH_EVENT:
            /* Display the next frame and reschedule. */
            //printf("FF_REFRESH_EVENT begin\n");
            video_refresh_timer(event.user.data1);
            //printf("FF_REFRESH_EVENT end\n");
            break;
        default:
            printf("event.type == %d\n", event.type);
            break;
        }
    }
}
/* Decode and return the next video frame as RGBA pixel data.
 *
 * Reads packets from the demuxer until a complete video picture is decoded,
 * converting it to RGBA into the frame FIFO; audio packets encountered along
 * the way are decoded, resampled if needed, and written to the screen's
 * audio ring buffer as a side effect. Loops the file at EOF, honours the
 * user's mark_in/mark_out loop and any pending seek request.
 *
 * Returns a pointer to the decoded frame's pixels, or NULL when decoding
 * fails / produces no data. */
void *VideoLayer::feed() {
    int got_picture=0;
    int len1=0;
    int ret=0;
    bool got_it=false;
    double now = get_master_clock();

    /* While paused, keep returning the last converted picture. */
    if(paused)
        return rgba_picture->data[0];

    /**
     * follow user video loop
     */
    if(mark_in!=NO_MARK && mark_out!=NO_MARK && seekable) {
        if (now >= mark_out)
            seek((int64_t)mark_in * AV_TIME_BASE);
    }

    // operate seek if was requested
    if(to_seek>=0) {
        seek(to_seek);
        to_seek = -1;
    }

    got_it=false;
    while (!got_it) {
        if(packet_len<=0) {
            /**
             * Read one packet from the media and put it in pkt
             */
            while(1) {
#ifdef DEBUG
                func("av_read_frame ...");
#endif
                ret = av_read_frame(avformat_context, &pkt);
#ifdef DEBUG
                if(pkt.stream_index == video_index)
                    std::cout << "video read packet";
                else if(pkt.stream_index == audio_index)
                    std::cout << "audio read packet";
                std::cout << " pkt.data=" << pkt.data;
                std::cout << " pkt.size=" << pkt.size;
                std::cout << " pkt.pts/dts=" << pkt.pts << "/" << pkt.dts << std::endl;
                std::cout << "pkt.duration=" << pkt.duration;
                std::cout << " avformat_context->start_time=" << avformat_context->start_time;
                std::cout << " avformat_context->duration=" << avformat_context->duration/AV_TIME_BASE << std::endl;
                std::cout << "avformat_context->duration=" << avformat_context->duration << std::endl;
#endif
                /* TODO(shammash): this may be good for streams but breaks
                 * looping in files, needs fixing. */
                // if(!pkt.duration) continue;
                // if(!pkt.size || !pkt.data) {
                //     return NULL;
                // }

                /**
                 * check eof and loop
                 */
                if(ret!= 0) { //does not enter if data are available
                    /* EOF (or read error): notify listeners, rewind, retry. */
                    eos->notify();
                    // eos->dispatcher->do_jobs(); /// XXX hack hack hack
                    ret = seek(avformat_context->start_time);
                    if (ret < 0) {
                        error("VideoLayer::could not loop file");
                        return rgba_picture->data[0];
                    }
                    continue;
                } else if( (pkt.stream_index == video_index)
                           || (pkt.stream_index == audio_index) )
                    break; /* exit loop */
            }
        } // loop break after a known index is found

        frame_number++;
        //std::cout << "frame_number :" << frame_number << std::endl;

        /**
         * Decode video
         */
        if(pkt.stream_index == video_index) {
            len1 = decode_video_packet(&got_picture);
            AVFrame *yuv_picture=&av_frame;
            if(len1<0) {
                // error("VideoLayer::Error while decoding frame");
                func("one frame only?");
                return NULL;
            } else if (len1 == 0) {
                /* decoder consumed nothing: drop this packet */
                packet_len=0;
                return NULL;
            }
            /**
             * We've found a picture
             */
            ptr += len1;
            packet_len -= len1;
            if (got_picture!=0) {
                got_it=true;
                avformat_stream=avformat_context->streams[video_index];

                /** Deinterlace input if requested */
                if(deinterlaced)
                    deinterlace((AVPicture *)yuv_picture);

#ifdef WITH_SWSCALE
                /* Convert the decoded YUV picture to RGBA. */
                sws_scale(img_convert_ctx, yuv_picture->data, yuv_picture->linesize,
                          0, video_codec_ctx->height,
                          rgba_picture->data, rgba_picture->linesize);
#else
                /**
                 * yuv2rgb
                 */
                img_convert(rgba_picture, PIX_FMT_RGB32, (AVPicture *)yuv_picture,
                            video_codec_ctx->pix_fmt, //avformat_stream.codec->pix_fmt,
                            video_codec_ctx->width,
                            video_codec_ctx->height);
#endif
                // memcpy(frame_fifo.picture[fifo_position % FIFO_SIZE]->data[0],rgba_picture->data[0],geo.size);

                /* TODO move */
                if(fifo_position == FIFO_SIZE)
                    fifo_position=0;

                /* workaround since sws_scale conversion from YUV returns an
                   buffer RGBA with alpha set to 0x0: force every pixel's
                   alpha bits on so the frame is fully opaque */
                {
                    register int bufsize = ( rgba_picture->linesize[0] * video_codec_ctx->height ) /4;
                    int32_t *pbuf = (int32_t*)rgba_picture->data[0];
                    for(; bufsize>0; bufsize--) {
                        *pbuf = (*pbuf | alpha_bitmask);
                        pbuf++;
                    }
                }

                /* Publish the converted frame into the FIFO slot. */
                jmemcpy(frame_fifo.picture[fifo_position]->data[0],
                        rgba_picture->data[0],
                        rgba_picture->linesize[0] * video_codec_ctx->height);
                // avpicture_get_size(PIX_FMT_RGBA32, enc->width, enc->height));
                fifo_position++;
            }
        } // end video packet decoding
        ////////////////////////
        // audio packet decoding
        else if(pkt.stream_index == audio_index) {
            // XXX(shammash): audio decoding seems to depend on screen properties, so
            // we skip decoding audio frames if there's no screen
            // long unsigned int m_SampleRate = screen->m_SampleRate?*(screen->m_SampleRate):48000;
            // ringbuffer_write(screen->audio, (const char*)audio_float_buf, samples*sizeof(float));
            // ... and so on ...
            if(use_audio && screen) {
                int data_size;
                len1 = decode_audio_packet(&data_size);
                if (len1 > 0) {
                    int samples = data_size/sizeof(uint16_t);
                    /* Output rate comes from the screen when available;
                     * 48000 is the fallback. */
                    long unsigned int m_SampleRate = screen->m_SampleRate?*(screen->m_SampleRate):48000;
                    double m_ResampleRatio = (double)(m_SampleRate)/(double)audio_samplerate;
                    long unsigned max_buf = ceil(AVCODEC_MAX_AUDIO_FRAME_SIZE * m_ResampleRatio * audio_channels);
                    /* Grow the resample buffer when more space is needed. */
                    if (audio_resampled_buf_len < max_buf) {
                        if (audio_resampled_buf)
                            free (audio_resampled_buf);
                        audio_resampled_buf = (float*) malloc(max_buf * sizeof(float));
                        audio_resampled_buf_len = max_buf;
                    }
                    /* s16 -> float conversion for the ring buffer / resampler. */
                    src_short_to_float_array ((const short*) audio_buf, audio_float_buf, samples);
                    if (m_ResampleRatio == 1.0) {
                        /* Rates match: hand the float samples straight over. */
                        ringbuffer_write(screen->audio, (const char*)audio_float_buf, samples*sizeof(float));
                        // NOTE(review): tm is malloc'd here and never freed (leak);
                        // its value is only used by the commented-out trace below.
                        time_t *tm = (time_t *)malloc(sizeof(time_t));
                        time (tm);
                        // std::cerr << "-- VL:" << asctime(localtime(tm));
                    } else {
                        src_short_to_float_array ((const short*) audio_buf, audio_float_buf, samples);
                        SRC_DATA src_data;
                        int offset = 0;
                        /* Resample in chunks until (nearly) all input frames
                         * have been consumed. */
                        do {
                            src_data.input_frames = samples/audio_channels;
                            src_data.output_frames = audio_resampled_buf_len/audio_channels - offset;
                            src_data.end_of_input = 0;
                            src_data.src_ratio = m_ResampleRatio;
                            src_data.input_frames_used = 0;
                            src_data.output_frames_gen = 0;
                            src_data.data_in = audio_float_buf + offset;
                            src_data.data_out = audio_resampled_buf + offset;
                            src_simple (&src_data, SRC_SINC_MEDIUM_QUALITY, audio_channels) ;
                            ringbuffer_write(screen->audio, (const char*)audio_resampled_buf,
                                             src_data.output_frames_gen * audio_channels *sizeof(float));
                            offset += src_data.input_frames_used * audio_channels;
                            samples -= src_data.input_frames_used * audio_channels;
                            if (samples>0)
                                warning("resampling left: %i < %i", src_data.input_frames_used, samples/audio_channels);
                        } while (samples > audio_channels);
                    }
                }
            }
        }
        av_free_packet(&pkt);
        /* sun's good. love's bad */
    } // end of while(!got_it)

    return frame_fifo.picture[fifo_position-1]->data[0];
}
int main(int argc, char* argv[]) { // SetUnhandledExceptionFilter(callback); SDL_Event event; VideoState* is = NULL; is = (VideoState*) av_mallocz(sizeof(VideoState)); if (argc < 2) { fprintf(stderr, "Usage: test <file>\n"); exit(1); } av_register_all(); if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could't not initialize SDL - %s\n", SDL_GetError()); exit(1); } screen = SDL_CreateWindow("Hello World", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, WINDOW_WIDTH, WINDOW_HEIGHT, SDL_WINDOW_OPENGL); if (!screen) { printf("Could not initialize SDL -%s\n", SDL_GetError()); return -1; } render = SDL_CreateRenderer(screen, -1, 0); screen_mutex = SDL_CreateMutex(); av_strlcpy(is->filename, argv[1], sizeof(is->filename)); is->pictq_mutex = SDL_CreateMutex(); is->pictq_cond = SDL_CreateCond(); schedule_refresh(is, 40); is->av_sync_type = DEFAULT_AV_SYNC_TYPE; is->parse_tid = SDL_CreateThread(decode_thread, "decode_thread", is); if (!is->parse_tid) { av_free(is); return -1; } av_init_packet(&flush_pkt); flush_pkt.data = (unsigned char*) "FLUSH"; for (;;) { double incr, pos; SDL_WaitEvent(&event); switch (event.type) { case SDL_KEYDOWN: switch (event.key.keysym.sym) { case SDLK_LEFT: incr = -10.0; goto do_seek; case SDLK_RIGHT: incr = 10.0; goto do_seek; case SDLK_UP: incr = 60.0; goto do_seek; case SDLK_DOWN: incr = -60.0; goto do_seek; do_seek: if (global_video_state) { pos = get_master_clock(global_video_state); pos += incr; stream_seek(global_video_state, (int64_t) (pos * AV_TIME_BASE), incr); } break; default: break; } break; case FF_QUIT_EVENT: case SDL_QUIT: is->quit = 1; SDL_Quit(); return 0; break; case FF_REFRESH_EVENT: video_refresh_timer(event.user.data1); break; default: break; } } return 0; }
void ffmovie_setdisplay(FFMovie *movie, SDL_Surface *dest, SDL_Rect *rect) { /*MAIN THREAD*/ if(!movie->video_st || movie->abort_request || movie->context==NULL) { /*This movie has no video stream, or finished*/ return; } SDL_LockMutex(movie->dest_mutex); if(movie->dest_overlay) { /*clean any existing overlay*/ SDL_FreeYUVOverlay(movie->dest_overlay); movie->dest_overlay = NULL; } if(!dest) { /*no destination*/ movie->dest_overlay = NULL; } else { if(rect) { movie->dest_rect.x = rect->x; movie->dest_rect.y = rect->y; movie->dest_rect.w = rect->w; movie->dest_rect.h = rect->h; } else { movie->dest_rect.x = 0; movie->dest_rect.y = 0; movie->dest_rect.w = 0; movie->dest_rect.h = 0; } if(movie->dest_rect.w == 0) { movie->dest_rect.w = MIN(movie->video_st->codec.width, dest->w); } if(movie->dest_rect.h == 0) { movie->dest_rect.h = MIN(movie->video_st->codec.height, dest->h); } #if 0 /* XXX: use generic function */ /* XXX: disable overlay if no hardware acceleration or if RGB format */ switch(movie->video_st->codec.pix_fmt) { case PIX_FMT_YUV420P: case PIX_FMT_YUV422P: case PIX_FMT_YUV444P: case PIX_FMT_YUV422: case PIX_FMT_YUV410P: case PIX_FMT_YUV411P: is_yuv = 1; break; default: is_yuv = 0; break; } #endif movie->dest_surface = dest; movie->dest_overlay = SDL_CreateYUVOverlay( movie->video_st->codec.width, movie->video_st->codec.height, SDL_YV12_OVERLAY, dest); } SDL_UnlockMutex(movie->dest_mutex); /*set display time to now, force redraw*/ movie->dest_showtime = get_master_clock(movie); }
/* Add or subtract samples to get a better sync, return new audio buffer size */ int synchronize_audio(VideoState *is, short *samples, int samples_size, double pts) { int n = 0; double ref_clock = 0; n = 2 * is->audio_st->codec->channels; // If AV_SYNC_AUDIO_MASTER, that means we sync video with audio. // So no need to adjust audio render, just render as samplea rate // and channel count requires. static int syncAudioFlag = 1; if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) { if(1 == syncAudioFlag){ fprintf(stderr, "sync type is not AV_SYNC_AUDIO_MASTER\n"); syncAudioFlag = 0; } double diff, avg_diff; int wanted_size, min_size, max_size /*, nb_samples */; ref_clock = get_master_clock(is); diff = get_audio_clock(is) - ref_clock; if(diff < AV_NOSYNC_THRESHOLD) { // accumulate the diffs is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum; if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) { is->audio_diff_avg_count++; } else { /* * Please refer to http://dranger.com/ffmpeg/tutorial06.html to understand * this complex code logic. * * We're going to take an average of how far each of those have been out of sync. * So for example, the first call might have shown we were out of sync by 40ms, * the next by 50ms, and so on. But we're not going to take a simple average * because the most recent values are more important than the previous ones. * So we're going to use a fractional coefficient, say c, * and sum the differences like this: diff_sum = new_diff + diff_sum*c. * When we are ready to find the average difference, * we simply calculate avg_diff = diff_sum * (1-c). 
*/ avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef); if(fabs(avg_diff) >= is->audio_diff_threshold) { wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n); min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100); max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100); if(wanted_size < min_size) { wanted_size = min_size; } else if (wanted_size > max_size) { wanted_size = max_size; } if(wanted_size < samples_size) { /* remove samples */ samples_size = wanted_size; } else if(wanted_size > samples_size) { uint8_t *samples_end, *q; int nb; /* add samples by copying final sample*/ nb = (samples_size - wanted_size); samples_end = (uint8_t *)samples + samples_size - n; q = samples_end + n; while(nb > 0) { memcpy(q, samples_end, n); q += n; nb -= n; } samples_size = wanted_size; } } } } else { /* difference is TOO big; reset diff stuff */ is->audio_diff_avg_count = 0; is->audio_diff_cum = 0; } } return samples_size; }