/* Add or subtract samples to get a better sync, return new audio buffer size */
int synchronize_audio(VideoState *is, short *samples, int samples_size, double pts) {
  int n;
  double ref_clock;

  n = 2 * is->audio_ctx->channels;

  if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
    double diff, avg_diff;
    int wanted_size, min_size, max_size /*, nb_samples */;

    ref_clock = get_master_clock(is);
    diff = get_audio_clock(is) - ref_clock;

    if(diff < AV_NOSYNC_THRESHOLD) {
      // accumulate the diffs
      is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
      if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
        is->audio_diff_avg_count++;
      } else {
        avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
        if(fabs(avg_diff) >= is->audio_diff_threshold) {
          wanted_size = samples_size + ((int)(diff * is->audio_ctx->sample_rate) * n);
          /* divide last so integer arithmetic doesn't collapse the
             percentage factor to 0 or 1 */
          min_size = samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
          max_size = samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
          if(wanted_size < min_size) {
            wanted_size = min_size;
          } else if(wanted_size > max_size) {
            wanted_size = max_size;
          }
          if(wanted_size < samples_size) {
            /* remove samples */
            samples_size = wanted_size;
          } else if(wanted_size > samples_size) {
            uint8_t *samples_end, *q;
            int nb;
            /* add samples by copying the final sample; nb must be positive,
               and the caller's buffer must have headroom for the extra bytes */
            nb = wanted_size - samples_size;
            samples_end = (uint8_t *)samples + samples_size - n;
            q = samples_end + n;
            while(nb > 0) {
              memcpy(q, samples_end, n);
              q += n;
              nb -= n;
            }
            samples_size = wanted_size;
          }
        }
      }
    } else {
      /* difference is TOO big; reset diff stuff */
      is->audio_diff_avg_count = 0;
      is->audio_diff_cum = 0;
    }
  }
  return samples_size;
}
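For context, the caller of the function above usually sits in the SDL audio callback: the freshly decoded buffer is run through synchronize_audio() right before it is copied into SDL's output stream. The sketch below assumes a dranger-style layout (an audio_decode_frame() helper plus audio_buf / audio_buf_index / audio_buf_size fields on VideoState); it shows the shape of the call, not the exact caller behind this snippet.

/* Sketch only: audio_decode_frame() and the audio_buf fields are assumptions.
 * audio_buf is assumed to be a fixed array with headroom for added samples. */
void audio_callback(void *userdata, Uint8 *stream, int len) {
  VideoState *is = (VideoState *)userdata;
  int len1, audio_size;
  double pts;

  while(len > 0) {
    if(is->audio_buf_index >= is->audio_buf_size) {
      /* all decoded data has been sent; decode more */
      audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
      if(audio_size < 0) {
        /* on error, output silence */
        is->audio_buf_size = 1024;
        memset(is->audio_buf, 0, is->audio_buf_size);
      } else {
        /* shrink or grow the buffer before it is handed to SDL */
        audio_size = synchronize_audio(is, (short *)is->audio_buf, audio_size, pts);
        is->audio_buf_size = audio_size;
      }
      is->audio_buf_index = 0;
    }
    len1 = is->audio_buf_size - is->audio_buf_index;
    if(len1 > len)
      len1 = len;
    memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
    len -= len1;
    stream += len1;
    is->audio_buf_index += len1;
  }
}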
void video_refresh_timer(void *userdata) {
  VideoState *is = (VideoState *)userdata;
  VideoPicture *vp;
  double actual_delay, delay, sync_threshold, ref_clock, diff;

  if(is->video_st) {
    if(is->pictq_size == 0) {
      schedule_refresh(is, 1);
    } else {
      vp = &is->pictq[is->pictq_rindex];

      delay = vp->pts - is->frame_last_pts; /* the pts from last time */
      if(delay <= 0 || delay >= 1.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
      }
      /* save for next time */
      is->frame_last_delay = delay;
      is->frame_last_pts = vp->pts;

      /* update delay to sync to audio */
      ref_clock = get_audio_clock(is);
      diff = vp->pts - ref_clock;

      /* Skip or repeat the frame. Take delay into account.
         FFPlay still doesn't "know if this is the best guess." */
      sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
      if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
        if(diff <= -sync_threshold) {
          delay = 0;
        } else if(diff >= sync_threshold) {
          delay = 2 * delay;
        }
      }
      is->frame_timer += delay;
      /* compute the REAL delay */
      actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
      if(actual_delay < 0.010) {
        /* Really it should skip the picture instead */
        actual_delay = 0.010;
      }
      schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

      /* show the picture! */
      video_display(is);

      /* update queue for next picture! */
      if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
        is->pictq_rindex = 0;
      }
      SDL_LockMutex(is->pictq_mutex);
      is->pictq_size--;
      SDL_CondSignal(is->pictq_cond);
      SDL_UnlockMutex(is->pictq_mutex);
    }
  } else {
    schedule_refresh(is, 100);
  }
}
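This function and the variants further down re-arm themselves through schedule_refresh(). A minimal sketch of that helper, assuming the usual SDL_AddTimer plus custom refresh event pattern; FF_REFRESH_EVENT (e.g. SDL_USEREVENT + 1) and the main-loop dispatch back into video_refresh_timer() are assumptions, not code shown in this section.

/* Sketch only: assumes the main loop calls video_refresh_timer(event.user.data1)
 * when it receives FF_REFRESH_EVENT. */
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
  SDL_Event event;
  event.type = FF_REFRESH_EVENT;
  event.user.data1 = opaque;
  SDL_PushEvent(&event);
  return 0; /* returning 0 cancels the timer: each shot fires exactly once */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay) {
  SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

Because the callback returns 0, every refresh schedules exactly one future event, which matches the one-shot timer behavior described in the comments of the later variant.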
double video_master_clock(void)
{
  if(audio_codec_context)
    /* Sync video to audio, if we have it */
    return get_audio_clock();
  else
    /* Otherwise, sync to the video clock (based on frame rate) */
    return video_clock;
}
double get_master_clock(VideoState *is) {
  if(is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
    return get_video_clock(is);
  } else if(is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
    return get_audio_clock(is);
  } else {
    return get_external_clock(is);
  }
}
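For reference, the three clocks this dispatcher selects between are usually implemented roughly as follows in the dranger-style players these snippets come from. The field names (video_current_pts, video_current_pts_time, audio_clock, audio_buf_index, audio_buf_size, audio_ctx) are assumptions borrowed from that layout, and some variants measure the external clock from a stored start time (see the external_clock_start field further down) rather than raw av_gettime(); treat this as a sketch, not the code behind the snippets above.

/* Sketch only: VideoState field names are assumed. */
double get_video_clock(VideoState *is) {
  /* pts of the last shown frame plus the wall-clock time since it was stored */
  double delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
  return is->video_current_pts + delta;
}

double get_external_clock(VideoState *is) {
  /* a free-running system clock, in seconds */
  return av_gettime() / 1000000.0;
}

double get_audio_clock(VideoState *is) {
  /* pts at the end of the decoded data, pulled back by however many
     bytes are still waiting in the output buffer */
  double pts = is->audio_clock;
  int hw_buf_size = is->audio_buf_size - is->audio_buf_index;
  int n = 2 * is->audio_ctx->channels;
  int bytes_per_sec = is->audio_st ? is->audio_ctx->sample_rate * n : 0;
  if(bytes_per_sec)
    pts -= (double)hw_buf_size / bytes_per_sec;
  return pts;
}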
/*
 * Entered each time the refresh timer fires: the timer posts FF_REFRESH_EVENT,
 * and handling that event calls this function. Each timer fires only once and
 * then expires, but this function arms a new one before returning.
 * It takes one VideoPicture from is->pictq, displays it, and then advances the
 * queue's read index by one.
 */
void video_refresh_timer(void *userdata) {
  VideoState *is = (VideoState *)userdata;
  VideoPicture *vp;
  double actual_delay, delay, sync_threshold, ref_clock, diff;

  if (is->video_st) {
    if (is->pictq_size == 0) {
      schedule_refresh(is, 1);
    } else {
      vp = &is->pictq[is->pictq_rindex];

      delay = vp->pts - is->frame_last_pts; /* the pts from last time */
      is->frame_last_pts = vp->pts;

      /* ----------- */
      /* audio/video sync */
      ref_clock = get_audio_clock(is);
      diff = vp->pts - ref_clock;
      if (diff <= -0.015) {
        delay = 0;
      } else if (diff >= 0.015) {
        delay = 2 * delay;
      }
      /* ----------- */

      if (delay == 0) {
        count_delay_is_zero++;
        delay = 0.010;
      }
      count_pict++;
      printf("delay==0 percentage is %lf\n",
             (double)count_delay_is_zero / count_pict);

      schedule_refresh(is, (int)(delay * 1000 + 0.5));

      /* show the picture! */
      video_display(is);

      /* update queue for next picture! */
      if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
        is->pictq_rindex = 0;
      }
      SDL_LockMutex(is->pictq_mutex);
      is->pictq_size--;
      SDL_CondSignal(is->pictq_cond);
      SDL_UnlockMutex(is->pictq_mutex);
    }
  } else {
    schedule_refresh(is, 100);
  }
}
double get_master_clock(VideoState *is) {
  double masterClock;

  if(is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
    masterClock = get_video_clock(is);
  } else if(is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
    masterClock = get_audio_clock(is);
  } else {
    masterClock = get_external_clock(is);
  }
  // fprintf(stderr, "---> get_master_clock: %.8f\n", masterClock);
  return masterClock;
}
int sync_audio(Media *audio, short *samples, int samples_size, double pts) {
  int n;
  double ref_clk;
  double diff, avg_diff;
  int wanted_size, min_size, max_size /*, nb_samples */;

  n = 2 * audio->stream->codec->channels;

  ref_clk = get_master_clock();
  diff = get_audio_clock(audio) - ref_clk;
  if(diff < AV_NOSYNC_THRESHOLD) {
    audio->audio_diff_cum = diff + audio->audio_diff_avg_coef * audio->audio_diff_cum;
    if(audio->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
      audio->audio_diff_avg_count++;
    } else {
      avg_diff = audio->audio_diff_cum * (1.0 - audio->audio_diff_avg_coef);
      if(fabs(avg_diff) >= audio->audio_diff_threshold) {
        wanted_size = samples_size + ((int)(diff * audio->stream->codec->sample_rate) * n);
        /* divide last so integer arithmetic doesn't collapse the
           percentage factor to 0 or 1 */
        min_size = samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
        max_size = samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
        if(wanted_size < min_size) {
          wanted_size = min_size;
        } else if(wanted_size > max_size) {
          wanted_size = max_size;
        }
        if(wanted_size < samples_size) {
          samples_size = wanted_size;
        } else if(wanted_size > samples_size) {
          uint8_t *samples_end, *q;
          int nb;
          /* add samples by repeating the final sample; nb must be positive */
          nb = wanted_size - samples_size;
          samples_end = (uint8_t *)samples + samples_size - n;
          q = samples_end + n;
          while(nb > 0) {
            memcpy(q, samples_end, n);
            q += n;
            nb -= n;
          }
          samples_size = wanted_size;
        }
      }
    }
  } else {
    audio->audio_diff_avg_count = 0;
    audio->audio_diff_cum = 0;
  }
  return samples_size;
}
int synchronize_audio(VideoState *is, short *samples, int samples_size, double pts) {
  if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
    int n;
    double ref_clock;
    double diff, avg_diff;
    int wanted_size, min_size, max_size, nb_samples; /* unused: this trimmed
       variant only estimates the drift and never resizes the buffer */

    n = 2 * is->audio_st->codec->channels;

    ref_clock = get_master_clock(is);
    diff = get_audio_clock(is) - ref_clock;
    if(diff < AV_NOSYNC_THRESHOLD) {
      // accumulate the diffs
      is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
      if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
        is->audio_diff_avg_count++;
      } else {
        avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
      }
    } else {
      is->audio_diff_avg_count = 0;
      is->audio_diff_cum = 0;
    }
  }
  return samples_size;
}
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(FFMovie *movie, short *samples,
                             int samples_size1, double pts)
{
  /* SDL AUDIO THREAD */
  int n, samples_size;
  double ref_clock;
  double diff, avg_diff;
  int wanted_size, min_size, max_size, nb_samples;

  n = 2 * movie->audio_st->codec.channels;
  samples_size = samples_size1;

  /* try to remove or add samples to correct the clock */
  ref_clock = get_master_clock(movie);
  diff = get_audio_clock(movie) - ref_clock;

  if (diff < AV_NOSYNC_THRESHOLD) {
    movie->audio_diff_cum = diff + movie->audio_diff_avg_coef * movie->audio_diff_cum;

    if (movie->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
      /* not enough measures to have a correct estimate */
      movie->audio_diff_avg_count++;
    } else {
      /* estimate the A-V difference */
      avg_diff = movie->audio_diff_cum * (1.0 - movie->audio_diff_avg_coef);

      if (fabs(avg_diff) >= movie->audio_diff_threshold) {
        wanted_size = samples_size + ((int)(diff * movie->audio_st->codec.sample_rate) * n);
        nb_samples = samples_size / n;

        min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
        max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
        if (wanted_size < min_size)
          wanted_size = min_size;
        else if (wanted_size > max_size)
          wanted_size = max_size;

        /* add or remove samples to correct the sync */
        if (wanted_size < samples_size) {
          /* remove samples */
          samples_size = wanted_size;
        } else if (wanted_size > samples_size) {
          uint8_t *samples_end, *q;
          int nb;

          /* add samples; nb must be positive for the copy loop to run */
          nb = wanted_size - samples_size;
          samples_end = (uint8_t *)samples + samples_size - n;
          q = samples_end + n;
          while (nb > 0) {
            memcpy(q, samples_end, n);
            q += n;
            nb -= n;
          }
          samples_size = wanted_size;
        }
      }
    }
  } else {
    /* too big difference: may be initial PTS errors, so reset A-V filter */
    movie->audio_diff_avg_count = 0;
    movie->audio_diff_cum = 0;
  }
  return samples_size;
}
void video_refresh_timer(void *userdata) {
  VideoState *is = (VideoState *)userdata;
  VideoPicture *vp;
  double actual_delay, delay, sync_threshold, ref_clock, diff;

  if(is->video_st) {
    if(is->pictq_size == 0) {
      fprintf(stderr, "%s pictq_size is 0, schedule another refresh\n", __FUNCTION__);
      schedule_refresh(is, 1);
    } else {
      vp = &is->pictq[is->pictq_rindex];

      delay = vp->pts - is->frame_last_pts; /* the pts from last time */
      fprintf(stderr, "delay 1: %.8f\n", delay);
      if(delay <= 0 || delay >= 1.0) { // larger than 1 second or smaller than 0
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
        fprintf(stderr, "delay 2: %.8f\n", delay);
      }
      /* save for next time */
      is->frame_last_delay = delay;
      is->frame_last_pts = vp->pts;

      /* update delay to sync to audio */
      ref_clock = get_audio_clock(is);
      diff = vp->pts - ref_clock;
      fprintf(stderr, "audio video diff: %.8f\n", diff);

      /* Skip or repeat the frame. Take delay into account.
         FFPlay still doesn't "know if this is the best guess." */
      sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
      if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
        if(diff <= -sync_threshold) {
          delay = 0;
        } else if(diff >= sync_threshold) {
          delay = 2 * delay;
        }
      }
      is->frame_timer += delay;
      /* compute the REAL delay */
      actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
      fprintf(stderr, "actual_delay %.8f\n", actual_delay);
      if(actual_delay < 0.010) { // smaller than 10 ms
        /* Really it should skip the picture instead */
        actual_delay = 0.010;
      }
      // The +0.5 rounds the delay to the nearest millisecond before the cast
      // to int truncates it; the same idiom shows up in other multimedia
      // framework code, such as stagefright.
      fprintf(stderr, "%s, delay: %.8f\n", __FUNCTION__, actual_delay * 1000 + 0.5);
      // Video is ahead of the audio clock, so the render is delayed:
      // after showing a frame, figure out when the next frame should be shown.
      schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

      /* show the picture! */
      video_display(is);
      fprintf(stderr, "\n---------------------------------------------------------------------\n");

      /* update queue for next picture! */
      if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
        is->pictq_rindex = 0;
      }
      SDL_LockMutex(is->pictq_mutex);
      is->pictq_size--;
      SDL_CondSignal(is->pictq_cond);
      SDL_UnlockMutex(is->pictq_mutex);
    }
  } else {
    fprintf(stderr, "%s, schedule_refresh for another 100 ms\n", __FUNCTION__);
    schedule_refresh(is, 100);
  }
}
static void video_refresh_timer(void *userdata)
{
  VideoState *is = (VideoState *)userdata;
  VideoPicture *vp;
  double actual_delay, delay, sync_threshold, ref_clock, diff;

  if (!is->first) return;
  if (!is->video_st) return;
  if (is->pictq_size == 0) return;
  if (is->paused) return;
  if (get_master_clock(is) < is->show_next) return;

  vp = &is->pictq[is->pictq_rindex];

  is->video_current_pts = vp->pts;
  is->video_current_pts_time = av_gettime();

  delay = vp->pts - is->frame_last_pts; /* the pts from last time */
  if (delay <= 0 || delay >= 1.0) {
    /* if incorrect delay, use previous one */
    delay = is->frame_last_delay;
  }
  /* save for next time */
  is->frame_last_delay = delay;
  is->frame_last_pts = vp->pts;

  /* update delay to sync to audio if not master source */
  if (is->av_sync_type != AV_SYNC_VIDEO_MASTER) {
    ref_clock = get_master_clock(is);
    diff = vp->pts - ref_clock;

    /* Skip or repeat the frame. Take delay into account */
    sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
    if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
      if (diff <= -sync_threshold) {
        delay = 0;
      } else if (diff >= sync_threshold) {
        delay = 2 * delay;
      }
    }
  }

  is->frame_timer += delay;
  /* compute the REAL delay */
  actual_delay = is->frame_timer - (av_gettime() / 1000000.0);

  if (!vp->dropped && vp->bmp) {
    is->video->current_frame = vp->bmp;
    /* Can be NULL or wrong size, will be (re-)allocated as needed. */
    vp->bmp = is->shown.bmp;
    /* That way it won't be overwritten. */
    is->shown.bmp = is->video->current_frame;

    is->video->position = get_master_clock(is);
    is->video->video_position = get_video_clock(is);
    is->video->audio_position = get_audio_clock(is);

    is->show_next = is->video->position + actual_delay;
    al_signal_cond(is->timer_cond);

    if (is->video_st->codec->sample_aspect_ratio.num == 0) {
      is->video->aspect_ratio = 0;
    } else {
      is->video->aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
                                is->video_st->codec->width / is->video_st->codec->height;
    }
    if (is->video->aspect_ratio <= 0.0) {
      is->video->aspect_ratio = (float)is->video_st->codec->width /
                                (float)is->video_st->codec->height;
    }
  } else {
    is->dropped_count++;
  }

  //printf("[%d] %f %s\n", is->pictq_rindex,
  //       actual_delay, vp->dropped ? "dropped" : "shown");

  /* update queue for next picture! */
  if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
    is->pictq_rindex = 0;
  }
  al_lock_mutex(is->pictq_mutex);
  is->pictq_size--;
  al_signal_cond(is->pictq_cond);
  al_unlock_mutex(is->pictq_mutex);

  /* We skipped a frame... let's grab more until we catch up. */
  if (actual_delay < 0)
    video_refresh_timer(userdata);
}
/* Add or subtract samples to get a better sync, return new audio buffer size */
static int synchronize_audio(VideoState *is, short *samples, int samples_size,
                             double pts)
{
  int n;
  double ref_clock;
  double diff;
  (void)pts;

  if (is->after_seek_sync) {
    /* Audio seems to be off for me after seeking, but skipping
     * video is less annoying than audio noise after the seek
     * when synching to the external clock. */
    is->external_clock_start = av_gettime() - get_audio_clock(is) * 1000000.0;
    is->after_seek_sync = false;
  }

  n = 2 * is->audio_st->codec->channels;

  if (is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
    double avg_diff;
    int wanted_size, min_size, max_size;

    ref_clock = get_master_clock(is);
    diff = get_audio_clock(is) - ref_clock;
    //printf("%f, %f\n", diff, get_audio_clock(is));

    if (diff < AV_NOSYNC_THRESHOLD) {
      // accumulate the diffs
      is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
      if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
        is->audio_diff_avg_count++;
      } else {
        avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

        if (fabs(avg_diff) >= is->audio_diff_threshold) {
          //printf("AV_NOSYNC_THRESHOLD %f\n", avg_diff);
          wanted_size = samples_size + ((int)(avg_diff * is->audio_st->codec->sample_rate) * n);
          min_size = samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
          min_size &= ~3;
          max_size = samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
          max_size &= ~3;

          if (wanted_size < min_size) {
            wanted_size = min_size;
          } else if (wanted_size > max_size) {
            wanted_size = max_size;
          }
          if (wanted_size < samples_size) {
            /* remove samples */
            samples_size = wanted_size;
          } else if (wanted_size > samples_size) {
            uint8_t *samples_end, *q;
            int nb;

            /* add samples by copying the final sample; nb must be positive */
            nb = wanted_size - samples_size;
            samples_end = (uint8_t *)samples + samples_size - n;
            q = samples_end + n;
            while (nb > 0) {
              memcpy(q, samples_end, n);
              q += n;
              nb -= n;
            }
            samples_size = wanted_size;
          }
        }
      }
    } else {
      /* difference is TOO big; reset diff stuff */
      is->audio_diff_avg_count = 0;
      is->audio_diff_cum = 0;
    }
  }
  return samples_size;
}
/* Add or subtract samples to get a better sync, return new audio buffer size */
int synchronize_audio(VideoState *is, short *samples, int samples_size, double pts) {
  int n = 0;
  double ref_clock = 0;

  n = 2 * is->audio_st->codec->channels;

  // If AV_SYNC_AUDIO_MASTER, video is synced to audio, so there is no need
  // to adjust the audio rendering; just render at the sample rate and
  // channel count the stream requires.
  static int syncAudioFlag = 1;
  if(is->av_sync_type != AV_SYNC_AUDIO_MASTER) {
    if(1 == syncAudioFlag) {
      fprintf(stderr, "sync type is not AV_SYNC_AUDIO_MASTER\n");
      syncAudioFlag = 0;
    }

    double diff, avg_diff;
    int wanted_size, min_size, max_size /*, nb_samples */;

    ref_clock = get_master_clock(is);
    diff = get_audio_clock(is) - ref_clock;

    if(diff < AV_NOSYNC_THRESHOLD) {
      // accumulate the diffs
      is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
      if(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
        is->audio_diff_avg_count++;
      } else {
        /*
         * See http://dranger.com/ffmpeg/tutorial06.html for the reasoning.
         *
         * We take an average of how far out of sync each call has been:
         * the first call might be off by 40 ms, the next by 50 ms, and so on.
         * A simple average would weight old measurements as much as recent
         * ones, so instead we use a fractional coefficient c and sum the
         * differences as diff_sum = new_diff + diff_sum * c. When we are
         * ready for the average, we compute avg_diff = diff_sum * (1 - c).
         */
        avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
        if(fabs(avg_diff) >= is->audio_diff_threshold) {
          wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
          /* divide last so integer arithmetic doesn't collapse the
             percentage factor to 0 or 1 */
          min_size = samples_size * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
          max_size = samples_size * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
          if(wanted_size < min_size) {
            wanted_size = min_size;
          } else if(wanted_size > max_size) {
            wanted_size = max_size;
          }
          if(wanted_size < samples_size) {
            /* remove samples */
            samples_size = wanted_size;
          } else if(wanted_size > samples_size) {
            uint8_t *samples_end, *q;
            int nb;
            /* add samples by copying the final sample; nb must be positive */
            nb = wanted_size - samples_size;
            samples_end = (uint8_t *)samples + samples_size - n;
            q = samples_end + n;
            while(nb > 0) {
              memcpy(q, samples_end, n);
              q += n;
              nb -= n;
            }
            samples_size = wanted_size;
          }
        }
      }
    } else {
      /* difference is TOO big; reset diff stuff */
      is->audio_diff_avg_count = 0;
      is->audio_diff_cum = 0;
    }
  }
  return samples_size;
}
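The weighted running average described in the comment above is easy to check with a few numbers. The stand-alone sketch below is purely illustrative: the coefficient exp(log(0.01) / 20), about 0.794, mirrors how these players typically initialize audio_diff_avg_coef with AUDIO_DIFF_AVG_NB = 20, but neither value is taken from the snippets in this section.

/* Stand-alone illustration of the exponentially weighted diff average.
 * Compile with: cc ema_demo.c -lm   (file name is just an example). */
#include <math.h>
#include <stdio.h>

int main(void) {
  /* a sample's weight decays to roughly 1% after 20 further samples */
  double c = exp(log(0.01) / 20);
  double diff_sum = 0.0;
  /* pretend the last five calls measured these A-V offsets, in seconds */
  double diffs[] = { 0.040, 0.050, 0.045, 0.060, 0.055 };

  for (int i = 0; i < 5; i++) {
    diff_sum = diffs[i] + diff_sum * c;   /* accumulate, newest weighted most */
    double avg = diff_sum * (1.0 - c);    /* weighted average of the history */
    printf("after sample %d: avg_diff = %.4f s\n", i + 1, avg);
  }
  return 0;
}

Note that with only a handful of samples accumulated, diff_sum * (1 - c) underestimates the true average; that is why the code above waits until audio_diff_avg_count reaches AUDIO_DIFF_AVG_NB before trusting avg_diff.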