/*
 * Advance video by one frame in --no-correct-pts mode.
 *
 * Returns the nominal duration of the decoded frame (seconds), 0 if no new
 * frame was produced (e.g. a queued VO frame was already available or the
 * frame was dropped), or -1 on demuxer EOF.
 */
static double update_video_nocorrect_pts(struct MPContext *mpctx)
{
    struct sh_video *sh_video = mpctx->sh_video;
    double frame_time = 0;
    while (1) {
        // In nocorrect-pts mode there is no way to properly time these frames
        if (load_next_vo_frame(mpctx, false))
            break;
        frame_time = sh_video->next_frame_time;
        // During playback restart the frame is shown immediately.
        if (mpctx->restart_playback)
            frame_time = 0;
        struct demux_packet *pkt = video_read_frame(mpctx);
        if (!pkt)
            return -1; // demuxer EOF
        // Keep the A/V delay accumulator in sync with video progress.
        if (mpctx->sh_audio)
            mpctx->delay -= frame_time;
        // video_read_frame can change fps (e.g. for ASF video)
        update_fps(mpctx);
        int framedrop_type = check_framedrop(mpctx, frame_time);
        void *decoded_frame = decode_video(sh_video, pkt, framedrop_type,
                                           sh_video->pts);
        talloc_free(pkt);
        if (decoded_frame) {
            filter_video(mpctx, decoded_frame);
        }
        // Only one decode attempt per call; the loop exists for the early
        // "frame already queued" break above.
        break;
    }
    return frame_time;
}
/* Decode the next audio or video frame from the input context.
 *
 * Loops over demuxed packets, dispatching each to the audio or video
 * decoder (or discarding packets from other streams), until a decoder
 * produces a frame. Returns that frame object, or #f once the demuxer
 * and the decoder's buffered frames are both exhausted. Raises a Scheme
 * error when called on an output (encoding) context. */
SCM ffmpeg_decode_audio_video(SCM scm_self)
{
  struct ffmpeg_t *self = get_self(scm_self);
  if (!is_input_context(self))
    scm_misc_error("ffmpeg-decode-audio/video",
                   "Attempt to read frame from FFmpeg output video", SCM_EOL);

  SCM result = SCM_BOOL_F;
  for (;;) {
    if (packet_empty(self))
      read_packet(self);

    /* Still empty after a read attempt: the demuxer is at EOF and we are
       draining frames buffered inside the decoder. */
    int draining = packet_empty(self);

    int index = self->pkt.stream_index;
    if (index == self->audio_stream_idx) {
      av_frame_unref(self->audio_target_frame);
      result = decode_audio(self, &self->pkt, self->audio_target_frame);
    } else if (index == self->video_stream_idx) {
      av_frame_unref(self->video_target_frame);
      result = decode_video(self, &self->pkt, self->video_target_frame);
    } else {
      /* Packet belongs to a stream we do not decode; discard it. */
      consume_packet_data(&self->pkt, self->pkt.size);
    }

    if (!scm_is_false(result))
      break; /* got a frame */
    if (draining)
      break; /* decoder exhausted too: give up and return #f */
  }
  return result;
}
/*
 * Demux/decode worker thread.
 *
 * Reads packets from the format context and feeds them to the audio/video
 * decoders until ctx->exit is set. `packet` owns the storage returned by
 * av_read_frame(); `cpacket` is a cursor copy whose data/size are advanced
 * as the decoders consume bytes, so one packet may be decoded in several
 * partial calls. Seeks and EOF are handled under ctx->seek_mutex.
 */
void *decoder_thread(void *arg)
{
    PlayerCtx *ctx = arg;
    AVPacket packet;   // owns the demuxed packet's buffer
    AVPacket cpacket;  // consumption cursor into `packet`
    size_t decoded_bytes;
    int seekid = ctx->cur_seekid;
    printf("Decoder thread started\n");
    memset(&packet, 0, sizeof(packet));
    memset(&cpacket, 0, sizeof(cpacket));
    while (!ctx->exit) {
        int new_packet = 0;
        if (cpacket.size == 0) {
            // Previous packet fully consumed: release it and fetch the next.
            if (packet.data)
                av_free_packet(&packet);
            pthread_mutex_lock(&ctx->seek_mutex);
            if (ctx->cur_seekid > seekid) {
                // A new seek request arrived since we last looked.
                printf("Seek! %f\n", ctx->seek_pos);
                av_seek_frame(ctx->fmt_ctx, -1,
                              (int64_t)(ctx->seek_pos * AV_TIME_BASE), 0);
                seekid = ctx->cur_seekid;
                // HACK! Avoid deadlock by waking up the video waiter
                pthread_mutex_lock(&ctx->v_buf_mutex);
                pthread_cond_signal(&ctx->v_buf_not_empty);
                pthread_mutex_unlock(&ctx->v_buf_mutex);
                // Drop decoder state buffered from before the seek.
                if (ctx->audio_idx != -1)
                    avcodec_flush_buffers(ctx->a_codec_ctx);
                avcodec_flush_buffers(ctx->v_codec_ctx);
            }
            if (av_read_frame(ctx->fmt_ctx, &packet) < 0) {
                // EOF: notify consumers, then sleep until a seek wakes us.
                fprintf(stderr, "EOF!\n");
                push_eof(ctx, seekid);
                pthread_cond_wait(&ctx->seek_cond, &ctx->seek_mutex);
                pthread_mutex_unlock(&ctx->seek_mutex);
                continue;
            }
            pthread_mutex_unlock(&ctx->seek_mutex);
            cpacket = packet;  // shallow copy; `packet` retains ownership
            new_packet = 1;
        }
        if (ctx->audio_idx != -1 && cpacket.stream_index == ctx->audio_idx) {
            decoded_bytes = decode_audio(ctx, &cpacket, new_packet, seekid);
        } else if (cpacket.stream_index == ctx->video_idx) {
            decoded_bytes = decode_video(ctx, &cpacket, new_packet, seekid);
        } else {
            // Uninteresting stream: swallow the whole packet.
            decoded_bytes = cpacket.size;
        }
        // Advance the cursor past whatever the decoder consumed.
        cpacket.data += decoded_bytes;
        cpacket.size -= decoded_bytes;
    }
    return NULL;
}
static double update_video_attached_pic(struct MPContext *mpctx) { struct sh_video *sh_video = mpctx->sh_video; // Try to decode the picture multiple times, until it is displayed. if (mpctx->video_out->hasframe) return -1; struct mp_image *decoded_frame = decode_video(sh_video, sh_video->gsh->attached_picture, 0, 0); if (decoded_frame) filter_video(mpctx, decoded_frame); load_next_vo_frame(mpctx, true); mpctx->sh_video->pts = MP_NOPTS_VALUE; return 0; }
//decode video packet to picture queue void *decode_video_packt_thread(void *opaque){ //decode video ,and then to put into rgb565 data queue log_chris(ANDROID_LOG_INFO ,TAG ," in decode_video_packt_thread"); media_handle_union_t * media_handle = (media_handle_union_t *) opaque; if (media_handle == NULL) { log_chris(ANDROID_LOG_ERROR ,TAG ,"media handle is null ,in decode_video_packt_thread"); return; //ok } int ret = 0; while(1){ ret = decode_video(media_handle->decode_video_var ,media_handle->ptr_format_ctx); if( media_handle->stop_mark == 1 || ret == VIDEO_PACKET_GET_NON_PACKET){ log_chris(ANDROID_LOG_ERROR ,TAG ,"==media_handle->stop_mark = %d or VIDEO_PACKET_GET_NON_PACKET ,and set finish_mark" ,media_handle->stop_mark); media_handle->finish_mark = 1; log_chris(ANDROID_LOG_ERROR ,TAG ,"== decode_video thread over"); break; //exit the xplayer decode video function } } }
/*
 * Per-call decoding thread.
 *
 * `arg` is a malloc'd two-element array {ToxAv*, CallSpecific*} which this
 * thread frees immediately. It drains the call's audio and video decode
 * queues (audio checked first), waiting on decode_cond when both are empty,
 * until call->exit is set. Queue manipulation happens under
 * decode_cond_mutex; the actual decode_audio/decode_video calls run with
 * the mutex released.
 *
 * NOTE: the `break` on call->exit leaves the loop with the mutex still
 * held — the final signal/unlock after the loop depends on that.
 */
static void *toxav_decoding(void *arg)
{
    void **pp = arg;
    ToxAv *av = pp[0];
    CallSpecific *call = pp[1];
    free(pp);

    while (1) {
        DECODE_PACKET *p;
        _Bool video = 0;

        pthread_mutex_lock(&call->decode_cond_mutex);

        if (call->exit) {
            break; // exits with the mutex held (released after the loop)
        }

        uint8_t r;
        /* first check for available packets, otherwise wait for condition*/
        r = call->audio_decode_read;
        p = call->audio_decode_queue[r];
        if (!p) {
            r = call->video_decode_read;
            p = call->video_decode_queue[r];
            if (!p) {
                // Both queues empty: sleep until a producer signals.
                pthread_cond_wait(&call->decode_cond, &call->decode_cond_mutex);
                // Re-check in the same order; p may still be NULL after this.
                r = call->audio_decode_read;
                p = call->audio_decode_queue[r];
                if (!p) {
                    r = call->video_decode_read;
                    p = call->video_decode_queue[r];
                    video = 1;
                }
            } else {
                video = 1;
            }
        }

        // Pop the slot we claimed and advance the ring-buffer read index.
        if (video) {
            if (p) { // p can be NULL here (spurious/exit wakeup)
                call->video_decode_queue[r] = NULL;
                call->video_decode_read = (r + 1) % VIDEO_DECODE_QUEUE_SIZE;
            }
        } else {
            call->audio_decode_queue[r] = NULL;
            call->audio_decode_read = (r + 1) % AUDIO_DECODE_QUEUE_SIZE;
        }

        pthread_mutex_unlock(&call->decode_cond_mutex);

        // Decode outside the lock so producers are not blocked.
        if (p) {
            if (video) {
                decode_video(av, call, p);
            } else {
                decode_audio(av, call, p);
            }
        }
    }

    // Acknowledge shutdown and wake whoever is waiting for this thread.
    call->exit = 0;
    pthread_cond_signal(&call->decode_cond);
    pthread_mutex_unlock(&call->decode_cond_mutex);

    return NULL;
}
/*
 * Decode/filter up to one video frame and update timing state.
 *
 * Returns the time delta to the previous frame (seconds), 0 when no new
 * frame is ready (or the frame was skipped during hr-seek), or -1 on EOF.
 * Frames past `endpts` are excluded from the frame-pts bookkeeping.
 */
double update_video(struct MPContext *mpctx, double endpts)
{
    struct sh_video *sh_video = mpctx->sh_video;
    struct vo *video_out = mpctx->video_out;
    sh_video->vfilter->control(sh_video->vfilter, VFCTRL_SET_OSD_OBJ,
                               mpctx->osd); // for vf_sub
    // Legacy timing modes are handled by dedicated helpers.
    if (!mpctx->opts->correct_pts)
        return update_video_nocorrect_pts(mpctx);
    if (sh_video->gsh->attached_picture)
        return update_video_attached_pic(mpctx);
    double pts;
    while (1) {
        // If a filtered frame is already queued for the VO, skip decoding.
        if (load_next_vo_frame(mpctx, false))
            break;
        pts = MP_NOPTS_VALUE;
        struct demux_packet *pkt = NULL;
        while (1) {
            pkt = demux_read_packet(mpctx->sh_video->gsh);
            if (!pkt || pkt->len)
                break;
            /* Packets with size 0 are assumed to not correspond to frames,
             * but to indicate the absence of a frame in formats like AVI
             * that must have packets at fixed timecode intervals. */
            talloc_free(pkt);
        }
        if (pkt)
            pts = pkt->pts;
        if (pts != MP_NOPTS_VALUE)
            pts += mpctx->video_offset;
        // Close to the hr-seek target: stop dropping frames.
        if (pts >= mpctx->hrseek_pts - .005)
            mpctx->hrseek_framedrop = false;
        // During hr-seek framedrop, force drop (type 1); otherwise ask the
        // regular framedrop heuristic.
        int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
                             1 : check_framedrop(mpctx, -1);
        struct mp_image *decoded_frame =
            decode_video(sh_video, pkt, framedrop_type, pts);
        talloc_free(pkt);
        if (decoded_frame) {
            determine_frame_pts(mpctx);
            filter_video(mpctx, decoded_frame);
        } else if (!pkt) {
            // EOF: flush remaining frames out of the filter chain.
            if (!load_next_vo_frame(mpctx, true))
                return -1;
        }
        break;
    }
    if (!video_out->frame_loaded)
        return 0;
    pts = video_out->next_pts;
    if (pts == MP_NOPTS_VALUE) {
        MP_ERR(mpctx, "Video pts after filters MISSING\n");
        // Try to use decoder pts from before filters
        pts = sh_video->pts;
        if (pts == MP_NOPTS_VALUE)
            pts = sh_video->last_pts;
    }
    if (endpts == MP_NOPTS_VALUE || pts < endpts)
        add_frame_pts(mpctx, pts);
    // Still before the hr-seek target: drop this frame silently.
    if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005) {
        vo_skip_frame(video_out);
        return 0;
    }
    mpctx->hrseek_active = false;
    sh_video->pts = pts;
    if (sh_video->last_pts == MP_NOPTS_VALUE)
        sh_video->last_pts = sh_video->pts;
    else if (sh_video->last_pts > sh_video->pts) {
        MP_WARN(mpctx, "Decreasing video pts: %f < %f\n",
                sh_video->pts, sh_video->last_pts);
        /* If the difference in pts is small treat it as jitter around the
         * right value (possibly caused by incorrect timestamp ordering) and
         * just show this frame immediately after the last one.
         * Treat bigger differences as timestamp resets and start counting
         * timing of later frames from the position of this one.
         */
        if (sh_video->last_pts - sh_video->pts > 0.5)
            sh_video->last_pts = sh_video->pts;
        else
            sh_video->pts = sh_video->last_pts;
    } else if (sh_video->pts >= sh_video->last_pts + 60) {
        // Assume a PTS difference >= 60 seconds is a discontinuity.
        MP_WARN(mpctx, "Jump in video pts: %f -> %f\n",
                sh_video->last_pts, sh_video->pts);
        sh_video->last_pts = sh_video->pts;
    }
    double frame_time = sh_video->pts - sh_video->last_pts;
    sh_video->last_pts = sh_video->pts;
    // Keep the A/V delay accumulator in sync with video progress.
    if (mpctx->sh_audio)
        mpctx->delay -= frame_time;
    return frame_time;
}
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Feed one demuxed packet (or EOF) through decoding or stream copy.
 *
 * For streams that need decoding, the packet is consumed in a loop (one
 * packet may contain several frames); dts/pts/next_dts/next_pts are kept in
 * AV_TIME_BASE units. For stream copy, timestamps are advanced from frame
 * size / frame rate / packet duration and the packet is forwarded to every
 * matching output stream. Returns 0 on success or a negative error code.
 */
static int output_packet(InputStream *ist, const AVPacket *pkt)
{
    int ret = 0, i;
    int got_output = 0;
    AVPacket avpkt; // working copy; data/size advance as the decoder consumes
    if (!ist->saw_first_ts) {
        // Initialize the timestamp state on the very first packet. Video
        // decoders with B-frame delay start "behind" by has_b_frames frames.
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }
    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;
    if (pkt == NULL) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }
    if (pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        // For decoded video, pts is derived later from decoder output.
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }
    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int duration;
    handle_eof:
        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;
        if (avpkt.size && avpkt.size != pkt->size &&
            !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
            // Decoder consumed only part of the packet; warn once loudly.
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video (ist, &avpkt, &got_output);
            // Derive the frame duration: packet duration if present,
            // otherwise from the codec time base and repeat/ticks info.
            if (avpkt.duration) {
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->time_base.num != 0 && ist->dec_ctx->time_base.den != 0) {
                int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                duration = ((int64_t)AV_TIME_BASE * ist->dec_ctx->time_base.num * ticks) / ist->dec_ctx->time_base.den;
            } else
                duration = 0;
            if(ist->dts != AV_NOPTS_VALUE && duration) {
                ist->next_dts += duration;
            }else
                ist->next_dts = AV_NOPTS_VALUE;
            if (got_output)
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return -1;
        }
        if (ret < 0)
            return ret;
        // Timestamps above applied to the first frame only; clear them so a
        // second frame from the same packet doesn't reuse them.
        avpkt.dts= avpkt.pts= AV_NOPTS_VALUE;
        // touch data and size only if not EOF
        if (pkt) {
            // Non-audio decoders always consume the whole packet.
            if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
                ret = avpkt.size;
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
    }
    /* handle stream copy */
    if (!ist->decoding_needed) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) / ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->time_base.num != 0) {
                int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->time_base.num * ticks) / ist->dec_ctx->time_base.den;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    // Forward the packet to every output stream doing a plain stream copy.
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;
        do_streamcopy(ist, ost, pkt);
    }
    return 0;
}
// it is running in decoder thread void as_netstream::run() { set_status(status, playStart); m_start_time = now(); m_video_time = 0; m_status = PLAY; while (m_status == PLAY || m_status == PAUSE) { if (m_status == PAUSE) { double paused = now(); m_decoder.wait(); m_start_time += now() - paused; continue; } // seek request if (m_seek_time >= 0) { int64 timestamp = (int64) (m_seek_time * AV_TIME_BASE); int flags = m_seek_time > m_video_time ? 0 : AVSEEK_FLAG_BACKWARD; int ret = av_seek_frame(m_FormatCtx, -1, timestamp, flags); if (ret == 0) { m_aq.clear(); m_vq.clear(); m_start_time += m_video_time - m_seek_time; m_video_time = m_seek_time; set_status(status, seekNotify); } else { set_status(error, seekInvalidTime); } m_seek_time = -1; } if (get_bufferlength() < m_buffer_time) { //printf("m_buffer_length=%f, queue_size=%d\n", get_bufferlength(), m_vq.size()); AVPacket pkt; int rc = av_read_frame(m_FormatCtx, &pkt); if (rc < 0) { if (m_vq.size() == 0) { break; } } else { if (pkt.stream_index == m_video_index) { m_vq.push(new av_packet(pkt)); } else if (pkt.stream_index == m_audio_index) { if (get_sound_handler()) { m_aq.push(new av_packet(pkt)); } } else { continue; } } } // skip expired video frames double current_time = now() - m_start_time; while (current_time >= m_video_time) { gc_ptr<av_packet> packet; if (m_vq.pop(&packet)) { const AVPacket& pkt = packet->get_packet(); // update video clock with pts, if present if (pkt.dts > 0) { m_video_time = av_q2d(m_video_stream->time_base) * pkt.dts; } m_video_time += av_q2d(m_video_stream->codec->time_base); // +frame_delay set_video_data(decode_video(pkt)); } else { // no packets in queue // set_status("status", "NetStream.Buffer.Empty"); break; } } // Don't hog the CPU. 
// Queues have filled, video frame have shown // now it is possible and to have a rest int delay = (int) (1000 * (m_video_time - current_time)); // hack, adjust m_start_time after seek if (delay > 50) { m_start_time -= (m_video_time - current_time); current_time = now() - m_start_time; delay = (int) (1000 * (m_video_time - current_time)); } assert(delay <= 50); if (delay > 0) { if (get_bufferlength() >= m_buffer_time) { // set_status("status", "NetStream.Buffer.Full"); tu_timer::sleep(delay); } // printf("current_time=%f, video_time=%f, delay=%d\n", current_time, m_video_time, delay); } } sound_handler* sound = get_sound_handler(); if (sound) { sound->detach_aux_streamer(this); } close_stream(); set_status(status, playStop); m_status = STOP; }