/* Keep requesting frames upstream until the output emits one; on EOF,
 * drain the frames still buffered in the FIFO, extrapolating their
 * timestamps from the first seen pts. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FPSContext *s = ctx->priv;
    int start_count = s->frames_out;
    int ret = 0;

    while (ret >= 0 && s->frames_out == start_count)
        ret = ff_request_frame(ctx->inputs[0]);

    /* flush the fifo */
    if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) {
        while (av_fifo_size(s->fifo)) {
            AVFrame *buf;

            av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
            /* each drained frame gets the next consecutive output pts */
            buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base,
                                    outlink->time_base) + s->frames_out;

            ret = ff_filter_frame(outlink, buf);
            if (ret < 0)
                return ret;

            s->frames_out++;
        }
        return 0;
    }

    return ret;
}
/*
 * Pop up to one audio frame's worth of data from the stream's interleave
 * FIFO into a freshly allocated packet.
 *
 * Returns the packet size on success, 0 if no packet was produced
 * (not enough buffered data yet), or a negative AVERROR on allocation
 * failure.
 */
static int interleave_new_audio_packet(AVFormatContext *s, AVPacket *pkt,
                                       int stream_index, int flush)
{
    AVStream *st = s->streams[stream_index];
    AudioInterleaveContext *aic = st->priv_data;
    int ret;
    /* *aic->samples is the sample count for the current frame slot */
    int size = FFMIN(av_fifo_size(aic->fifo), *aic->samples * aic->sample_size);

    /* When not flushing, only emit once MORE than a full frame is buffered
     * (size == fifo size means we would drain the FIFO completely). */
    if (!size || (!flush && size == av_fifo_size(aic->fifo)))
        return 0;

    ret = av_new_packet(pkt, size);
    if (ret < 0)
        return ret;

    av_fifo_generic_read(aic->fifo, pkt->data, size, NULL);

    pkt->dts = pkt->pts = aic->dts;
    pkt->duration = av_rescale_q(*aic->samples, st->time_base, aic->time_base);
    pkt->stream_index = stream_index;
    aic->dts += pkt->duration;

    /* advance to the next per-frame sample count; the samples_per_frame
     * list appears to be zero-terminated and wraps around — the wrap below
     * relies on that (NOTE(review): confirm against the context setup) */
    aic->samples++;
    if (!*aic->samples)
        aic->samples = aic->samples_per_frame;

    return size;
}
/*
 * Self-test for the AVFifoBuffer API: fill a FIFO with ints, peek at it
 * from both ends, then read everything back.
 *
 * Fix: the result of av_fifo_alloc() was used unchecked; on allocation
 * failure av_fifo_space(NULL) would dereference NULL.
 */
int main(void)
{
    /* create a FIFO buffer */
    AVFifoBuffer *fifo = av_fifo_alloc(13 * sizeof(int));
    int i, j, n;

    if (!fifo) {
        fprintf(stderr, "av_fifo_alloc failed\n");
        return 1;
    }

    /* fill data */
    for (i = 0; av_fifo_space(fifo) >= sizeof(int); i++)
        av_fifo_generic_write(fifo, &i, sizeof(int), NULL);

    /* peek at FIFO: negative offsets index from the write end */
    n = av_fifo_size(fifo) / sizeof(int);
    for (i = -n + 1; i < n; i++) {
        int *v = (int *)av_fifo_peek2(fifo, i * sizeof(int));
        printf("%d: %d\n", i, *v);
    }
    printf("\n");

    /* read data */
    for (i = 0; av_fifo_size(fifo) >= sizeof(int); i++) {
        av_fifo_generic_read(fifo, &j, sizeof(int), NULL);
        printf("%d ", j);
    }
    printf("\n");

    av_fifo_free(fifo);

    return 0;
}
/* Serve one frame: a buffered pending frame if available, otherwise keep
 * pulling from upstream until the select expression accepts a frame. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];

    select->select = 0;

    if (av_fifo_size(select->pending_frames)) {
        AVFilterBufferRef *picref;

        av_fifo_generic_read(select->pending_frames, &picref,
                             sizeof(picref), NULL);
        avfilter_start_frame(outlink, avfilter_ref_buffer(picref, ~0));
        avfilter_draw_slice(outlink, 0, outlink->h, 1);
        avfilter_end_frame(outlink);
        avfilter_unref_buffer(picref);
        return 0;
    }

    /* select->select was just reset, so the loop always runs at least once */
    do {
        int ret = avfilter_request_frame(inlink);
        if (ret < 0)
            return ret;
    } while (!select->select);

    return 0;
}
/* Fetch one frame from the sink's FIFO, pulling from the filterchain first
 * if the FIFO is empty (unless NO_REQUEST forbids it). With PEEK the frame
 * stays queued and the caller gets a new reference; otherwise ownership of
 * the frame data moves into *frame. */
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *queued;
    int ret;

    /* no picref available, fetch it from the filterchain */
    if (!av_fifo_size(buf->fifo)) {
        if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
            return AVERROR(EAGAIN);
        ret = ff_request_frame(inlink);
        if (ret < 0)
            return ret;
    }

    if (!av_fifo_size(buf->fifo))
        return AVERROR(EINVAL);

    if (flags & AV_BUFFERSINK_FLAG_PEEK) {
        queued = *((AVFrame **)av_fifo_peek2(buf->fifo, 0));
        return av_frame_ref(frame, queued);
    }

    av_fifo_generic_read(buf->fifo, &queued, sizeof(queued), NULL);
    av_frame_move_ref(frame, queued);
    av_frame_free(&queued);

    return 0;
}
/* Drain the FIFO, freeing every queued AVFrame pointer. */
static void flush_fifo(AVFifoBuffer *fifo)
{
    AVFrame *frame;

    while (av_fifo_size(fifo) > 0) {
        av_fifo_generic_read(fifo, &frame, sizeof(frame), NULL);
        av_frame_free(&frame);
    }
}
/*
 * Encode raw audio frames from _buffer.
 *
 * For codecs with a fixed frame size (fContext->frame_size > 1) the input
 * is accumulated in fAudioFifo and encoded in frame-sized chunks; for raw
 * audio it is forwarded directly in one call. Returns B_OK on success or
 * B_NO_MEMORY on allocation failure.
 */
status_t
AVCodecEncoder::_EncodeAudio(const void* _buffer, int64 frameCount,
    media_encode_info* info)
{
    TRACE("AVCodecEncoder::_EncodeAudio(%p, %lld, %p)\n", _buffer, frameCount,
        info);

    if (fChunkBuffer == NULL)
        return B_NO_MEMORY;

    status_t ret = B_OK;

    const uint8* buffer = reinterpret_cast<const uint8*>(_buffer);

    size_t inputSampleSize = fInputFormat.u.raw_audio.format
        & media_raw_audio_format::B_AUDIO_SIZE_MASK;
    size_t inputFrameSize = inputSampleSize
        * fInputFormat.u.raw_audio.channel_count;

    size_t bufferSize = frameCount * inputFrameSize;
    /* never feed more than one chunk buffer's worth per call */
    bufferSize = min_c(bufferSize, kDefaultChunkBufferSize);

    if (fContext->frame_size > 1) {
        // Encoded audio. Things work differently from raw audio. We need
        // the fAudioFifo to pipe data.
        if (av_fifo_realloc2(fAudioFifo,
                av_fifo_size(fAudioFifo) + bufferSize) < 0) {
            TRACE("  av_fifo_realloc2() failed\n");
            return B_NO_MEMORY;
        }
        av_fifo_generic_write(fAudioFifo, const_cast<uint8*>(buffer),
            bufferSize, NULL);

        int frameBytes = fContext->frame_size * inputFrameSize;
        uint8* tempBuffer = new(std::nothrow) uint8[frameBytes];
        if (tempBuffer == NULL)
            return B_NO_MEMORY;

        // Encode as many chunks as can be read from the FIFO.
        while (av_fifo_size(fAudioFifo) >= frameBytes) {
            av_fifo_generic_read(fAudioFifo, tempBuffer, frameBytes, NULL);

            /* NOTE(review): the recursive overload presumably takes
             * (buffer, bufferSize, frameCount, info) — confirm that
             * frameBytes/frame_size are passed in the right order */
            ret = _EncodeAudio(tempBuffer, frameBytes, fContext->frame_size,
                info);
            if (ret != B_OK)
                break;
        }

        delete[] tempBuffer;
    } else {
        // Raw audio. The number of bytes returned from avcodec_encode_audio()
        // is always the same as the number of input bytes.
        return _EncodeAudio(buffer, bufferSize, frameCount,
            info);
    }

    return ret;
}
/* Unreference every packet still queued, then free the FIFO itself. */
static void free_pkt_fifo(AVFifoBuffer *fifo)
{
    while (av_fifo_size(fifo) > 0) {
        AVPacket pkt;

        av_fifo_generic_read(fifo, &pkt, sizeof(pkt), NULL);
        av_free_packet(&pkt);
    }
    av_fifo_free(fifo);
}
/*
 * Read one audio packet produced by process_callback().
 *
 * Activates the JACK client lazily on the first call, then blocks (with a
 * 2-second timeout) on the packet-count semaphore until a filled packet is
 * available in the FIFO. Returns 0 on success or a negative AVERROR.
 */
static int audio_read_packet(AVFormatContext *context, AVPacket *pkt)
{
    JackData *self = context->priv_data;
    struct timespec timeout = {0, 0};
    int test;

    /* Activate the JACK client on first packet read. Activating the JACK client
     * means that process_callback() starts to get called at regular interval.
     * If we activate it in audio_read_header(), we're actually reading audio data
     * from the device before instructed to, and that may result in an overrun. */
    if (!self->activated) {
        if (!jack_activate(self->client)) {
            self->activated = 1;
            av_log(context, AV_LOG_INFO,
                   "JACK client registered and activated (rate=%dHz, buffer_size=%d frames)\n",
                   self->sample_rate, self->buffer_size);
        } else {
            av_log(context, AV_LOG_ERROR, "Unable to activate JACK client\n");
            return AVERROR(EIO);
        }
    }

    /* Wait for a packet coming back from process_callback(), if one isn't available yet */
    timeout.tv_sec = av_gettime() / 1000000 + 2;
    if (sem_timedwait(&self->packet_count, &timeout)) {
        if (errno == ETIMEDOUT) {
            av_log(context, AV_LOG_ERROR,
                   "Input error: timed out when waiting for JACK process callback output\n");
        } else {
            av_log(context, AV_LOG_ERROR, "Error while waiting for audio packet: %s\n",
                   strerror(errno));
        }
        if (!self->client)
            av_log(context, AV_LOG_ERROR, "Input error: JACK server is gone\n");
        return AVERROR(EIO);
    }

    /* report (and clear) any overruns detected since the last read */
    if (self->pkt_xrun) {
        av_log(context, AV_LOG_WARNING, "Audio packet xrun\n");
        self->pkt_xrun = 0;
    }

    if (self->jack_xrun) {
        av_log(context, AV_LOG_WARNING, "JACK xrun\n");
        self->jack_xrun = 0;
    }

    /* Retrieve the packet filled with audio data by process_callback() */
    av_fifo_generic_read(self->filled_pkts, pkt, sizeof(*pkt), NULL);

    /* top up the pool of empty packets for the realtime callback */
    if ((test = supply_new_packets(self, context)))
        return test;

    return 0;
}
/* Free any frames still queued in the source FIFO, then the FIFO itself. */
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    AVFrame *frame;

    if (s->fifo) {
        while (av_fifo_size(s->fifo)) {
            av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL);
            av_frame_free(&frame);
        }
    }
    av_fifo_freep(&s->fifo);
}
/** * \brief read data from buffer and splitting it into channels * \param bufs num_bufs float buffers, each will contain the data of one channel * \param cnt number of samples to read per channel * \param num_bufs number of channels to split the data into * \return number of samples read per channel, equals cnt unless there was too * little data in the buffer * * Assumes the data in the buffer is of type float, the number of bytes * read is res * num_bufs * sizeof(float), where res is the return value. * If there is not enough data in the buffer remaining parts will be filled * with silence. */ static int read_buffer(float **bufs, int cnt, int num_bufs) { struct deinterleave di = {bufs, num_bufs, 0, 0}; int buffered = av_fifo_size(buffer); if (cnt * sizeof(float) * num_bufs > buffered) { silence(bufs, cnt, num_bufs); cnt = buffered / sizeof(float) / num_bufs; } av_fifo_generic_read(buffer, &di, cnt * num_bufs * sizeof(float), deinterleave); return cnt; }
/* Release every buffer ref still pending in the sink FIFO, then free it. */
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSinkContext *sink = ctx->priv;
    AVFilterBufferRef *ref;

    while (sink->fifo && av_fifo_size(sink->fifo)) {
        av_fifo_generic_read(sink->fifo, &ref, sizeof(ref), NULL);
        avfilter_unref_buffer(ref);
    }
    av_fifo_free(sink->fifo);
}
/*
 * Decode one packet with the QSV decoder, handling an in-progress decoder
 * reinit: while a reinit is pending the old decoder is flushed, and input
 * packets are buffered in q->pkt_fifo so they can be replayed afterwards.
 * Returns the do_qsv_decode()/av_packet_ref() result.
 */
int ff_qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame,
                  int *got_frame, AVPacket *avpkt)
{
    AVPacket pkt_ref = { 0 };
    int ret = 0;

    if (q->pkt_fifo && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
        /* we already have got some buffered packets. so add new to tail */
        ret = av_packet_ref(&pkt_ref, avpkt);
        if (ret < 0)
            return ret;
        av_fifo_generic_write(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
    }
    if (q->reinit_pending) {
        /* keep draining the old decoder until it stops producing frames */
        ret = do_qsv_decode(avctx, q, frame, got_frame, avpkt);
        if (!*got_frame) {
            /* Flushing complete, no more frames  */
            close_decoder(q);
            //return ff_qsv_decode(avctx, q, frame, got_frame, avpkt);
        }
    }
    /* not "else": close_decoder() above clears reinit_pending, so decoding
     * may resume within the same call */
    if (!q->reinit_pending) {
        if (q->pkt_fifo && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
            /* process buffered packets */
            while (!*got_frame && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
                av_fifo_generic_read(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
                ret = do_qsv_decode(avctx, q, frame, got_frame, &pkt_ref);
                if (q->reinit_pending) {
                    /*
                       A rare case: new reinit pending when buffering existing.
                       We should to return the pkt_ref back to same place of fifo
                    */
                    qsv_packet_push_front(q, &pkt_ref);
                } else {
                    av_packet_unref(&pkt_ref);
                }
            }
        } else {
            /* general decoding */
            ret = do_qsv_decode(avctx, q, frame, got_frame, avpkt);
            if (q->reinit_pending) {
                /* reinit just triggered: stash this packet for replay */
                ret = av_packet_ref(&pkt_ref, avpkt);
                if (ret < 0)
                    return ret;
                av_fifo_generic_write(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
            }
        }
    }

    return ret;
}
/*
 * JACK realtime process callback: grab a pre-allocated empty packet from
 * new_pkts, interleave the per-port JACK buffers into it, timestamp it, and
 * push it onto filled_pkts for audio_read_packet() to consume.
 */
static int process_callback(jack_nframes_t nframes, void *arg)
{
    /* Warning: this function runs in realtime. One mustn't allocate memory here
     * or do any other thing that could block. */

    int i, j;
    JackData *self = arg;
    float *buffer;
    jack_nframes_t latency, cycle_delay;
    AVPacket pkt;
    float *pkt_data;
    double cycle_time;

    if (!self->client)
        return 0;

    /* The approximate delay since the hardware interrupt as a number of frames */
    cycle_delay = jack_frames_since_cycle_start(self->client);

    /* Retrieve filtered cycle time */
    cycle_time = ff_timefilter_update(self->timefilter,
                                      av_gettime() / 1000000.0 - (double) cycle_delay / self->sample_rate,
                                      self->buffer_size);

    /* Check if an empty packet is available, and if there's enough space to send it back once filled */
    if ((av_fifo_size(self->new_pkts) < sizeof(pkt)) || (av_fifo_space(self->filled_pkts) < sizeof(pkt))) {
        /* can't block here: record the overrun and drop this cycle */
        self->pkt_xrun = 1;
        return 0;
    }

    /* Retrieve empty (but allocated) packet */
    av_fifo_generic_read(self->new_pkts, &pkt, sizeof(pkt), NULL);

    pkt_data = (float *) pkt.data;
    latency = 0;

    /* Copy and interleave audio data from the JACK buffer into the packet */
    for (i = 0; i < self->nports; i++) {
        latency += jack_port_get_total_latency(self->client, self->ports[i]);
        buffer = jack_port_get_buffer(self->ports[i], self->buffer_size);
        for (j = 0; j < self->buffer_size; j++)
            pkt_data[j * self->nports + i] = buffer[j];
    }

    /* Timestamp the packet with the cycle start time minus the average latency */
    pkt.pts = (cycle_time - (double) latency / (self->nports * self->sample_rate)) * 1000000.0;

    /* Send the now filled packet back, and increase packet counter */
    av_fifo_generic_write(self->filled_pkts, &pkt, sizeof(pkt), NULL);
    sem_post(&self->packet_count);

    return 0;
}
/*
 * Encode one audio frame from the output file's FIFO.
 *
 * Consumes i_frame_bytes from p_aout->p_fifo per iteration and encodes it;
 * returns 0 as soon as the encoder emits a packet (the packet is left in
 * p_aout->packet for the caller), 1 when the FIFO holds less than a full
 * frame, and -1 on encoding error.
 *
 * NOTE(review): p_aind is only referenced by the commented-out PTS code —
 * confirm whether PTS is set elsewhere.
 */
int dc_audio_encoder_encode(AudioOutputFile * p_aout, AudioInputData * p_aind)
{
    int i_got_pkt;
    //AVStream * p_audio_stream = p_aout->p_fmt->streams[p_aout->i_astream_idx];
    //AVCodecContext * p_audio_codec_ctx = p_audio_stream->codec;
    AVCodecContext * p_audio_codec_ctx = p_aout->p_codec_ctx;

    while (av_fifo_size(p_aout->p_fifo) >= p_aout->i_frame_bytes) {

        av_fifo_generic_read(p_aout->p_fifo, p_aout->p_adata_buf,
                             p_aout->i_frame_bytes, NULL);

        /* point the reusable frame at the freshly read samples */
        p_aout->p_aframe->data[0] = p_aout->p_adata_buf;
        p_aout->p_aframe->linesize[0] = p_aout->i_frame_bytes;

        av_init_packet(&p_aout->packet);

        /*
         * Set PTS (method 1)
         */
        //p_aout->p_aframe->pts = p_aind->next_pts;

        /*
         * Set PTS (method 2)
         * int64_t now = av_gettime();
         * outAudioCtx->pOutAudioFrame->pts = av_rescale_q(now,AV_TIME_BASE_Q,
         *                                                 audioEncCtx->time_base);
         */

        /* Encode audio */
        if (avcodec_encode_audio2(p_audio_codec_ctx, &p_aout->packet,
                                  p_aout->p_aframe, &i_got_pkt) != 0) {
            fprintf(stderr, "Error while encoding audio.\n");
            return -1;
        }

        if (i_got_pkt) {
            /* packet ready: hand it to the caller without freeing it */
            //p_aout->acc_samples += p_aout->p_aframe->nb_samples;
            return 0;
        }

        /* encoder buffered the input, no packet yet: discard and continue */
        av_free_packet(&p_aout->packet);
    }

    return 1;
}
/* Tear down the QSV decoder: close the MFX session, drain the async FIFO
 * (frame pointer + sync point pairs), free the work-frame list, the parser,
 * the internal codec context and the frames context. Always returns 0. */
int ff_qsv_decode_close(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;

    if (q->session)
        MFXVideoDECODE_Close(q->session);

    /* each async entry is written as two records: the frame, then the sync */
    while (q->async_fifo && av_fifo_size(q->async_fifo)) {
        QSVFrame *out_frame;
        mfxSyncPoint *sync;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
        av_freep(&sync);
    }

    for (; cur; cur = q->work_frames) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
    }

    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

    av_parser_close(q->parser);
    avcodec_free_context(&q->avctx_internal);

    if (q->internal_session)
        MFXClose(q->internal_session);

    av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
    av_freep(&q->frames_ctx.mids);
    q->frames_ctx.nb_mids = 0;

    return 0;
}
/* Release queued buffer refs, the FIFO, and the internal scale filter. */
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSourceContext *s = ctx->priv;
    AVFilterBufferRef *ref;

    if (s->fifo) {
        while (av_fifo_size(s->fifo)) {
            av_fifo_generic_read(s->fifo, &ref, sizeof(ref), NULL);
            avfilter_unref_buffer(ref);
        }
        av_fifo_free(s->fifo);
        s->fifo = NULL;
    }
    avfilter_free(s->scale);
    s->scale = NULL;
}
/* Free the select expression and every picref still pending in the FIFO. */
static av_cold void uninit(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    AVFilterBufferRef *picref;

    av_expr_free(select->expr);
    select->expr = NULL;

    if (select->pending_frames) {
        while (av_fifo_generic_read(select->pending_frames, &picref,
                                    sizeof(picref), NULL) == sizeof(picref))
            avfilter_unref_buffer(picref);
        av_fifo_free(select->pending_frames);
    }
    select->pending_frames = NULL;
}
/* Common teardown: drop every pending picref, then dispose of the FIFO. */
static av_cold void common_uninit(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterBufferRef *picref;

    if (!buf->fifo)
        return;

    while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) {
        av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL);
        avfilter_unref_buffer(picref);
    }
    av_fifo_free(buf->fifo);
    buf->fifo = NULL;
}
int FFMpegFifo::read(unsigned char* dst,int readSize) { //SDL_LockMutex(mutex); if (av_fifo_size(fifo) < readSize) { waitReadBytes = readSize; //SDL_CondWait(condRead,mutex); } int ret = av_fifo_generic_read(fifo,dst,readSize,NULL); if (av_fifo_space(fifo) >= waitWriteBytes) //SDL_CondSignal(condWrite); //SDL_UnlockMutex(mutex); return readSize; }
/* Free the select expression and every frame still pending in the FIFO. */
static av_cold void uninit(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    AVFrame *frame;

    av_expr_free(select->expr);
    select->expr = NULL;

    if (select->pending_frames) {
        while (av_fifo_generic_read(select->pending_frames, &frame,
                                    sizeof(frame), NULL) == sizeof(frame))
            av_frame_free(&frame);
        av_fifo_free(select->pending_frames);
    }
    select->pending_frames = NULL;
}
/* Pop one queued frame and forward it downstream. An empty FIFO means
 * either EOF or a failed request (EAGAIN), depending on the eof flag. */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFrame *frame;

    if (av_fifo_size(c->fifo)) {
        av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL);
        return ff_filter_frame(link, frame);
    }

    if (c->eof)
        return AVERROR_EOF;
    c->nb_failed_requests++;
    return AVERROR(EAGAIN);
}
/* Drop all buffered input packets and reset the decoder state. */
static void mediacodec_decode_flush(AVCodecContext *avctx)
{
    MediaCodecH264DecContext *s = avctx->priv_data;
    AVPacket pkt;

    while (av_fifo_size(s->fifo) > 0) {
        av_fifo_generic_read(s->fifo, &pkt, sizeof(pkt), NULL);
        av_packet_unref(&pkt);
    }
    av_fifo_reset(s->fifo);

    av_packet_unref(&s->filtered_pkt);

    ff_mediacodec_dec_flush(avctx, &s->ctx);
}
/*
 * Receive one message from the queue. Caller must hold mq->lock.
 *
 * Blocks on mq->cond until a full message (mq->elsize bytes) is buffered or
 * an error is set; with AV_THREAD_MESSAGE_NONBLOCK it returns EAGAIN instead
 * of waiting. Returns 0 on success, mq->err_recv when the queue is in an
 * error state and empty, or AVERROR(EAGAIN).
 */
static int av_thread_message_queue_recv_locked(AVThreadMessageQueue *mq,
                                               void *msg,
                                               unsigned flags)
{
    while (!mq->err_recv && av_fifo_size(mq->fifo) < mq->elsize) {
        if ((flags & AV_THREAD_MESSAGE_NONBLOCK))
            return AVERROR(EAGAIN);
        pthread_cond_wait(&mq->cond, &mq->lock);
    }
    /* still no full message: the loop exited because of err_recv */
    if (av_fifo_size(mq->fifo) < mq->elsize)
        return mq->err_recv;
    av_fifo_generic_read(mq->fifo, msg, mq->elsize, NULL);
    /* wake a sender that may be waiting for free space */
    pthread_cond_signal(&mq->cond);
    return 0;
}
/*
 * Free the select expression and release every frame still pending in the
 * FIFO.
 *
 * Fix: the original for-loop compared a rising index `i` against
 * av_fifo_size()/sizeof(picref), but each read shrinks the FIFO, so the
 * loop terminated halfway and leaked roughly half the pending frames.
 * Draining until the FIFO is empty frees them all; the NULL guard also
 * avoids dereferencing a never-allocated FIFO.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    AVFilterBufferRef *picref;

    av_expr_free(select->expr);
    select->expr = NULL;

    while (select->pending_frames && av_fifo_size(select->pending_frames)) {
        av_fifo_generic_read(select->pending_frames, &picref,
                             sizeof(picref), NULL);
        avfilter_unref_buffer(picref);
    }
    av_fifo_free(select->pending_frames);
    select->pending_frames = NULL;
}
/* Free the audio FIFO and every frame still queued in the frame FIFO. */
static av_cold void uninit(AVFilterContext *ctx)
{
    BufferSinkContext *sink = ctx->priv;
    AVFrame *frame;

    if (sink->audio_fifo)
        av_audio_fifo_free(sink->audio_fifo);

    if (!sink->fifo)
        return;

    while (av_fifo_size(sink->fifo) >= FIFO_INIT_ELEMENT_SIZE) {
        av_fifo_generic_read(sink->fifo, &frame, sizeof(frame), NULL);
        av_frame_free(&frame);
    }
    av_fifo_freep(&sink->fifo);
}
/*
 * Read up to size bytes of cached HTTP data into cache.
 *
 * Returns the number of bytes read, 0 on interrupt/EOF/exit,
 * AVERROR(EAGAIN) when the caller should retry (still buffering or still
 * downloading), or AVERROR(EIO) for a NULL handle. All exits release
 * read_mutex, which is taken on entry.
 */
int CacheHttp_Read(void * handle, uint8_t * cache, int size)
{
    if (!handle)
        return AVERROR(EIO);

    CacheHttpContext * s = (CacheHttpContext *)handle;
    pthread_mutex_lock(&s->read_mutex);
    if (s->fifo) {
        int avail;
        avail = av_fifo_size(s->fifo);
        //av_log(NULL, AV_LOG_INFO, "----------- http_read avail=%d, size=%d ",avail,size);

        /* On the very first read, honor the configured initial-buffer
         * threshold (in KB) before handing out any data. */
        if (s->is_first_read > 0) {
            float value = 0.0;
            int ret = -1;
            ret = am_getconfig_float("libplayer.hls.initial_buffered", &value);
            if (ret >= 0) {
                if (avail / 1024 < value) {
                    //av_log(NULL, AV_LOG_INFO, "buffer data avail=%d, initial buffer buffered data size=%f ",avail,value*1024);
                    pthread_mutex_unlock(&s->read_mutex);
                    return AVERROR(EAGAIN);
                }
            }
            s->is_first_read = 0;
        }

        if (url_interrupt_cb()) {
            /* caller requested abort */
            pthread_mutex_unlock(&s->read_mutex);
            return 0;
        } else if (avail) {
            // Maximum amount available
            size = FFMIN(avail, size);
            av_fifo_generic_read(s->fifo, cache, size, NULL);
            pthread_mutex_unlock(&s->read_mutex);
            return size;
        } else if (s->EXITED) {
            /* download thread gone, nothing more will arrive */
            pthread_mutex_unlock(&s->read_mutex);
            return 0;
        } else if (!s->finish_flag) {
            pthread_mutex_unlock(&s->read_mutex);
            //read just need retry
            return AVERROR(EAGAIN);
        }
    }

    /* FIFO missing or download finished with nothing buffered: EOF */
    pthread_mutex_unlock(&s->read_mutex);
    return 0;
}
/* Emit one buffered audio frame; it is an error to be called when the
 * FIFO is empty. */
static int request_frame(AVFilterLink *outlink)
{
    ABufferSourceContext *src = outlink->src->priv;
    AVFilterBufferRef *ref;

    if (av_fifo_size(src->fifo)) {
        av_fifo_generic_read(src->fifo, &ref, sizeof(ref), NULL);
        avfilter_filter_samples(outlink, avfilter_ref_buffer(ref, ~0));
        avfilter_unref_buffer(ref);
        return 0;
    }

    av_log(outlink->src, AV_LOG_ERROR,
           "request_frame() called with no available frames!\n");
    return AVERROR(EINVAL);
}
/*
 * Grow the FIFO to at least new_size bytes, preserving buffered data.
 *
 * Implemented by allocating a fresh FIFO, copying the buffered bytes into
 * it, and swapping the struct contents in place so existing pointers to f
 * stay valid. Returns 0 on success, -1 on allocation failure (f untouched).
 * Shrinking is never performed.
 */
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
{
    unsigned int old_size = f->end - f->buffer;

    if (old_size < new_size) {
        int len = av_fifo_size(f);
        AVFifoBuffer *f2 = av_fifo_alloc(new_size);

        if (!f2)
            return -1;
        /* drain the old FIFO linearly into the new flat buffer */
        av_fifo_generic_read(f, f2->buffer, len, NULL);
        f2->wptr += len;
        f2->wndx += len;
        /* free only the old data buffer, then take over f2's fields;
         * the f2 shell itself is released after the struct copy */
        av_free(f->buffer);
        *f = *f2;
        av_free(f2);
    }
    return 0;
}
/* Read up to nb_samples samples from each plane FIFO into data[].
 * Returns the number of samples actually read, 0 when empty, or a
 * negative AVERROR on bad input or internal inconsistency. */
int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples)
{
    int i, bytes;

    if (nb_samples < 0)
        return AVERROR(EINVAL);

    if (nb_samples > af->nb_samples)
        nb_samples = af->nb_samples;
    if (!nb_samples)
        return 0;

    bytes = nb_samples * af->sample_size;
    for (i = 0; i < af->nb_buffers; i++) {
        if (av_fifo_generic_read(af->buf[i], data[i], bytes, NULL) < 0)
            return AVERROR_BUG;
    }
    af->nb_samples -= nb_samples;

    return nb_samples;
}