Example 1
BOOL freerdp_dsp_ffmpeg_encode(FREERDP_DSP_CONTEXT* context, const AUDIO_FORMAT* format,
                               const BYTE* data, size_t length, wStream* out)
{
	int rc;
	int samples, rest, copied;

	if (!context || !format || !data || !out || !context->encoder)
		return FALSE;

	/* Create input frame */
	if (!ffmpeg_fill_frame(context->frame, format, data, length))
		return FALSE;

	/* Resample to desired format. */
	if (!ffmpeg_resample_frame(context->rcontext,
	                           context->frame,
	                           context->resampled))
		return FALSE;

	if (context->context->frame_size <= 0)
	{
		return ffmpeg_encode_frame(context->context, context->resampled,
		                           context->packet, out);
	}
	else
	{
		rest = context->resampled->nb_samples;
		copied = 0;

		do
		{
			/* Clamp to what still fits into the buffered frame. */
			samples = rest;

			if (samples + context->bufferedSamples > context->context->frame_size)
				samples = context->context->frame_size - context->bufferedSamples;

			rc = av_samples_copy(context->buffered->extended_data, context->resampled->extended_data,
			                     context->bufferedSamples, copied, samples,
			                     context->context->channels, context->context->sample_fmt);

			if (rc < 0)
				return FALSE;

			rest -= samples;
			copied += samples;
			context->bufferedSamples += samples;

			if (context->context->frame_size <= context->bufferedSamples)
			{
				/* Encode in desired format. */
				if (!ffmpeg_encode_frame(context->context, context->buffered,
				                         context->packet, out))
					return FALSE;

				context->bufferedSamples = 0;
			}
		}
		while (rest > 0);

		return TRUE;
	}
}
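For reference, a minimal self-contained sketch (not from any of the projects listed here) of the av_samples_copy() semantics the buffering loop above relies on: dst_offset and src_offset are counted in samples per channel, not in bytes; the sample format and channel count determine the byte stride internally.

#include <libavutil/mem.h>
#include <libavutil/samplefmt.h>

/* Sketch: allocate two packed S16 stereo buffers and append 256 samples of
 * src into dst starting at destination sample offset 100. */
static int samples_copy_demo(void)
{
    uint8_t *src[1] = { NULL }, *dst[1] = { NULL }; /* packed: one plane */
    int linesize, ret;

    ret = av_samples_alloc(src, &linesize, 2, 512, AV_SAMPLE_FMT_S16, 0);
    if (ret < 0)
        return ret;

    ret = av_samples_alloc(dst, &linesize, 2, 512, AV_SAMPLE_FMT_S16, 0);
    if (ret >= 0)
        ret = av_samples_copy(dst, src, 100 /* dst_offset */, 0 /* src_offset */,
                              256, 2, AV_SAMPLE_FMT_S16);

    av_freep(&src[0]);
    av_freep(&dst[0]);
    return ret;
}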
Example 2
	/*
	* Read the next frame from the A/V file; it may be an image (video) frame or a chunk of audio.
	*/
	AVRaw * ffReadFrame(AVDecodeCtx *pdc)
	{
		int ret, got_frame;
		AVPacket pkt;
		AVCodecContext *ctx;
		AVFrame * frame;
		while (true)
		{
			ret = av_read_frame(pdc->_ctx, &pkt);

			if (ret < 0)
			{
				char errmsg[ERROR_BUFFER_SIZE];
				av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
				av_log(NULL, AV_LOG_FATAL, "ffReadFrame av_read_frame : %s.\n", errmsg);
				return NULL;
			}
			if (pkt.stream_index == pdc->_video_st_index)
			{
				ctx = pdc->_video_st->codec;
				frame = pdc->_vctx.frame;
				ret = avcodec_decode_video2(ctx, frame, &got_frame, &pkt);

				if (got_frame)
				{
					AVRaw * praw = make_image_raw(ctx->pix_fmt, ctx->width, ctx->height);
					praw->pts = frame->pkt_pts;
					praw->time_base = ctx->pkt_timebase;
					av_image_copy(praw->data, praw->linesize, (const uint8_t **)frame->data, frame->linesize, ctx->pix_fmt, ctx->width, ctx->height);
					av_packet_unref(&pkt);
					return praw;
				}
			}
			else if (pkt.stream_index == pdc->_audio_st_index)
			{
				ctx = pdc->_audio_st->codec;
				frame = pdc->_actx.frame;
				ret = avcodec_decode_audio4(ctx, frame, &got_frame, &pkt);

				if (got_frame)
				{
					AVRaw * praw = make_audio_raw(ctx->sample_fmt, frame->channels, frame->nb_samples);
					praw->pts = frame->pkt_pts;
					praw->time_base = ctx->pkt_timebase;
					av_samples_copy(praw->data, frame->data, 0, 0, frame->nb_samples, frame->channels, ctx->sample_fmt);
					av_packet_unref(&pkt);
					return praw;
				}
			}

			av_packet_unref(&pkt);
		}
		return NULL;
	}
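avcodec_decode_video2() and avcodec_decode_audio4() used above belong to the old decode API, removed in FFmpeg 4.x. A hedged sketch of the same read-and-decode loop on the send/receive API (fmt_ctx, dec_ctx and stream_index stand in for the pdc fields above; error handling trimmed):

/* Sketch only: assumes an opened AVFormatContext and a configured
 * AVCodecContext for the stream of interest. */
static int read_one_frame(AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx,
                          int stream_index, AVFrame *frame)
{
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);

    while ((ret = av_read_frame(fmt_ctx, pkt)) >= 0) {
        if (pkt->stream_index == stream_index) {
            ret = avcodec_send_packet(dec_ctx, pkt);
            av_packet_unref(pkt);
            if (ret < 0)
                break;
            ret = avcodec_receive_frame(dec_ctx, frame);
            if (ret == 0)
                break;                /* got a decoded frame */
            if (ret != AVERROR(EAGAIN))
                break;                /* real error */
            /* EAGAIN: decoder needs more input, keep reading */
        } else {
            av_packet_unref(pkt);
        }
    }
    /* (a full implementation would also flush the decoder at EOF) */

    av_packet_free(&pkt);
    return ret;
}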
Example 3
int ff_filter_samples_framed(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    int (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
    AVFilterPad *src = link->srcpad;
    AVFilterPad *dst = link->dstpad;
    int64_t pts;
    AVFilterBufferRef *buf_out;
    int ret;

    FF_TPRINTF_START(NULL, filter_samples); ff_tlog_link(NULL, link, 1);

    if (link->closed) {
        avfilter_unref_buffer(samplesref);
        return AVERROR_EOF;
    }

    if (!(filter_samples = dst->filter_samples))
        filter_samples = default_filter_samples;

    av_assert1((samplesref->perms & src->min_perms) == src->min_perms);
    samplesref->perms &= ~ src->rej_perms;

    /* prepare to copy the samples if the buffer has insufficient permissions */
    if ((dst->min_perms & samplesref->perms) != dst->min_perms ||
        dst->rej_perms & samplesref->perms) {
        av_log(link->dst, AV_LOG_DEBUG,
               "Copying audio data in avfilter (have perms %x, need %x, reject %x)\n",
               samplesref->perms, link->dstpad->min_perms, link->dstpad->rej_perms);

        buf_out = ff_default_get_audio_buffer(link, dst->min_perms,
                                              samplesref->audio->nb_samples);
        if (!buf_out) {
            avfilter_unref_buffer(samplesref);
            return AVERROR(ENOMEM);
        }
        buf_out->pts                = samplesref->pts;
        buf_out->audio->sample_rate = samplesref->audio->sample_rate;

        /* Copy actual data into new samples buffer */
        av_samples_copy(buf_out->extended_data, samplesref->extended_data,
                        0, 0, samplesref->audio->nb_samples,
                        av_get_channel_layout_nb_channels(link->channel_layout),
                        link->format);

        avfilter_unref_buffer(samplesref);
    } else
        buf_out = samplesref;

    link->cur_buf = buf_out;
    pts = buf_out->pts;
    ret = filter_samples(link, buf_out);
    ff_update_link_current_pts(link, pts);
    return ret;
}
Example 4
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterPad *dst = link->dstpad;
    AVFrame *out;

    FF_DPRINTF_START(NULL, filter_frame);
    ff_dlog_link(NULL, link, 1);

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default: return AVERROR(EINVAL);
        }
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize, frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default: return AVERROR(EINVAL);
        }

        av_frame_free(&frame);
    } else
        out = frame;

    return filter_frame(link, out);
}
Example 5
int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
    int insamples = samplesref->audio->nb_samples, inpos = 0, nb_samples;
    AVFilterBufferRef *pbuf = link->partial_buf;
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
    int ret = 0;

    av_assert1(samplesref->format                == link->format);
    av_assert1(samplesref->audio->channel_layout == link->channel_layout);
    av_assert1(samplesref->audio->sample_rate    == link->sample_rate);

    if (!link->min_samples ||
        (!pbuf &&
         insamples >= link->min_samples && insamples <= link->max_samples)) {
        return ff_filter_samples_framed(link, samplesref);
    }
    /* Handle framing (min_samples, max_samples) */
    while (insamples) {
        if (!pbuf) {
            AVRational samples_tb = { 1, link->sample_rate };
            int perms = link->dstpad->min_perms | AV_PERM_WRITE;
            pbuf = ff_get_audio_buffer(link, perms, link->partial_buf_size);
            if (!pbuf) {
                av_log(link->dst, AV_LOG_WARNING,
                       "Samples dropped due to memory allocation failure.\n");
                return 0;
            }
            avfilter_copy_buffer_ref_props(pbuf, samplesref);
            pbuf->pts = samplesref->pts +
                        av_rescale_q(inpos, samples_tb, link->time_base);
            pbuf->audio->nb_samples = 0;
        }
        nb_samples = FFMIN(insamples,
                           link->partial_buf_size - pbuf->audio->nb_samples);
        av_samples_copy(pbuf->extended_data, samplesref->extended_data,
                        pbuf->audio->nb_samples, inpos,
                        nb_samples, nb_channels, link->format);
        inpos                   += nb_samples;
        insamples               -= nb_samples;
        pbuf->audio->nb_samples += nb_samples;
        if (pbuf->audio->nb_samples >= link->min_samples) {
            ret = ff_filter_samples_framed(link, pbuf);
            pbuf = NULL;
        }
    }
    avfilter_unref_buffer(samplesref);
    link->partial_buf = pbuf;
    return ret;
}
Example 6
int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;
    ret = av_frame_get_buffer(&tmp, 32);
    if (ret < 0)
        return ret;

    if (tmp.nb_samples) {
        int ch = tmp.channels;
        CHECK_CHANNELS_CONSISTENCY(&tmp);
        av_samples_copy(tmp.extended_data, frame->extended_data, 0, 0,
                        frame->nb_samples, ch, frame->format);
    } else {
        av_image_copy(tmp.data, tmp.linesize, frame->data, frame->linesize,
                      frame->format, frame->width, frame->height);
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}
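A minimal usage sketch (my assumptions: a packed AV_SAMPLE_FMT_S16 frame fresh from a decoder, and the frame->channels field of this era of libavutil): call av_frame_make_writable() before modifying frame data in place, since the decoder may still hold references to the same buffers.

/* Sketch: halve the volume of a packed S16 frame in place.
 * av_frame_make_writable() copies the data only when it is shared. */
static int attenuate_s16(AVFrame *frame)
{
    int ret = av_frame_make_writable(frame);
    if (ret < 0)
        return ret;

    int16_t *s = (int16_t *)frame->data[0]; /* packed: channels interleaved */
    for (int i = 0; i < frame->nb_samples * frame->channels; i++)
        s[i] /= 2;

    return 0;
}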
Example 7
AudioFrame AudioFrame::clone() const
{
    Q_D(const AudioFrame);
    if (!d->format.isValid())
        return AudioFrame();
    AudioFrame f(QByteArray(), d->format);
    f.setSamplesPerChannel(samplesPerChannel());
    f.allocate();
    // TODO: Frame.planes(), bytesPerLines()
    int nb_planes = f.planeCount();
    QVector<uchar*> dst(nb_planes);
    for (int i = 0; i < nb_planes; ++i) {
        dst[i] = f.bits(i);
    }
    av_samples_copy(dst.data(), d->planes.data(), 0, 0, samplesPerChannel(), d->format.channels(), (AVSampleFormat)d->format.sampleFormatFFmpeg());
    return f;
}
Example 8
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
    int planar   = av_sample_fmt_is_planar(dst->format);
    int channels = av_get_channel_layout_nb_channels(dst->channel_layout);
    int planes   = planar ? channels : 1;
    int i;

    if (dst->nb_samples     != src->nb_samples ||
        dst->channel_layout != src->channel_layout)
        return AVERROR(EINVAL);

    for (i = 0; i < planes; i++)
        if (!dst->extended_data[i] || !src->extended_data[i])
            return AVERROR(EINVAL);

    av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                    dst->nb_samples, channels, dst->format);

    return 0;
}
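This helper is the audio branch behind libavutil's public av_frame_copy(); user code normally reaches the same copy through that wrapper. A sketch, assuming a libavutil recent enough that align = 0 in av_frame_get_buffer() selects a default alignment:

/* Sketch: duplicate the data of src into a freshly allocated dst. */
static int clone_audio_data(AVFrame *dst, const AVFrame *src)
{
    int ret;

    dst->format         = src->format;
    dst->channel_layout = src->channel_layout;
    dst->channels       = src->channels;
    dst->nb_samples     = src->nb_samples;

    ret = av_frame_get_buffer(dst, 0); /* 0: default alignment */
    if (ret < 0)
        return ret;

    return av_frame_copy(dst, src); /* dispatches to frame_copy_audio() */
}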
Example 9
static int ff_filter_frame_needs_framing(AVFilterLink *link, AVFrame *frame)
{
    int insamples = frame->nb_samples, inpos = 0, nb_samples;
    AVFrame *pbuf = link->partial_buf;
    int nb_channels = av_frame_get_channels(frame);
    int ret = 0;

    link->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    /* Handle framing (min_samples, max_samples) */
    while (insamples) {
        if (!pbuf) {
            AVRational samples_tb = { 1, link->sample_rate };
            pbuf = ff_get_audio_buffer(link, link->partial_buf_size);
            if (!pbuf) {
                av_log(link->dst, AV_LOG_WARNING,
                       "Samples dropped due to memory allocation failure.\n");
                return 0;
            }
            av_frame_copy_props(pbuf, frame);
            pbuf->pts = frame->pts;
            if (pbuf->pts != AV_NOPTS_VALUE)
                pbuf->pts += av_rescale_q(inpos, samples_tb, link->time_base);
            pbuf->nb_samples = 0;
        }
        nb_samples = FFMIN(insamples,
                           link->partial_buf_size - pbuf->nb_samples);
        av_samples_copy(pbuf->extended_data, frame->extended_data,
                        pbuf->nb_samples, inpos,
                        nb_samples, nb_channels, link->format);
        inpos                   += nb_samples;
        insamples               -= nb_samples;
        pbuf->nb_samples += nb_samples;
        if (pbuf->nb_samples >= link->min_samples) {
            ret = ff_filter_frame_framed(link, pbuf);
            pbuf = NULL;
        }
    }
    av_frame_free(&frame);
    link->partial_buf = pbuf;
    return ret;
}
Example 10
bool AudioDecoder::decodeNextFrame( Frame& frameBuffer )
{
	if( ! decodeNextFrame() )
		return false;

	AVCodecContext& avCodecContext = _inputStream->getAudioCodec().getAVCodecContext();

	const int decodedSize = av_samples_get_buffer_size( NULL, avCodecContext.channels, _frame->nb_samples, avCodecContext.sample_fmt, 1 );
	if( decodedSize <= 0 )
		return false;

	AudioFrame& audioBuffer = static_cast<AudioFrame&>( frameBuffer );
	audioBuffer.setNbSamples( _frame->nb_samples );
	audioBuffer.resize( decodedSize );

	// @todo manage cases with data of frame not only on data[0] (use _frame.linesize)
	unsigned char* const src = _frame->data[0];
	unsigned char* dst = audioBuffer.getData();

	av_samples_copy( &dst, &src, 0, 0, _frame->nb_samples, avCodecContext.channels, avCodecContext.sample_fmt );

	return true;
}
Example 11
File: buffer.c Project: 1c0n/xbmc
AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink,
                                      AVFilterBufferRef *ref)
{
    AVFilterBufferRef *buf;
    int channels;

    switch (outlink->type) {

    case AVMEDIA_TYPE_VIDEO:
        buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
                                  ref->video->w, ref->video->h);
        if(!buf)
            return NULL;
        av_image_copy(buf->data, buf->linesize,
                      (void*)ref->data, ref->linesize,
                      ref->format, ref->video->w, ref->video->h);
        break;

    case AVMEDIA_TYPE_AUDIO:
        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                        ref->audio->nb_samples);
        if(!buf)
            return NULL;
        channels = ref->audio->channels;
        av_samples_copy(buf->extended_data, ref->buf->extended_data,
                        0, 0, ref->audio->nb_samples,
                        channels,
                        ref->format);
        break;

    default:
        return NULL;
    }
    avfilter_copy_buffer_ref_props(buf, ref);
    return buf;
}
Example 12
File: trim.c Project: AVLeo/libav
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int64_t start_sample, end_sample = frame->nb_samples;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop       = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = 1;
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample);

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, av_get_channel_layout_nb_channels(frame->channel_layout),
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    s->got_output = 1;
    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}
Example 13
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterContext *dstctx = link->dst;
    AVFilterPad *dst = link->dstpad;
    AVFrame *out = NULL;
    int ret;
    AVFilterCommand *cmd= link->dst->command_queue;
    int64_t pts;

    if (link->closed) {
        av_frame_free(&frame);
        return AVERROR_EOF;
    }

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = av_frame_copy_props(out, frame);
        if (ret < 0)
            goto fail;

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }

        av_frame_free(&frame);
    } else
        out = frame;

    while(cmd && cmd->time <= out->pts * av_q2d(link->time_base)){
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd= link->dst->command_queue;
    }

    pts = out->pts;
    if (dstctx->enable_str) {
        int64_t pos = av_frame_get_pkt_pos(out);
        dstctx->var_values[VAR_N] = link->frame_count;
        dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
        dstctx->var_values[VAR_W] = link->w;
        dstctx->var_values[VAR_H] = link->h;
        dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

        dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
        if (dstctx->is_disabled &&
            (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
            filter_frame = default_filter_frame;
    }
    ret = filter_frame(link, out);
    link->frame_count++;
    link->frame_requested = 0;
    ff_update_link_current_pts(link, pts);
    return ret;

fail:
    av_frame_free(&out);
    av_frame_free(&frame);
    return ret;
}
Example 14
static int return_audio_frame(AVFilterContext *ctx)
{
    AVFilterLink *link = ctx->outputs[0];
    FifoContext *s = ctx->priv;
    AVFrame *head = s->root.next ? s->root.next->frame : NULL;
    AVFrame *out;
    int ret;

    /* if head is NULL then we're flushing the remaining samples in out */
    if (!head && !s->out)
        return AVERROR_EOF;

    if (!s->out &&
            head->nb_samples >= link->request_samples &&
            calc_ptr_alignment(head) >= 32) {
        if (head->nb_samples == link->request_samples) {
            out = head;
            queue_pop(s);
        } else {
            out = av_frame_clone(head);
            if (!out)
                return AVERROR(ENOMEM);

            out->nb_samples = link->request_samples;
            buffer_offset(link, head, link->request_samples);
        }
    } else {
        int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

        if (!s->out) {
            s->out = ff_get_audio_buffer(link, link->request_samples);
            if (!s->out)
                return AVERROR(ENOMEM);

            s->out->nb_samples = 0;
            s->out->pts                   = head->pts;
            s->allocated_samples          = link->request_samples;
        } else if (link->request_samples != s->allocated_samples) {
            av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
                   "buffer was returned.\n");
            return AVERROR(EINVAL);
        }

        while (s->out->nb_samples < s->allocated_samples) {
            int len;

            if (!s->root.next) {
                ret = ff_request_frame(ctx->inputs[0]);
                if (ret == AVERROR_EOF) {
                    av_samples_set_silence(s->out->extended_data,
                                           s->out->nb_samples,
                                           s->allocated_samples -
                                           s->out->nb_samples,
                                           nb_channels, link->format);
                    s->out->nb_samples = s->allocated_samples;
                    break;
                } else if (ret < 0)
                    return ret;
                if (!s->root.next)
                    return 0;
            }
            head = s->root.next->frame;

            len = FFMIN(s->allocated_samples - s->out->nb_samples,
                        head->nb_samples);

            av_samples_copy(s->out->extended_data, head->extended_data,
                            s->out->nb_samples, 0, len, nb_channels,
                            link->format);
            s->out->nb_samples += len;

            if (len == head->nb_samples) {
                av_frame_free(&head);
                queue_pop(s);
            } else {
                buffer_offset(link, head, len);
            }
        }
        out = s->out;
        s->out = NULL;
    }
    return ff_filter_frame(link, out);
}
Example 15
static int return_audio_frame(AVFilterContext *ctx)
{
    AVFilterLink *link = ctx->outputs[0];
    FifoContext *s = ctx->priv;
    AVFilterBufferRef *head = s->root.next->buf;
    AVFilterBufferRef *buf_out;
    int ret;

    if (!s->buf_out &&
        head->audio->nb_samples >= link->request_samples &&
        calc_ptr_alignment(head) >= 32) {
        if (head->audio->nb_samples == link->request_samples) {
            buf_out = head;
            queue_pop(s);
        } else {
            buf_out = avfilter_ref_buffer(head, AV_PERM_READ);
            if (!buf_out)
                return AVERROR(ENOMEM);

            buf_out->audio->nb_samples = link->request_samples;
            buffer_offset(link, head, link->request_samples);
        }
    } else {
        int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

        if (!s->buf_out) {
            s->buf_out = ff_get_audio_buffer(link, AV_PERM_WRITE,
                                             link->request_samples);
            if (!s->buf_out)
                return AVERROR(ENOMEM);

            s->buf_out->audio->nb_samples = 0;
            s->buf_out->pts               = head->pts;
            s->allocated_samples          = link->request_samples;
        } else if (link->request_samples != s->allocated_samples) {
            av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
                   "buffer was returned.\n");
            return AVERROR(EINVAL);
        }

        while (s->buf_out->audio->nb_samples < s->allocated_samples) {
            int len = FFMIN(s->allocated_samples - s->buf_out->audio->nb_samples,
                            head->audio->nb_samples);

            av_samples_copy(s->buf_out->extended_data, head->extended_data,
                            s->buf_out->audio->nb_samples, 0, len, nb_channels,
                            link->format);
            s->buf_out->audio->nb_samples += len;

            if (len == head->audio->nb_samples) {
                avfilter_unref_buffer(head);
                queue_pop(s);

                if (!s->root.next &&
                    (ret = ff_request_frame(ctx->inputs[0])) < 0) {
                    if (ret == AVERROR_EOF) {
                        av_samples_set_silence(s->buf_out->extended_data,
                                               s->buf_out->audio->nb_samples,
                                               s->allocated_samples -
                                               s->buf_out->audio->nb_samples,
                                               nb_channels, link->format);
                        s->buf_out->audio->nb_samples = s->allocated_samples;
                        break;
                    }
                    return ret;
                }
                head = s->root.next->buf;
            } else {
                buffer_offset(link, head, len);
            }
        }
        buf_out = s->buf_out;
        s->buf_out = NULL;
    }
    return ff_filter_samples(link, buf_out);
}
Example 16
static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

            ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, frame->channels,
                                   frame->nb_samples, frame->format, 1);
            if (ret < 0) {
                fprintf(stderr, "Could not allocate audio buffer\n");
                return AVERROR(ENOMEM);
            }

            /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
            audio_dst_bufsize =
                av_samples_get_buffer_size(NULL, frame->channels,
                                           frame->nb_samples, frame->format, 1);

            /* copy audio data to destination buffer:
             * this is required since rawaudio expects non aligned data */
            av_samples_copy(audio_dst_data, frame->data, 0, 0,
                            frame->nb_samples, frame->channels, frame->format);

            /* write to rawaudio file */
            fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
            av_freep(&audio_dst_data[0]);
        }
    }

    return ret;
}
Example 17
/**
 * Decode a frame to a packet, run the result through SwrContext, if desired, encode it via an appropriate
 * encoder, and write the results to the Java-side native buffer.
 *
 * @param aio       FFAudio context
 * @param cached    true or false
 * @return number of bytes placed into java buffer or a negative value, if something went wrong
 */
static int decode_packet(FFAudioIO *aio, int cached) {
    int res = 0;
    uint8_t **resample_buf = NULL;
    jobject byte_buffer = NULL;
    uint8_t *javaBuffer = NULL;
    uint32_t out_buf_size = 0;
    int out_buf_samples = 0;
    int64_t out_channel_count;
    int64_t out_sample_rate;
    int flush = aio->got_frame;
    enum AVSampleFormat out;
    int bytesConsumed = 0;

    init_ids(aio->env, aio->java_instance);

    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out);

    resample_buf = av_mallocz(sizeof(uint8_t *) * 1); // one plane!

    // make sure we really have an audio packet
    if (aio->decode_packet.stream_index == aio->stream_index) {
        // decode frame
        // got_frame indicates whether we got a frame
        bytesConsumed = avcodec_decode_audio4(aio->decode_context, aio->decode_frame, &aio->got_frame, &aio->decode_packet);
        if (bytesConsumed < 0) {
            throwUnsupportedAudioFileExceptionIfError(aio->env, bytesConsumed, "Failed to decode audio frame.");
            return bytesConsumed;
        }

        if (aio->got_frame) {

            aio->decoded_samples += aio->decode_frame->nb_samples;
            out_buf_samples = aio->decode_frame->nb_samples;
#ifdef DEBUG
            fprintf(stderr, "samples%s n:%" PRIu64 " nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   aio->decoded_samples, aio->decode_frame->nb_samples,
                   av_ts2timestr(aio->decode_frame->pts, &aio->decode_context->time_base));
#endif

            // adjust out sample number for a different sample rate
            // this is an estimate!!
            out_buf_samples = av_rescale_rnd(
                    swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate) + aio->decode_frame->nb_samples,
                    out_sample_rate,
                    aio->stream->codecpar->sample_rate,
                    AV_ROUND_UP
            );

            // allocate new aio->audio_data buffers
            res = av_samples_alloc(aio->audio_data, NULL, av_frame_get_channels(aio->decode_frame),
                                   aio->decode_frame->nb_samples, aio->decode_frame->format, 1);
            if (res < 0) {
                throwIOExceptionIfError(aio->env, res, "Could not allocate audio buffer.");
                return AVERROR(ENOMEM);
            }
            // copy audio data to aio->audio_data
            av_samples_copy(aio->audio_data, aio->decode_frame->data, 0, 0,
                            aio->decode_frame->nb_samples, av_frame_get_channels(aio->decode_frame), aio->decode_frame->format);

            res = resample(aio, resample_buf, out_buf_samples, (const uint8_t **)aio->audio_data, aio->decode_frame->nb_samples);
            if (res < 0) goto bail;
            else out_buf_samples = res;

        } else if (flush && swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate)) {

            res = resample(aio, resample_buf, swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate), NULL, 0);
            if (res < 0) goto bail;
            else out_buf_samples = res;
        } else {
#ifdef DEBUG
            fprintf(stderr, "Got no frame.\n");
#endif
        }

        if (out_buf_samples > 0) {

            res =  av_samples_get_buffer_size(NULL, (int)out_channel_count, out_buf_samples, out, 1);
            if (res < 0) goto bail;
            else out_buf_size = res;

            // ensure native buffer capacity
            if (aio->java_buffer_capacity < out_buf_size) {
                aio->java_buffer_capacity = (*aio->env)->CallIntMethod(aio->env, aio->java_instance, setNativeBufferCapacity_MID, (jint)out_buf_size);
            }
            // get java-managed byte buffer reference
            byte_buffer = (*aio->env)->GetObjectField(aio->env, aio->java_instance, nativeBuffer_FID);
            if (!byte_buffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get native buffer.");
                goto bail;
            }

            // we have some samples, let's copy them to the java buffer, using the desired encoding
            javaBuffer = (uint8_t *)(*aio->env)->GetDirectBufferAddress(aio->env, byte_buffer);
            if (!javaBuffer) {
                throwIOExceptionIfError(aio->env, 1, "Failed to get address for native buffer.");
                goto bail;
            }
            if (aio->encode_context) {
                aio->encode_frame->nb_samples = out_buf_samples;
                res = encode_buffer(aio, resample_buf[0], out_buf_size, javaBuffer);
                if (res < 0) {
                    out_buf_size = 0;
                    goto bail;
                }
                out_buf_size = res;
            } else {
                memcpy(javaBuffer, resample_buf[0], out_buf_size);
            }
            // we already wrote to the buffer, now we still need to
            // set new bytebuffer limit and position to 0.
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, rewind_MID);
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, limit_MID, out_buf_size);
        }
    }

    aio->resampled_samples += out_buf_size;

bail:

    if (resample_buf) {
        if (resample_buf[0]) av_freep(&resample_buf[0]);
        av_free(resample_buf);
    }
    if (aio->audio_data[0]) av_freep(&aio->audio_data[0]);

    return res;
}
Example 18
int COMXAudioCodecOMX::GetData(BYTE** dst, double &dts, double &pts)
{
  if (!m_bGotFrame)
    return 0;
  int inLineSize, outLineSize;
  /* input audio is aligned */
  int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0);
  /* output audio will be packed */
  int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1);

  if (m_iBufferOutputAlloced < m_iBufferOutputUsed + outputSize)
  {
     m_pBufferOutput = (BYTE*)av_realloc(m_pBufferOutput, m_iBufferOutputUsed + outputSize + FF_INPUT_BUFFER_PADDING_SIZE);
     m_iBufferOutputAlloced = m_iBufferOutputUsed + outputSize;
  }
  *dst = m_pBufferOutput;

  /* need to convert format */
  if(m_pCodecContext->sample_fmt != m_desiredSampleFormat)
  {
    if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels))
    {
      swr_free(&m_pConvert);
      m_channels = m_pCodecContext->channels;
    }

    if(!m_pConvert)
    {
      m_iSampleFormat = m_pCodecContext->sample_fmt;
      m_pConvert = swr_alloc_set_opts(NULL,
                      av_get_default_channel_layout(m_pCodecContext->channels), 
                      m_desiredSampleFormat, m_pCodecContext->sample_rate,
                      av_get_default_channel_layout(m_pCodecContext->channels), 
                      m_pCodecContext->sample_fmt, m_pCodecContext->sample_rate,
                      0, NULL);

      if(!m_pConvert || swr_init(m_pConvert) < 0)
      {
        CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to initialise convert format %d to %d", m_pCodecContext->sample_fmt, m_desiredSampleFormat);
        return 0;
      }
    }

    /* use unaligned flag to keep output packed */
    uint8_t *out_planes[m_pCodecContext->channels];
    if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
       swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0)
    {
      CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to convert format %d to %d", (int)m_pCodecContext->sample_fmt, m_desiredSampleFormat);
      outputSize = 0;
    }
  }
  else
  {
    /* copy to a contiguous buffer */
    uint8_t *out_planes[m_pCodecContext->channels];
    if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
      av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0 )
    {
      outputSize = 0;
    }
  }
  int desired_size = AUDIO_DECODE_OUTPUT_BUFFER * (m_pCodecContext->channels * GetBitsPerSample()) >> (rounded_up_channels_shift[m_pCodecContext->channels] + 4);

  if (m_bFirstFrame)
  {
    CLog::Log(LOGDEBUG, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d buf=%p, desired=%d", inputSize, outputSize, inLineSize, outLineSize, *dst, desired_size);
    m_bFirstFrame = false;
  }
  m_iBufferOutputUsed += outputSize;

  if (!m_bNoConcatenate && m_pCodecContext->sample_fmt == AV_SAMPLE_FMT_FLTP && m_frameSize && (int)m_frameSize != outputSize)
    CLog::Log(LOGERROR, "COMXAudioCodecOMX::GetData Unexpected change of size (%d->%d)", m_frameSize, outputSize);
  m_frameSize = outputSize;

  // if next buffer submitted won't fit then flush it out
  if (m_iBufferOutputUsed + outputSize > desired_size || m_bNoConcatenate)
  {
     int ret = m_iBufferOutputUsed;
     m_bGotFrame = false;
     m_iBufferOutputUsed = 0;
     dts = m_dts;
     pts = m_pts;
     return ret;
  }
  return 0;
}
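Example 18 above and Examples 20 and 22 below share one idiom: av_samples_fill_arrays() with align = 1 points per-plane pointers into a single contiguous buffer, and av_samples_copy() then packs the decoder's (possibly padded) planes back to back. A standalone sketch of just that step, assuming at most AV_NUM_DATA_POINTERS (8) channels:

/* Sketch: pack a decoded frame's planes into one contiguous buffer.
 * dst_buf must hold av_samples_get_buffer_size(NULL, channels,
 * frame->nb_samples, fmt, 1) bytes. */
static int pack_samples(uint8_t *dst_buf, const AVFrame *frame,
                        int channels, enum AVSampleFormat fmt)
{
    uint8_t *dst_planes[AV_NUM_DATA_POINTERS]; /* enough for <= 8 channels */
    int ret;

    /* align = 1: no per-plane padding, planes laid out back to back */
    ret = av_samples_fill_arrays(dst_planes, NULL, dst_buf, channels,
                                 frame->nb_samples, fmt, 1);
    if (ret < 0)
        return ret;

    return av_samples_copy(dst_planes, frame->extended_data, 0, 0,
                           frame->nb_samples, channels, fmt);
}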
Example 19
	/*
	 * Re-chunk praw into fixed-size blocks on the list
	 */
	static void rebuffer_sample(AVRaw *praw, AVRaw **head, AVRaw **tail, AVSampleFormat fmt, int channel, int samples)
	{
		AVRaw * pnew;
		int ds, ss;

		if (praw)
		{
			/* If the list is still empty, insert a new block first */
			if (!(*tail))
			{
				pnew = make_audio_raw(fmt, channel, samples);
				if (pnew)
					list_push_raw(head, tail, pnew);
				else
					return;
			}

			/* If the last block is full, another one must be allocated */
			if ((*tail)->samples == (*tail)->seek_sample)
			{
				pnew = make_audio_raw(fmt, channel, samples);
				if (pnew)
					list_push_raw(head, tail, pnew);
				else
					return;
			}
			else
			{
				pnew = *tail;
			}

			int offset = 0;
			while (offset < praw->samples)
			{
				/* ds: free space in the destination block, ss: source data not yet copied */
				ds = pnew->samples - pnew->seek_sample;
				ss = praw->samples - offset;
				if (ds >= ss)
				{
					/* enough room for the rest of praw */
					av_samples_copy(pnew->data, praw->data, pnew->seek_sample, offset, ss, channel, fmt);
					pnew->seek_sample += ss;
					return;
				}
				else
				{
					av_samples_copy(pnew->data, praw->data, pnew->seek_sample, offset, ds, channel, fmt);
					offset += ds;
					pnew->seek_sample += ds;
					if (pnew->seek_sample == pnew->samples)
					{ /* more data remains, start a new block */
						pnew = make_audio_raw(fmt, channel, samples);
						if (pnew)
							list_push_raw(head, tail, pnew);
						else
							return;
					}
				}
			}
		}
	}
Example 20
int COMXAudioCodecOMX::GetData(BYTE** dst)
{
  if (!m_bGotFrame)
    return 0;
  int inLineSize, outLineSize;
  /* input audio is aligned */
  int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0);
  /* output audio will be packed */
  int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1);
  bool cont = !m_pFrame1->data[1] || (m_pFrame1->data[1] == m_pFrame1->data[0] + inLineSize && inLineSize == outLineSize && inLineSize * m_pCodecContext->channels == inputSize);

  if (m_iBufferOutputAlloced < outputSize)
  {
     av_free(m_pBufferOutput);
     m_pBufferOutput = (BYTE*)av_malloc(outputSize + FF_INPUT_BUFFER_PADDING_SIZE);
     m_iBufferOutputAlloced = outputSize;
  }
  *dst = m_pBufferOutput;

  /* need to convert format */
  if(m_pCodecContext->sample_fmt != m_desiredSampleFormat)
  {
    if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels))
    {
      swr_free(&m_pConvert);
      m_channels = m_pCodecContext->channels;
    }

    if(!m_pConvert)
    {
      m_iSampleFormat = m_pCodecContext->sample_fmt;
      m_pConvert = swr_alloc_set_opts(NULL,
                      av_get_default_channel_layout(m_pCodecContext->channels), 
                      m_desiredSampleFormat, m_pCodecContext->sample_rate,
                      av_get_default_channel_layout(m_pCodecContext->channels), 
                      m_pCodecContext->sample_fmt, m_pCodecContext->sample_rate,
                      0, NULL);

      if(!m_pConvert || swr_init(m_pConvert) < 0)
      {
        CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to initialise convert format %d to %d", m_pCodecContext->sample_fmt, m_desiredSampleFormat);
        return 0;
      }
    }

    /* use unaligned flag to keep output packed */
    uint8_t *out_planes[m_pCodecContext->channels];
    if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
       swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0)
    {
      CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to convert format %d to %d", (int)m_pCodecContext->sample_fmt, m_desiredSampleFormat);
      outputSize = 0;
    }
  }
  else
  {
    /* if it is already contiguous, just return decoded frame */
    if (cont)
    {
      *dst = m_pFrame1->data[0];
    }
    else
    {
      /* copy to a contiguous buffer */
      uint8_t *out_planes[m_pCodecContext->channels];
      if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
        av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0 )
      {
        outputSize = 0;
      }
    }
  }

  if (m_bFirstFrame)
  {
    CLog::Log(LOGDEBUG, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d cont=%d buf=%p", inputSize, outputSize, inLineSize, outLineSize, cont, *dst);
    m_bFirstFrame = false;
  }
  return outputSize;
}
Example 21
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = av_frame_copy_props(dst, src);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        if (src->nb_samples) {
            int ch = src->channels;
            CHECK_CHANNELS_CONSISTENCY(src);
            av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                            dst->nb_samples, ch, dst->format);
        } else {
            av_image_copy(dst->data, dst->linesize, src->data, src->linesize,
                          dst->format, dst->width, dst->height);
        }
        return 0;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz(sizeof(*dst->extended_buf) *
                                       src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc(sizeof(*dst->extended_data) * ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
Example 22
int COMXAudioCodecOMX::GetData(BYTE** dst, double &dts, double &pts)
{
  if (!m_bGotFrame)
    return 0;
  int inLineSize, outLineSize;
  /* input audio is aligned */
  int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0);
  /* output audio will be packed */
  int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1);

  if (!m_bNoConcatenate && m_iBufferOutputUsed && (int)m_frameSize != outputSize)
  {
    LOG_TRACE_2 << "COMXAudioCodecOMX::GetData Unexpected change of size (" << m_frameSize <<" ->"
        << outputSize << ")";
    m_bNoConcatenate = true;
  }

  // if this buffer won't fit then flush out what we have
  int desired_size = AUDIO_DECODE_OUTPUT_BUFFER * (m_pCodecContext->channels * GetBitsPerSample()) >> (rounded_up_channels_shift[m_pCodecContext->channels] + 4);
  if (m_iBufferOutputUsed && (m_iBufferOutputUsed + outputSize > desired_size || m_bNoConcatenate))
  {
     int ret = m_iBufferOutputUsed;
     m_iBufferOutputUsed = 0;
     m_bNoConcatenate = false;
     dts = m_dts;
     pts = m_pts;
     *dst = m_pBufferOutput;
     return ret;
  }
  m_frameSize = outputSize;

  if (m_iBufferOutputAlloced < m_iBufferOutputUsed + outputSize)
  {
     m_pBufferOutput = (BYTE*)av_realloc(m_pBufferOutput, m_iBufferOutputUsed + outputSize + FF_INPUT_BUFFER_PADDING_SIZE);
     m_iBufferOutputAlloced = m_iBufferOutputUsed + outputSize;
  }

  /* need to convert format */
  if(m_pCodecContext->sample_fmt != m_desiredSampleFormat)
  {
    if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels))
    {
      swr_free(&m_pConvert);
      m_channels = m_pCodecContext->channels;
    }

    if(!m_pConvert)
    {
      m_iSampleFormat = m_pCodecContext->sample_fmt;
      m_pConvert = swr_alloc_set_opts(NULL,
                      av_get_default_channel_layout(m_pCodecContext->channels),
                      m_desiredSampleFormat, m_pCodecContext->sample_rate,
                      av_get_default_channel_layout(m_pCodecContext->channels),
                      m_pCodecContext->sample_fmt, m_pCodecContext->sample_rate,
                      0, NULL);

      if(!m_pConvert || swr_init(m_pConvert) < 0)
      {
        LOG_TRACE_2 << "COMXAudioCodecOMX::Decode - Unable to initialise convert format "
            << m_pCodecContext->sample_fmt << " to " << m_desiredSampleFormat;
        return 0;
      }
    }

    /* use unaligned flag to keep output packed */
    uint8_t *out_planes[m_pCodecContext->channels];
    if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
       swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0)
    {
      LOG_TRACE_2 << "COMXAudioCodecOMX::Decode - Unable to convert format " <<
        (int)m_pCodecContext->sample_fmt << " to " << m_desiredSampleFormat;
      outputSize = 0;
    }
  }
  else
  {
    /* copy to a contiguous buffer */
    uint8_t *out_planes[m_pCodecContext->channels];
    if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
      av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0 )
    {
      outputSize = 0;
    }
  }
  m_bGotFrame = false;

  if (m_bFirstFrame)
  {
    char log_buf[512];
    sprintf(log_buf, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d buf=%p, desired=%d",
            inputSize, outputSize, inLineSize, outLineSize, m_pBufferOutput, desired_size);
    LOG_TRACE_2 << log_buf;
    m_bFirstFrame = false;
  }
  m_iBufferOutputUsed += outputSize;
  return 0;
}
Example 23
static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
                        AVFrame **rframe)
{
    AVFrame *frame0, *frame, *buf;
    unsigned nb_samples, nb_frames, i, p;
    int ret;

    /* Note: this function relies on no format changes and must only be
       called with enough samples. */
    av_assert1(samples_ready(link));
    frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
    if (frame->nb_samples >= min && frame->nb_samples < max) {
        *rframe = ff_framequeue_take(&link->fifo);
        return 0;
    }
    nb_frames = 0;
    nb_samples = 0;
    while (1) {
        if (nb_samples + frame->nb_samples > max) {
            if (nb_samples < min)
                nb_samples = max;
            break;
        }
        nb_samples += frame->nb_samples;
        nb_frames++;
        if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
            break;
        frame = ff_framequeue_peek(&link->fifo, nb_frames);
    }

    buf = ff_get_audio_buffer(link, nb_samples);
    if (!buf)
        return AVERROR(ENOMEM);
    ret = av_frame_copy_props(buf, frame0);
    if (ret < 0) {
        av_frame_free(&buf);
        return ret;
    }
    buf->pts = frame0->pts;

    p = 0;
    for (i = 0; i < nb_frames; i++) {
        frame = ff_framequeue_take(&link->fifo);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
                        frame->nb_samples, link->channels, link->format);
        p += frame->nb_samples;
        av_frame_free(&frame);
    }
    if (p < nb_samples) {
        unsigned n = nb_samples - p;
        frame = ff_framequeue_peek(&link->fifo, 0);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
                        link->channels, link->format);
        frame->nb_samples -= n;
        av_samples_copy(frame->extended_data, frame->extended_data, 0, n,
                        frame->nb_samples, link->channels, link->format);
        if (frame->pts != AV_NOPTS_VALUE)
            frame->pts += av_rescale_q(n, av_make_q(1, link->sample_rate), link->time_base);
        ff_framequeue_update_peeked(&link->fifo, 0);
        ff_framequeue_skip_samples(&link->fifo, n);
    }

    *rframe = buf;
    return 0;
}
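The tail of take_samples() uses av_samples_copy() twice: once to append the partial head frame into buf, then again, with source and destination overlapping, to shift the remaining samples to the front of that frame. A sketch of just the skip step, under the same assumption upstream makes, namely that av_samples_copy() tolerates this dst-below-src forward overlap:

/* Sketch: drop the first n samples of frame in place and fix up its pts,
 * mirroring the last block of take_samples() above. */
static void skip_head_samples(AVFilterLink *link, AVFrame *frame, int n)
{
    frame->nb_samples -= n;
    av_samples_copy(frame->extended_data, frame->extended_data, 0, n,
                    frame->nb_samples, link->channels, link->format);
    if (frame->pts != AV_NOPTS_VALUE)
        frame->pts += av_rescale_q(n, av_make_q(1, link->sample_rate),
                                   link->time_base);
}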