Example #1
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;

    av_init_packet(&pkt);
    c = ost->enc;

    frame = get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    return (frame || got_packet) ? 0 : 1;
}
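Most of the examples below repeat the sizing pattern seen here: the samples still buffered inside the resampler (reported by swr_get_delay) are added to the incoming frame's sample count, and the sum is rescaled from the input rate to the output rate with AV_ROUND_UP so the destination buffer can never be too small. A minimal self-contained sketch of that bound (the helper name is ours, not from any of these projects):

#include <libavutil/mathematics.h>
#include <libswresample/swresample.h>

/* Upper bound on output samples produced for one input frame:
 * resampler backlog plus new input, rescaled in_rate -> out_rate.
 * AV_ROUND_UP may overestimate by one sample, which only costs a
 * slightly larger buffer. */
static int64_t dst_samples_bound(SwrContext *swr, int in_rate, int out_rate,
                                 int in_nb_samples)
{
    return av_rescale_rnd(swr_get_delay(swr, in_rate) + in_nb_samples,
                          out_rate, in_rate, AV_ROUND_UP);
}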
Example #2
File: pcm.c Project: AVLeo/libav
int ff_pcm_read_seek(AVFormatContext *s,
                     int stream_index, int64_t timestamp, int flags)
{
    AVStream *st;
    int block_align, byte_rate;
    int64_t pos, ret;

    st = s->streams[0];

    block_align = st->codecpar->block_align ? st->codecpar->block_align :
        (av_get_bits_per_sample(st->codecpar->codec_id) * st->codecpar->channels) >> 3;
    byte_rate = st->codecpar->bit_rate ? st->codecpar->bit_rate >> 3 :
        block_align * st->codecpar->sample_rate;

    if (block_align <= 0 || byte_rate <= 0)
        return -1;
    if (timestamp < 0) timestamp = 0;

    /* compute the position by aligning it to block_align */
    pos = av_rescale_rnd(timestamp * byte_rate,
                         st->time_base.num,
                         st->time_base.den * (int64_t)block_align,
                         (flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP);
    pos *= block_align;

    /* recompute exact position */
    st->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
    if ((ret = avio_seek(s->pb, pos + s->internal->data_offset, SEEK_SET)) < 0)
        return ret;
    return 0;
}
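A quick worked check of the alignment arithmetic above, with hypothetical values: for 16-bit stereo PCM at 44100 Hz (block_align = 4, byte_rate = 176400) and a time_base of 1/44100, seeking to timestamp 22050 lands exactly on the 0.5 s block boundary, and the recomputed timestamp round-trips to the same value:

#include <assert.h>
#include <libavutil/mathematics.h>

int main(void)
{
    /* hypothetical stream: 16-bit stereo PCM, 44100 Hz, time_base 1/44100 */
    int64_t block_align = 4, byte_rate = 176400, ts = 22050; /* 0.5 s */

    /* align the target to a block boundary, as ff_pcm_read_seek does */
    int64_t pos = av_rescale_rnd(ts * byte_rate, 1,
                                 44100 * block_align, AV_ROUND_DOWN);
    pos *= block_align;                              /* 88200 bytes */

    /* recompute the exact timestamp for the aligned byte position */
    int64_t dts = av_rescale(pos, 44100, byte_rate); /* back to 22050 */
    assert(pos == 88200 && dts == ts);
    return 0;
}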
Example #3
static struct mp_audio *play(struct af_instance *af, struct mp_audio *data)
{
    struct af_resample *s = af->priv;
    struct mp_audio *in   = data;
    struct mp_audio *out  = af->data;

    int in_size     = data->len;
    int in_samples  = in_size / (data->bps * data->nch);
    int out_samples = avresample_available(s->avrctx) +
        av_rescale_rnd(get_delay(s) + in_samples,
                       s->ctx.out_rate, s->ctx.in_rate, AV_ROUND_UP);
    int out_size    = out->bps * out_samples * out->nch;

    if (talloc_get_size(out->audio) < out_size)
        out->audio = talloc_realloc_size(out, out->audio, out_size);

    af->delay = out->bps * av_rescale_rnd(get_delay(s),
                                          s->ctx.out_rate, s->ctx.in_rate,
                                          AV_ROUND_UP);

#if !USE_SET_CHANNEL_MAPPING
    reorder_channels(data->audio, s->reorder_in, data->bps, data->nch, in_samples);
#endif

    out_samples = avresample_convert(s->avrctx,
            (uint8_t **) &out->audio, out_size, out_samples,
            (uint8_t **) &in->audio,  in_size,  in_samples);

    *data = *out;

#if USE_SET_CHANNEL_MAPPING
    if (needs_reorder(s->reorder_out, out->nch)) {
        if (talloc_get_size(s->reorder_buffer) < out_size)
            s->reorder_buffer = talloc_realloc_size(s, s->reorder_buffer, out_size);
        data->audio = s->reorder_buffer;
        out_samples = avresample_convert(s->avrctx_out,
                (uint8_t **) &data->audio, out_size, out_samples,
                (uint8_t **) &out->audio, out_size, out_samples);
    }
#else
    reorder_channels(data->audio, s->reorder_out, out->bps, out->nch, out_samples);
#endif

    data->len = out->bps * out_samples * out->nch;
    return data;
}
Example #4
static int get_out_samples(struct af_resample *s, int in_samples)
{
#if LIBSWRESAMPLE_VERSION_MAJOR > 1 || LIBSWRESAMPLE_VERSION_MINOR >= 2
    return swr_get_out_samples(s->avrctx, in_samples);
#else
    return av_rescale_rnd(in_samples, s->ctx.out_rate, s->ctx.in_rate, AV_ROUND_UP)
           + swr_get_delay(s->avrctx, s->ctx.out_rate);
#endif
}
Example #5
boost::int64_t AudioResampleImpl::resultSamples(boost::int64_t sourceSamples) const
{
	if (!m_resampler)
		return 0;

	return av_rescale_rnd(
		swr_get_delay(m_resampler.get(), m_from.sampleRate()) + sourceSamples,
		m_to.sampleRate(), m_from.sampleRate(), AV_ROUND_UP);
}
Example #6
AudioData AudioResampleImpl::process(AudioComponentPtr source, int desiredSamples) const
{
	AudioData result(m_to);

	if (!m_resampler) return result;

	if (source->format() != m_from)
	{
		assert(!"AudioResampler::process: incompatible source format");
		return result;
	}

	int lastReadSamples = 0;
	int resultSamplesPerChannelFact = 0;
	do
	{
		AudioData sourceData = source->read(
			av_rescale_rnd(desiredSamples,
			               m_from.sampleRate(),
			               m_to.sampleRate(),
			               AV_ROUND_DOWN));

		if (sourceData.isEmpty())
			break;

		lastReadSamples = sourceData.numSamples();

		const auto sourceSamplesPerChannel = sourceData.numSamples();
		const auto resultSamplesPerChannel = resultSamples(sourceSamplesPerChannel);
		result.setSamples(resultSamplesPerChannel);

		boost::scoped_array<char*> preparedSourceData;
		splitAudioData(sourceData, preparedSourceData);

		boost::scoped_array<char*> preparedDestData;
		splitAudioData(result, preparedDestData);

		const uint8_t ** sourcePtr = (const uint8_t **)preparedSourceData.get();
		uint8_t ** destPtr = (uint8_t**)preparedDestData.get();

		resultSamplesPerChannelFact = swr_convert(m_resampler.get(), destPtr, static_cast<int>(resultSamplesPerChannel),
			sourcePtr, sourceSamplesPerChannel );

		if (!resultSamplesPerChannelFact)
		{
			qDebug() << "AudioResampler::process: not enough data to process ("
				<< m_from.sampleRate() << "," << m_to.sampleRate() << ").\t source samples" << sourceSamplesPerChannel;
		}

	} while(!resultSamplesPerChannelFact);

	result.setSamples(resultSamplesPerChannelFact);

	return result;
}
Example #7
    int MpegLoader::readMore(QByteArray &result, qint64 &samplesAdded) 
    {
        int res;
        if ((res = av_read_frame(FmtContext_, &Avpkt_)) < 0)
            return -1;

        if (Avpkt_.stream_index == StreamId_) 
        {
            av_frame_unref(Frame_);
            int got_frame = 0;
            if ((res = avcodec_decode_audio4(CodecContext_, Frame_, &got_frame, &Avpkt_)) < 0) 
            {
                av_packet_unref(&Avpkt_);
                if (res == AVERROR_INVALIDDATA) 
                    return 0;
                return -1;
            }

            if (got_frame) 
            {
                if (OutSamplesData_) 
                {
                    int64_t dstSamples = av_rescale_rnd(swr_get_delay(SwrContext_, SrcRate_) + Frame_->nb_samples, DstRate_, SrcRate_, AV_ROUND_UP);
                    if (dstSamples > MaxResampleSamples_) 
                    {
                        MaxResampleSamples_ = dstSamples;
                        av_free(OutSamplesData_[0]);

                        if ((res = av_samples_alloc(OutSamplesData_, 0, OutChannels, MaxResampleSamples_, OutFormat, 1)) < 0) 
                        {
                            OutSamplesData_[0] = 0;
                            av_packet_unref(&Avpkt_);
                            return -1;
                        }
                    }

                    if ((res = swr_convert(SwrContext_, OutSamplesData_, dstSamples, (const uint8_t**)Frame_->extended_data, Frame_->nb_samples)) < 0) 
                    {
                        av_packet_unref(&Avpkt_);
                        return -1;
                    }

                    qint32 resultLen = av_samples_get_buffer_size(0, OutChannels, res, OutFormat, 1);
                    result.append((const char*)OutSamplesData_[0], resultLen);
                    samplesAdded += resultLen / SampleSize_;
                } 
                else 
                {
                    result.append((const char*)Frame_->extended_data[0], Frame_->nb_samples * SampleSize_);
                    samplesAdded += Frame_->nb_samples;
                }
            }
        }
        av_packet_unref(&Avpkt_);
        return 1;
    }
Example #8
static int WriteAudioFrame(AVFormatContext *oc, OutputStream *ost, AVI6 *avi)
{
	AVCodecContext *c = NULL;
	AVPacket pkt = { 0 };
	AVFrame *frame = NULL;
	int ret = 0;
	int got_packet = 0;
	int dst_nb_samples = 0;

	av_init_packet(&pkt);
	c = ost->st->codec;

	frame = GetAudioFrame(ost, avi);

	if (frame) {
		// determine the number of samples after format conversion
		dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, frame->sample_rate) + frame->nb_samples,
										c->sample_rate, c->sample_rate, AV_ROUND_UP);
		//av_assert0(dst_nb_samples == frame->nb_samples);

		// make the frame writable
		ret = av_frame_make_writable(ost->frame);
		if (ret < 0)
			exit(1);

		// convert the audio format
		ret = swr_convert(ost->swr_ctx,
						  ost->frame->data, dst_nb_samples,
						  (const uint8_t **)frame->data, frame->nb_samples);
		if (ret < 0) {
			fprintf(stderr, "Error while converting\n");
			return 0;
		}
		frame = ost->frame;

		frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
		ost->samples_count += dst_nb_samples;

		ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
		if (ret < 0) {
			fprintf(stderr, "Error encoding audio frame: %s\n", MakeErrorString(ret));
			return 0;
		}

		if (got_packet) {
			ret = WriteFrame(oc, &c->time_base, ost->st, &pkt);
			if (ret < 0) {
				fprintf(stderr, "Error while writing audio frame: %s\n",
						MakeErrorString(ret));
				return 0;
			}
		}
	}

	return (frame || got_packet) ? 0 : 1;
}
Example #9
static int applehttp_read_seek(AVFormatContext *s, int stream_index,
                               int64_t timestamp, int flags)
{
    AppleHTTPContext *c = s->priv_data;
    int i, j, ret;

    if ((flags & AVSEEK_FLAG_BYTE) || !c->variants[0]->finished)
        return AVERROR(ENOSYS);

    timestamp = av_rescale_rnd(timestamp, 1, stream_index >= 0 ?
                               s->streams[stream_index]->time_base.den :
                               AV_TIME_BASE, flags & AVSEEK_FLAG_BACKWARD ?
                               AV_ROUND_DOWN : AV_ROUND_UP);
    ret = AVERROR(EIO);
    for (i = 0; i < c->n_variants; i++) {
        /* Reset reading */
        struct variant *var = c->variants[i];
        int64_t pos = c->first_timestamp == AV_NOPTS_VALUE ? 0 :
                      av_rescale_rnd(c->first_timestamp, 1,
                          stream_index >= 0 ? s->streams[stream_index]->time_base.den : AV_TIME_BASE,
                          flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP);
        if (var->input) {
            ffurl_close(var->input);
            var->input = NULL;
        }
        av_free_packet(&var->pkt);
        reset_packet(&var->pkt);
        var->pb.eof_reached = 0;

        /* Locate the segment that contains the target timestamp */
        for (j = 0; j < var->n_segments; j++) {
            if (timestamp >= pos &&
                timestamp < pos + var->segments[j]->duration) {
                var->cur_seq_no = var->start_seq_no + j;
                ret = 0;
                break;
            }
            pos += var->segments[j]->duration;
        }
    }
    return ret;
}
Example #10
static int ffmpeg_decode(struct ffmpeg_file *file)
{
    if (file->pkt_decoded >= file->pkt->size) {
        int e = av_read_frame(file->format, file->pkt);
        if (e < 0) {
            return e;
        }
        file->pkt_decoded = 0;
    }

    int got_frame = 0;
    int e = avcodec_decode_audio4(file->codec, file->frame, &got_frame,
            file->pkt);
    if (e < 0) {
        return e;
    }
    if (!got_frame) {
        return 0;
    }
    file->pkt_decoded += e;

    AVFrame *frame = file->frame;
    int delay_nsamples = swr_get_delay(file->swr, file->codec->sample_rate);
    int dst_nsamples = av_rescale_rnd(delay_nsamples + frame->nb_samples,
            file->sample_rate, file->codec->sample_rate, AV_ROUND_UP);
    if (file->buf_nsamples < dst_nsamples) {
        if (file->buf) {
            av_freep(&file->buf[0]);
        }
        av_freep(&file->buf);
        int e = av_samples_alloc_array_and_samples(&file->buf, NULL,
                file->channels, dst_nsamples, file->sample_fmt, 0);
        if (e < 0) {
            return e;
        }
        file->buf_nsamples = dst_nsamples;
    }

    int ns = swr_convert(file->swr, file->buf, dst_nsamples,
            (const uint8_t**) frame->data, frame->nb_samples);
    int nb = av_samples_get_buffer_size(NULL, file->channels,
            ns, file->sample_fmt, 1);
    if (nb < 0) {
        return nb;
    }
    file->buf_len = nb;
    file->buf_offset = 0;
    if (file->frame->pts > 0) {
        file->time = file->frame->pts;
    }

    return nb;
}
Example #11
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext  *ctx = inlink->dst;
    ResampleContext    *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (s->avr) {
        AVFilterBufferRef *buf_out;
        int delay, nb_samples, ret;

        /* maximum possible samples lavr can output */
        delay      = avresample_get_delay(s->avr);
        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
                                    outlink->sample_rate, inlink->sample_rate,
                                    AV_ROUND_UP);

        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        ret     = avresample_convert(s->avr, (void**)buf_out->extended_data,
                                     buf_out->linesize[0], nb_samples,
                                     (void**)buf->extended_data, buf->linesize[0],
                                     buf->audio->nb_samples);

        av_assert0(!avresample_available(s->avr));

        if (s->next_pts == AV_NOPTS_VALUE) {
            if (buf->pts == AV_NOPTS_VALUE) {
                av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                       "assuming 0.\n");
                s->next_pts = 0;
            } else
                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);
        }

        if (ret > 0) {
            buf_out->audio->nb_samples = ret;
            if (buf->pts != AV_NOPTS_VALUE) {
                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
                                            outlink->time_base) -
                               av_rescale(delay, outlink->sample_rate,
                                          inlink->sample_rate);
            } else
                buf_out->pts = s->next_pts;

            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;

            ff_filter_samples(outlink, buf_out);
        }
        avfilter_unref_buffer(buf);
    } else
        ff_filter_samples(outlink, buf);
}
Example #12
static int read_seek2(AVFormatContext *s, int stream_index,
                      int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    ASSContext *ass = s->priv_data;

    if (flags & AVSEEK_FLAG_BYTE) {
        return AVERROR(ENOSYS);
    } else if (flags & AVSEEK_FLAG_FRAME) {
        if (ts < 0 || ts >= ass->event_count)
            return AVERROR(ERANGE);
        ass->event_index = ts;
    } else {
        int i, idx = -1;
        int64_t min_ts_diff = INT64_MAX;
        if (stream_index == -1) {
            AVRational time_base = s->streams[0]->time_base;
            ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
            min_ts = av_rescale_rnd(min_ts, time_base.den,
                                    time_base.num * (int64_t)AV_TIME_BASE,
                                    AV_ROUND_UP);
            max_ts = av_rescale_rnd(max_ts, time_base.den,
                                    time_base.num * (int64_t)AV_TIME_BASE,
                                    AV_ROUND_DOWN);
        }
        /* TODO: ass->event[] is sorted by pts so we could do a binary search */
        for (i=0; i<ass->event_count; i++) {
            int64_t pts = get_pts(ass->event[i]);
            int64_t ts_diff = FFABS(pts - ts);
            if (pts >= min_ts && pts <= max_ts && ts_diff < min_ts_diff) {
                min_ts_diff = ts_diff;
                idx = i;
            }
        }
        if (idx < 0)
            return AVERROR(ERANGE);
        ass->event_index = idx;
    }
    return 0;
}
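A worked check of the conservative window rounding above, with hypothetical numbers: for a stream time_base of 1/90000 (the MPEG-TS tick rate), a request window of [1000001, 1999999] in AV_TIME_BASE (microsecond) units maps to [90001, 179999] in stream units. Because min_ts is rounded up and max_ts down, the converted window can only shrink; it never widens past what the caller asked for:

#include <assert.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t min_ts = 1000001, max_ts = 1999999; /* microseconds */
    /* stream time_base = 1/90000 */
    int64_t lo = av_rescale_rnd(min_ts, 90000, (int64_t)AV_TIME_BASE, AV_ROUND_UP);
    int64_t hi = av_rescale_rnd(max_ts, 90000, (int64_t)AV_TIME_BASE, AV_ROUND_DOWN);
    assert(lo == 90001 && hi == 179999); /* window shrinks, never widens */
    return 0;
}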
Example #13
static struct mp_audio *play(struct af_instance *af, struct mp_audio *data)
{
    struct af_resample *s = af->priv;
    struct mp_audio *in   = data;
    struct mp_audio *out  = af->data;

    out->samples = avresample_available(s->avrctx) +
        av_rescale_rnd(get_delay(s) + in->samples,
                       s->ctx.out_rate, s->ctx.in_rate, AV_ROUND_UP);

    mp_audio_realloc_min(out, out->samples);

    af->delay = get_delay(s) / (double)s->ctx.in_rate;

#if !USE_SET_CHANNEL_MAPPING
    do_reorder(in, s->reorder_in);
#endif

    if (out->samples) {
        out->samples = avresample_convert(s->avrctx,
            (uint8_t **) out->planes, out->samples * out->sstride, out->samples,
            (uint8_t **) in->planes,  in->samples  * in->sstride,  in->samples);
        if (out->samples < 0)
            return NULL; // error
    }

    *data = *out;

#if USE_SET_CHANNEL_MAPPING
    if (needs_reorder(s->reorder_out, out->nch)) {
        if (af_fmt_is_planar(out->format)) {
            reorder_planes(data, s->reorder_out);
        } else {
            int out_size = out->samples * out->sstride;
            if (talloc_get_size(s->reorder_buffer) < out_size)
                s->reorder_buffer = talloc_realloc_size(s, s->reorder_buffer, out_size);
            data->planes[0] = s->reorder_buffer;
            int out_samples = avresample_convert(s->avrctx_out,
                    (uint8_t **) data->planes, out_size, out->samples,
                    (uint8_t **) out->planes, out_size, out->samples);
            assert(out_samples == data->samples);
        }
    }
#else
    do_reorder(data, s->reorder_out);
#endif

    return data;
}
Example #14
AudioBufferPtr AudioDecoderThread::resampleAudio(char* pDecodedData, int framesDecoded,
        int currentSampleFormat)
{
    if (!m_pResampleContext) {
#ifdef LIBAVRESAMPLE_VERSION
        m_pResampleContext = avresample_alloc_context();
        av_opt_set_int(m_pResampleContext, "in_channel_layout",
                av_get_default_channel_layout(m_pStream->codec->channels), 0);
        av_opt_set_int(m_pResampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_rate", m_InputSampleRate, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_rate", m_AP.m_SampleRate, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_fmt",
                (AVSampleFormat)currentSampleFormat, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        int err = avresample_open(m_pResampleContext);
        AVG_ASSERT(err >= 0);
#else
        m_pResampleContext = av_audio_resample_init(m_AP.m_Channels, 
                m_pStream->codec->channels, m_AP.m_SampleRate, m_InputSampleRate,
                AV_SAMPLE_FMT_S16, (AVSampleFormat)currentSampleFormat, 16, 10, 0, 0.8);
#endif
        AVG_ASSERT(m_pResampleContext);
    }
#ifdef LIBAVRESAMPLE_VERSION
    uint8_t *pResampledData;
    int leftoverSamples = avresample_available(m_pResampleContext);
    int framesAvailable = leftoverSamples +
            av_rescale_rnd(avresample_get_delay(m_pResampleContext) +
                    framesDecoded, m_AP.m_SampleRate, m_InputSampleRate, AV_ROUND_UP);
    av_samples_alloc(&pResampledData, 0, 2, framesAvailable,
            AV_SAMPLE_FMT_S16, 0);
    int framesResampled = avresample_convert(m_pResampleContext, &pResampledData, 0, 
            framesAvailable, (uint8_t**)&pDecodedData, 0, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
    av_freep(&pResampledData);
#else
    short pResampledData[AVCODEC_MAX_AUDIO_FRAME_SIZE/2];
    int framesResampled = audio_resample(m_pResampleContext, pResampledData,
            (short*)pDecodedData, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
#endif
    return pBuffer;
}
Example #15
static int cmf_read_packet(AVFormatContext *s, AVPacket *mpkt)
{
    struct cmf *cmf = s->priv_data;
    int ret;
    struct cmfvpb *ci;
    AVStream *st_parent;

retry_read:
    ci = cmf->cmfvpb;

    if (cmf->sctx != NULL)
        ret = av_read_frame(cmf->sctx, &cmf->pkt);
    else
        ret = -1;

    if (ret < 0) {
        if (cmf->sctx) {
            cmf_flush_packet_queue(cmf->sctx);
        }
        cmf_reset_packet(&cmf->pkt);
        url_lpreset(&ci->vlpcontext);
        cmf->parsering_index = cmf->parsering_index + 1;
        av_log(s, AV_LOG_INFO, "\n--cmf_read_packet parsernextslice cmf->parsering_index[0x%llx]--\n", cmf->parsering_index);
        if (cmf->parsering_index >= ci->total_num) {
            av_log(s, AV_LOG_INFO, " cmf_read_packet to lastindex,curindex [%lld] totalnum[%lld]\n", cmf->parsering_index, ci->total_num);
            return AVERROR_EOF;
        }
        ret = cmf_parser_next_slice(s, cmf->parsering_index, 0);
        if (ret >= 0) {
            av_log(s, AV_LOG_INFO, " goto reread, ret=%d\n", ret);
            goto retry_read;
        } else {
            av_log(s, AV_LOG_INFO, "cmf_parser_next_slice failed..., ret=%d\n", ret);
            return ret;
        }
    }
    st_parent = s->streams[cmf->pkt.stream_index];
    cmf->calc_startpts = av_rescale_rnd(ci->start_time, st_parent->time_base.den,
                                        1000 * st_parent->time_base.num, AV_ROUND_ZERO);
    cmf->pkt.pts = cmf->calc_startpts + cmf->pkt.pts;

    if (st_parent->start_time == AV_NOPTS_VALUE) {
        st_parent->start_time  = cmf->pkt.pts;
        av_log(s, AV_LOG_INFO, "first packet st->start_time [0x%llx] [0x%llx]\n", st_parent->start_time, cmf->pkt.stream_index);
    }
    *mpkt = cmf->pkt;
    return 0;
}
Example #16
BOOL CVideoLivRecord::write_audio_frame(AVStream *st, void* pBuffer, LONG len)
{
	AVCodecContext* avcc = st->codec;
	AVPacket pkt = {0};
	av_init_packet(&pkt);
	AVFrame* frame = get_audio_frame(st, pBuffer, len);
	int ret = 0;
	int dst_nb_samples = 0;
	if (frame){
		dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(m_pAudioSwrctx, avcc->sample_rate) + frame->nb_samples,
			avcc->sample_rate, avcc->sample_rate, AV_ROUND_UP);
		av_assert0(dst_nb_samples == frame->nb_samples);
		ret = av_frame_make_writable(m_pAudioFrame);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- av_frame_make_writable() error");
			return FALSE;
		}
		ret = swr_convert(m_pAudioSwrctx, m_pAudioFrame->data, dst_nb_samples, 
			             (const uint8_t**)frame->data, frame->nb_samples);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- av_frame_make_writable() error");
			return FALSE;
		}
		frame = m_pAudioFrame;
		AVRational tmp = {1, avcc->sample_rate};

		frame->pts = av_rescale_q(m_AudioSamplesCount, tmp, avcc->time_base);
		m_AudioSamplesCount += dst_nb_samples;
	}
	int got_packet = 0;
	ret = avcodec_encode_audio2(avcc, &pkt, frame, &got_packet);
	if (ret < 0){
		log("[CVideoLivRecord::write_audio_frame] -- avcodec_encode_audio2() error");
		return FALSE;
	}
	if(got_packet){
		av_packet_rescale_ts(&pkt, avcc->time_base, st->time_base);
		pkt.stream_index = st->index;
		ret = av_interleaved_write_frame(m_pAVFormatContext, &pkt);
		//ret = write_audio_frame(m_pAudioStream, pBuffer, len);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- write_audio_frame() error");
			return FALSE;
		}
	}
	return (frame || got_packet)? FALSE : TRUE;
}
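Example #17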
void AudioDecoder::handleDecodedFrame(AVFrame* frame) {
	/* compute destination number of samples */
	m_outNumSamples = static_cast<int>(av_rescale_rnd(getDelay(m_resampleCtx, frame->sample_rate) + frame->nb_samples,
	                                                  OUT_SAMPLE_RATE, frame->sample_rate, AV_ROUND_UP));

	if (m_outNumSamples > m_maxOutNumSamples) {
		av_freep(&m_outData[0]);
		auto ret = av_samples_alloc(m_outData, &m_outLinesize, OUT_NUM_CHANNELS, m_outNumSamples,
									OUT_SAMPLE_FORMAT, 1);
		if (ret < 0) {
			mprintf(("FFMPEG: Failed to allocate samples!!!"));
			return;
		}

		m_maxOutNumSamples = m_outNumSamples;
	}

	/* convert to destination format */
	auto ret = resample_convert(m_resampleCtx, m_outData, 0, m_outNumSamples,
								(uint8_t**) frame->data, 0, frame->nb_samples);
	if (ret < 0) {
		mprintf(("FFMPEG: Error while converting audio!\n"));
		return;
	}

	auto outBufsize = av_samples_get_buffer_size(&m_outLinesize, OUT_NUM_CHANNELS, ret, OUT_SAMPLE_FORMAT, 1);
	if (outBufsize < 0) {
		mprintf(("FFMPEG: Could not get sample buffer size!\n"));
		return;
	}

	auto begin = reinterpret_cast<short*>(m_outData[0]);
	auto end = reinterpret_cast<short*>(m_outData[0] + outBufsize);

	auto size = std::distance(begin, end);
	auto newSize = m_audioBuffer.size() + size;

	if (newSize <= m_audioBuffer.capacity()) {
		// We haven't filled the buffer yet
		m_audioBuffer.insert(m_audioBuffer.end(), begin, end);
	} else {
		flushAudioBuffer();
		m_audioBuffer.assign(begin, end);
	}
}
Example #18
static int64_t rtmp_read_seek(URLContext *s, int stream_index,
                              int64_t timestamp, int flags)
{
    RTMP *r = s->priv_data;

    if (flags & AVSEEK_FLAG_BYTE)
        return AVERROR(ENOSYS);

    /* seeks are in milliseconds */
    if (stream_index < 0)
        timestamp = av_rescale_rnd(timestamp, 1000, AV_TIME_BASE,
            flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP);

    if (!RTMP_SendSeek(r, timestamp))
        return -1;
    return timestamp;
}
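As a worked example of the unit change above (hypothetical value): 2.5 s in AV_TIME_BASE (microsecond) units is 2500000, and rescaling by 1000/AV_TIME_BASE gives 2500 ms. The rounding mode only matters when the division is inexact:

#include <assert.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t ts = 2500000; /* 2.5 s in AV_TIME_BASE units */
    assert(av_rescale_rnd(ts, 1000, AV_TIME_BASE, AV_ROUND_UP) == 2500);
    /* an inexact case: 1 us rounds up to 1 ms, but down to 0 ms */
    assert(av_rescale_rnd(1, 1000, AV_TIME_BASE, AV_ROUND_UP)   == 1);
    assert(av_rescale_rnd(1, 1000, AV_TIME_BASE, AV_ROUND_DOWN) == 0);
    return 0;
}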
Example #19
static int gxf_write_media_preamble(AVFormatContext *s, AVPacket *pkt, int size)
{
    GXFContext *gxf = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[pkt->stream_index];
    GXFStreamContext *sc = st->priv_data;
    unsigned field_nb;
    /* If the video is frame-encoded, the frame numbers shall be represented by
     * even field numbers.
     * see SMPTE360M-2004  6.4.2.1.3 Media field number */
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        field_nb = gxf->nb_fields;
    } else {
        field_nb = av_rescale_rnd(pkt->dts, gxf->time_base.den,
                                  (int64_t)48000*gxf->time_base.num, AV_ROUND_UP);
    }

    avio_w8(pb, sc->media_type);
    avio_w8(pb, st->index);
    avio_wb32(pb, field_nb);
    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        avio_wb16(pb, 0);
        avio_wb16(pb, size / 2);
    } else if (st->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
        int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
        if (frame_type == AV_PICTURE_TYPE_I) {
            avio_w8(pb, 0x0d);
            sc->iframes++;
        } else if (frame_type == AV_PICTURE_TYPE_B) {
            avio_w8(pb, 0x0f);
            sc->bframes++;
        } else {
            avio_w8(pb, 0x0e);
            sc->pframes++;
        }
        avio_wb24(pb, size);
    } else if (st->codec->codec_id == CODEC_ID_DVVIDEO) {
        avio_w8(pb, size / 4096);
        avio_wb24(pb, 0);
    } else
        avio_wb32(pb, size);
    avio_wb32(pb, field_nb);
    avio_w8(pb, 1); /* flags */
    avio_w8(pb, 0); /* reserved */
    return 16;
}
Example #20
static int64_t get_out_samples(struct SwrContext *s, int in_samples) {
    ResampleContext *c = s->resample;
    // The two "+ 2" terms allow implementations to be slightly inaccurate;
    // they should not be needed currently. They also make it easier to prove
    // that changes and optimizations do not break the upper bound.
    int64_t num = s->in_buffer_count + 2LL + in_samples;
    num *= c->phase_count;
    num -= c->index;
    num = av_rescale_rnd(num, s->out_sample_rate, ((int64_t)s->in_sample_rate) * c->phase_count, AV_ROUND_UP) + 2;

    if (c->compensation_distance) {
        if (num > INT_MAX)
            return AVERROR(EINVAL);

        num = FFMAX(num, (num * c->ideal_dst_incr - 1) / c->dst_incr + 1);
    }
    return num;
}
Example #21
int avresample_get_out_samples(AVAudioResampleContext *avr, int in_nb_samples)
{
    int64_t samples = avresample_get_delay(avr) + (int64_t)in_nb_samples;

    if (avr->resample_needed) {
        samples = av_rescale_rnd(samples,
                                 avr->out_sample_rate,
                                 avr->in_sample_rate,
                                 AV_ROUND_UP);
    }

    samples += avresample_available(avr);

    if (samples > INT_MAX)
        return AVERROR(EINVAL);

    return samples;
}
Example #22
int create_channel_data( AVFrame *pFrame )
{
        //printf( "In Create Channel data : Thread 1\n" );
        int dst_nb_channels = av_get_channel_layout_nb_channels
                              ( AV_CH_LAYOUT_STEREO );
        int dst_linesize;
        int delay = swr_get_delay( gMedia->pSwrContext,
                                   pFrame->sample_rate );
        int dst_nb_samples = av_rescale_rnd( pFrame->nb_samples + delay,
                                             pFrame->sample_rate,
                                             pFrame->sample_rate,
                                             AV_ROUND_UP );
        //printf("Destination channels = %d\n",dst_nb_channels);
        //printf("Destination samples = %d\n",dst_nb_samples);
        int error_check = av_samples_alloc_array_and_samples
                          ( &gMedia->audio_buf,
                            &dst_linesize,
                            dst_nb_channels,
                            dst_nb_samples,
                            AV_SAMPLE_FMT_FLT,
                            1 );

        if ( error_check < 0 ) {
                fprintf( stderr, "Could not allocate destination samples\n" );
        }

        int data_size = av_samples_get_buffer_size
                        ( NULL,
                          pFrame->channels,
                          pFrame->nb_samples,
                          pFrame->format,
                          0 );
        /*      printf("Number of samples = %d\n",pFrame->nb_samples);
                printf("Number of bytes = %d\n",pFrame->nb_samples*2*4);
                printf("Linesize per channel is %d\n",pFrame->linesize[0]);
                printf("Calculated datasize is %d\n",data_size);
        */
        swr_convert( gMedia->pSwrContext,
                     ( uint8_t ** )( gMedia->audio_buf ) ,
                     pFrame->nb_samples,
                     ( const uint8_t ** )pFrame->data,
                     pFrame->nb_samples );
        return data_size;
}
Example #23
static int gxf_write_media_preamble(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt, int size)
{
    GXFStreamContext *sc = ctx->fc->streams[pkt->stream_index]->priv_data;
    unsigned field_nb;
    /* If the video is frame-encoded, the frame numbers shall be represented by
     * even field numbers.
     * see SMPTE360M-2004  6.4.2.1.3 Media field number */
    if (sc->codec->codec_type == CODEC_TYPE_VIDEO) {
        field_nb = ctx->nb_fields;
    } else {
        field_nb = av_rescale_rnd(pkt->dts, ctx->sample_rate, sc->codec->time_base.den, AV_ROUND_UP);
    }

    put_byte(pb, sc->media_type);
    put_byte(pb, sc->index);
    put_be32(pb, field_nb);
    if (sc->codec->codec_type == CODEC_TYPE_AUDIO) {
        put_be16(pb, 0);
        put_be16(pb, size / 2);
    } else if (sc->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
        int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
        if (frame_type == FF_I_TYPE) {
            put_byte(pb, 0x0d);
            sc->iframes++;
        } else if (frame_type == FF_B_TYPE) {
            put_byte(pb, 0x0f);
            sc->bframes++;
        } else {
            put_byte(pb, 0x0e);
            sc->pframes++;
        }
        put_be24(pb, size);
    } else if (sc->codec->codec_id == CODEC_ID_DVVIDEO) {
        put_byte(pb, size / 4096);
        put_be24(pb, 0);
    } else
        put_be32(pb, size);
    put_be32(pb, field_nb);
    put_byte(pb, 1); /* flags */
    put_byte(pb, 0); /* reserved */
    return 16;
}
Example #24
static int gxf_compare_field_nb(AVFormatContext *s, AVPacket *next, AVPacket *cur)
{
    GXFContext *gxf = s->priv_data;
    AVPacket *pkt[2] = { cur, next };
    int i, field_nb[2];
    GXFStreamContext *sc[2];

    for (i = 0; i < 2; i++) {
        AVStream *st = s->streams[pkt[i]->stream_index];
        sc[i] = st->priv_data;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            field_nb[i] = av_rescale_rnd(pkt[i]->dts, gxf->time_base.den,
                                         (int64_t)48000*gxf->time_base.num, AV_ROUND_UP);
            field_nb[i] &= ~1; // compare against even field number because audio must be before video
        } else
            field_nb[i] = pkt[i]->dts; // dts are field based
    }

    return field_nb[1] > field_nb[0] ||
        (field_nb[1] == field_nb[0] && sc[1]->order > sc[0]->order);
}
Example #25
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext   *s = ctx->priv;
    int ret = 0;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    /* flush the lavr delay buffer */
    if (ret == AVERROR_EOF && s->avr) {
        AVFilterBufferRef *buf;
        int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
                                        outlink->sample_rate,
                                        ctx->inputs[0]->sample_rate,
                                        AV_ROUND_UP);

        if (!nb_samples)
            return ret;

        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        if (!buf)
            return AVERROR(ENOMEM);

        ret = avresample_convert(s->avr, (void**)buf->extended_data,
                                 buf->linesize[0], nb_samples,
                                 NULL, 0, 0);
        if (ret <= 0) {
            avfilter_unref_buffer(buf);
            return (ret == 0) ? AVERROR_EOF : ret;
        }

        buf->pts = s->next_pts;
        return ff_filter_samples(outlink, buf);
    }
    return ret;
}
Example #26
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext   *s = ctx->priv;
    int ret = 0;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    /* flush the lavr delay buffer */
    if (ret == AVERROR_EOF && s->avr) {
        AVFrame *frame;
        int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
                                        outlink->sample_rate,
                                        ctx->inputs[0]->sample_rate,
                                        AV_ROUND_UP);

        if (!nb_samples)
            return ret;

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);

        ret = avresample_convert(s->avr, frame->extended_data,
                                 frame->linesize[0], nb_samples,
                                 NULL, 0, 0);
        if (ret <= 0) {
            av_frame_free(&frame);
            return (ret == 0) ? AVERROR_EOF : ret;
        }

        frame->pts = s->next_pts;
        return ff_filter_frame(outlink, frame);
    }
    return ret;
}
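Examples #25 and #26 flush libavresample's delay buffer by passing a NULL input to avresample_convert; libswresample supports the same idiom. A minimal hedged sketch (the function name is ours) of draining a SwrContext at end of stream:

#include <libavutil/mathematics.h>
#include <libswresample/swresample.h>

/* Drain whatever the resampler still buffers into out; returns samples
 * written per channel, or a negative AVERROR code. */
static int drain_swr(SwrContext *swr, uint8_t **out, int max_out,
                     int in_rate, int out_rate)
{
    /* backlog reported at the input rate, rescaled to the output rate */
    int nb = (int)av_rescale_rnd(swr_get_delay(swr, in_rate),
                                 out_rate, in_rate, AV_ROUND_UP);
    if (nb <= 0)
        return 0;
    if (nb > max_out)
        nb = max_out;
    /* NULL input tells swr_convert to flush its internal buffer */
    return swr_convert(swr, out, nb, NULL, 0);
}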
Example #27
static int applehttp_read_seek(AVFormatContext *s, int stream_index,
                               int64_t timestamp, int flags)
{
    AppleHTTPContext *c = s->priv_data;
    int pos = 0, i;
    struct variant *var = c->variants[0];

    if ((flags & AVSEEK_FLAG_BYTE) || !c->finished)
        return AVERROR(ENOSYS);

    /* Reset the variants */
    c->last_packet_dts = AV_NOPTS_VALUE;
    for (i = 0; i < c->n_variants; i++) {
        struct variant *var = c->variants[i];
        if (var->pb) {
            url_fclose(var->pb);
            var->pb = NULL;
        }
        av_free_packet(&var->pkt);
        reset_packet(&var->pkt);
    }

    timestamp = av_rescale_rnd(timestamp, 1, stream_index >= 0 ?
                               s->streams[stream_index]->time_base.den :
                               AV_TIME_BASE, flags & AVSEEK_FLAG_BACKWARD ?
                               AV_ROUND_DOWN : AV_ROUND_UP);
    /* Locate the segment that contains the target timestamp */
    for (i = 0; i < var->n_segments; i++) {
        if (timestamp >= pos && timestamp < pos + var->segments[i]->duration) {
            c->cur_seq_no = var->start_seq_no + i;
            return 0;
        }
        pos += var->segments[i]->duration;
    }
    return AVERROR(EIO);
}
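Example #28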
AudioDecoder::AudioDecoder(DecoderStatus* status)
	: FFMPEGStreamDecoder(status) {
	m_audioBuffer.reserve(static_cast<size_t>(OUT_SAMPLE_RATE * OUT_NUM_CHANNELS / 2));

	m_resampleCtx = getSWRContext(m_status->audioCodecPars.channel_layout, m_status->audioCodecPars.sample_rate,
								  m_status->audioCodecPars.audio_format);

	/*
	 * compute the number of converted samples: buffering is avoided
	 * ensuring that the output buffer will contain at least all the
	 * converted input samples
	 */
	m_maxOutNumSamples = m_outNumSamples = static_cast<int>(av_rescale_rnd(DEFAULT_SRC_NUM_SAMPLES,
																		   OUT_SAMPLE_RATE,
																		   m_status->audioCodecPars.sample_rate,
																		   AV_ROUND_UP));

	auto ret = alloc_array_and_samples(&m_outData, &m_outLinesize, OUT_NUM_CHANNELS,
									   m_outNumSamples, OUT_SAMPLE_FORMAT);

	if (ret < 0) {
		mprintf(("FFMPEG: Failed to allocate samples array!\n"));
	}
}
Example #29
static int rtp_write_header(AVFormatContext *s1)
{
    RTPMuxContext *s = s1->priv_data;
    int max_packet_size, n;
    AVStream *st;

    if (s1->nb_streams != 1)
        return -1;
    st = s1->streams[0];
    if (!is_supported(st->codec->codec_id)) {
        av_log(s1, AV_LOG_ERROR, "Unsupported codec %x\n", st->codec->codec_id);

        return -1;
    }

    s->payload_type = ff_rtp_get_payload_type(st->codec);
    if (s->payload_type < 0)
        s->payload_type = RTP_PT_PRIVATE + (st->codec->codec_type == AVMEDIA_TYPE_AUDIO);

    s->base_timestamp = av_get_random_seed();
    s->timestamp = s->base_timestamp;
    s->cur_timestamp = 0;
    s->ssrc = av_get_random_seed();
    s->first_packet = 1;
    s->first_rtcp_ntp_time = ff_ntp_time();
    if (s1->start_time_realtime)
        /* Round the NTP time to whole milliseconds. */
        s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 +
                                 NTP_OFFSET_US;

    max_packet_size = url_fget_max_packet_size(s1->pb);
    if (max_packet_size <= 12)
        return AVERROR(EIO);
    s->buf = av_malloc(max_packet_size);
    if (s->buf == NULL) {
        return AVERROR(ENOMEM);
    }
    s->max_payload_size = max_packet_size - 12;

    s->max_frames_per_packet = 0;
    if (s1->max_delay) {
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->frame_size == 0) {
                av_log(s1, AV_LOG_ERROR, "Cannot respect max delay: frame size = 0\n");
            } else {
                s->max_frames_per_packet = av_rescale_rnd(s1->max_delay, st->codec->sample_rate, AV_TIME_BASE * st->codec->frame_size, AV_ROUND_DOWN);
            }
        }
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* FIXME: We should round down here... */
            s->max_frames_per_packet = av_rescale_q(s1->max_delay, (AVRational){1, 1000000}, st->codec->time_base);
        }
    }

    av_set_pts_info(st, 32, 1, 90000);
    switch(st->codec->codec_id) {
    case CODEC_ID_MP2:
    case CODEC_ID_MP3:
        s->buf_ptr = s->buf + 4;
        break;
    case CODEC_ID_MPEG1VIDEO:
    case CODEC_ID_MPEG2VIDEO:
        break;
    case CODEC_ID_MPEG2TS:
        n = s->max_payload_size / TS_PACKET_SIZE;
        if (n < 1)
            n = 1;
        s->max_payload_size = n * TS_PACKET_SIZE;
        s->buf_ptr = s->buf;
        break;
    case CODEC_ID_H264:
        /* check for H.264 MP4 syntax */
        if (st->codec->extradata_size > 4 && st->codec->extradata[0] == 1) {
            s->nal_length_size = (st->codec->extradata[4] & 0x03) + 1;
        }
        break;
    case CODEC_ID_AMR_NB:
    case CODEC_ID_AMR_WB:
        if (!s->max_frames_per_packet)
            s->max_frames_per_packet = 12;
        if (st->codec->codec_id == CODEC_ID_AMR_NB)
            n = 31;
        else
            n = 61;
        /* max_header_toc_size + the largest AMR payload must fit */
        if (1 + s->max_frames_per_packet + n > s->max_payload_size) {
            av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n");
            return -1;
        }
        if (st->codec->channels != 1) {
            av_log(s1, AV_LOG_ERROR, "Only mono is supported\n");
            return -1;
        }
    case CODEC_ID_AAC:
        s->num_frames = 0;
    default:
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            av_set_pts_info(st, 32, 1, st->codec->sample_rate);
        }
        s->buf_ptr = s->buf;
        break;
    }

    return 0;
}