Example #1
int64_t swr_get_delay(struct SwrContext *s, int64_t base)
{
    int64_t in_sr, out_sr;
    av_opt_get_int(s, "in_sample_rate", 0, &in_sr);
    av_opt_get_int(s, "out_sample_rate", 0, &out_sr);
    return av_rescale_rnd(avresample_available(s), base, out_sr, AV_ROUND_UP) +
           av_rescale_rnd(avresample_get_delay(s), base, in_sr, AV_ROUND_UP);
}
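Note that the av_opt_get_int() calls above ignore their return values; if either lookup failed, the rates would be used uninitialized. A minimal sketch of the same kind of query with checking (a generic helper for illustration, not part of any of these projects):

#include <libavutil/opt.h>

// Return the named integer option, or a fallback if the lookup fails.
static int64_t opt_int_or(void *obj, const char *name, int64_t fallback)
{
    int64_t value;
    if (av_opt_get_int(obj, name, 0, &value) < 0)
        return fallback;
    return value;
}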
Example #2
/**
 * Allocates and initializes the encoder context and frame in FFAudioIO.
 * The encoder is configured with the output parameters of the SwrContext in
 * FFAudioIO, so the SwrContext must be set up first for this to succeed.
 *
 * @param env JNIEnv
 * @param aio FFAudioIO (our context)
 * @param encoder AVCodec to use to setup the encoder AVCodecContext
 * @return a negative value, if something goes wrong
 */
int ff_init_encoder(JNIEnv *env, FFAudioIO *aio, AVCodec *encoder) {
    int res = 0;
    int64_t out_sample_rate;
    int64_t out_channel_count;
    int64_t out_channel_layout;
    enum AVSampleFormat out_sample_fmt;

    // make sure we clean up before resetting this
    // in case this is called twice
    if (aio->encode_frame) {
        av_frame_free(&aio->encode_frame);
    }
    if (aio->encode_context) {
        avcodec_close(aio->encode_context);
        av_free(aio->encode_context);
    }

    aio->encode_context = avcodec_alloc_context3(encoder);
    if (!aio->encode_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate codec context.");
        goto bail;
    }

    // init to whatever we have in SwrContext
    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_channel_layout", 0, &out_channel_layout);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out_sample_fmt);

    aio->encode_context->sample_fmt = out_sample_fmt;
    aio->encode_context->sample_rate = out_sample_rate;
    aio->encode_context->channel_layout = out_channel_layout;
    aio->encode_context->channels = out_channel_count;

    res = avcodec_open2(aio->encode_context, encoder, NULL);
    if (res < 0) {
        // keep the avcodec_open2 error code instead of clobbering it with ENOMEM
        throwIOExceptionIfError(env, res, "Could not open encoder.");
        goto bail;
    }

    aio->encode_frame = av_frame_alloc();
    if (!aio->encode_frame) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate encoder frame.");
        goto bail;
    }
    aio->encode_frame->nb_samples = aio->encode_context->frame_size; // this will be changed later!!
    aio->encode_frame->format = aio->encode_context->sample_fmt;
    aio->encode_frame->channel_layout = aio->encode_context->channel_layout;

    bail:

    return res;
}
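ff_init_encoder() reads its four output parameters back out of the SwrContext, which is why the comment insists the resampler be set up first. A minimal sketch of that prior configuration, assuming the classic (pre-ch_layout) option names used throughout these examples:

#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>

static SwrContext *alloc_configured_swr(void)
{
    SwrContext *swr = swr_alloc();
    if (!swr)
        return NULL;
    // the four options ff_init_encoder() later queries
    av_opt_set_int(swr, "out_channel_count", 2, 0);
    av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(swr, "out_sample_rate", 44100, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    // the in_* options would be set from the decoder here, then swr_init()
    return swr;
}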
Example #3
File: rtp.c Project: Arcen/libav
int ff_rtp_get_payload_type(AVFormatContext *fmt, AVCodecContext *codec)
{
    int i;
    AVOutputFormat *ofmt = fmt ? fmt->oformat : NULL;

    /* Was the payload type already specified for the RTP muxer? */
    if (ofmt && ofmt->priv_class) {
        int64_t payload_type;
        if (av_opt_get_int(fmt->priv_data, "payload_type", 0, &payload_type) >= 0 &&
            payload_type >= 0)
            return (int)payload_type;
    }

    /* static payload type */
    for (i = 0; AVRtpPayloadTypes[i].pt >= 0; ++i)
        if (AVRtpPayloadTypes[i].codec_id == codec->codec_id) {
            if (codec->codec_id == CODEC_ID_H263 && (!fmt ||
                !fmt->oformat->priv_class ||
                !av_opt_flag_is_set(fmt->priv_data, "rtpflags", "rfc2190")))
                continue;
            if (codec->codec_id == CODEC_ID_PCM_S16BE)
                if (codec->channels != AVRtpPayloadTypes[i].audio_channels)
                    continue;
            return AVRtpPayloadTypes[i].pt;
        }

    /* dynamic payload type */
    return RTP_PT_PRIVATE + (codec->codec_type == AVMEDIA_TYPE_AUDIO);
}
Example #4
/**
 * Resample a buffer using FFAudioIO->swr_context.
 * The returned out buffer needs to be freed by the caller.
 *
 * @param aio           FFAudioIO context
 * @param out_buf       out buffer
 * @param out_samples   out samples
 * @param in_buf        in buffer
 * @param in_samples    in samples
 * @return number of samples copied/converted or a negative value, should things go wrong
 */
static int resample(FFAudioIO *aio,  uint8_t **out_buf, int out_samples, const uint8_t **in_buf, const int in_samples) {
    int res = 0;
    int64_t out_channel_count;
    enum AVSampleFormat out_sample_format;

    if (out_samples == 0) goto bail; // nothing to do.

    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out_sample_format);

    #ifdef DEBUG
        fprintf(stderr, "resample: out_samples=%d in_samples=%d, channels=%d sample_format=%d\n",
            out_samples, in_samples, (int)out_channel_count, out_sample_format);
    #endif

    // allocate temp buffer for resampled data
    res = av_samples_alloc(out_buf, NULL, out_channel_count, out_samples, out_sample_format, 1);
    if (res < 0) {
        // keep the av_samples_alloc error code instead of clobbering it with ENOMEM
        throwIOExceptionIfError(aio->env, res, "Could not allocate resample buffer.");
        goto bail;
    }

    // run the SWR conversion (even if it is not strictly necessary)
    res = swr_convert(aio->swr_context, out_buf, out_samples, in_buf, in_samples);
    if (res < 0) {
        throwIOExceptionIfError(aio->env, res, "Failed to convert audio data.");
        goto bail;
    }

    bail:

    return res;
}
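The comment above puts the burden of freeing the out buffer on the caller. A minimal sketch of a call site honoring that contract, with aio, in_data, in_samples and max_out_samples assumed to be set up as in Example #10 below:

#include <libavutil/mem.h>

uint8_t *resampled[1] = { NULL }; // one plane, as in Example #10
int n = resample(aio, resampled, max_out_samples,
                 (const uint8_t **)in_data, in_samples);
if (n > 0) {
    // ... consume n samples per channel from resampled[0] ...
}
av_freep(&resampled[0]); // buffer came from av_samples_alloc()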
Example #5
int Option::getInt() const
{
	int64_t out_val;
	int error = av_opt_get_int( _avContext, getName().c_str(), AV_OPT_SEARCH_CHILDREN, &out_val );
	checkFFmpegGetOption( error );

	return out_val;
}
Example #6
void Option::setFlag( const std::string& flag, const bool enable )
{
	int64_t optVal;
	int error = av_opt_get_int( _avContext, getName().c_str(), AV_OPT_SEARCH_CHILDREN, &optVal );
	checkFFmpegGetOption( error );

	if( enable )
		optVal = optVal |  _avOption->default_val.i64;
	else
		optVal = optVal &~ _avOption->default_val.i64;

	error = av_opt_set_int( _avContext, getName().c_str(), optVal, AV_OPT_SEARCH_CHILDREN );
	checkFFmpegSetOption( error, flag );
}
Example #7
 void setSampleRate(int sr)
 {
     if (swr)
     {
         int64_t outSampleRate;
         av_opt_get_int(swr, "out_sample_rate", 0, &outSampleRate);
         if (outSampleRate != host.sampleRate)
         {
             av_opt_set_int(swr, "out_sample_rate", host.sampleRate, 0);
             JIF(swr_init(swr), "failed to init audio resampler.");
         }
     }
     host.sampleRate = sr;
     return;
 err:
     host.state = CodecBoxDecoderState::Failed;
     close();
 }
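setSampleRate() works because libswresample lets options change on a live context, provided swr_init() is called again afterwards. The same reconfiguration pattern in isolation (names assumed):

#include <libavutil/opt.h>
#include <libswresample/swresample.h>

// Retarget an existing resampler to a new output rate. The option
// change only takes effect once swr_init() runs again.
static int retarget_output_rate(SwrContext *swr, int new_rate)
{
    av_opt_set_int(swr, "out_sample_rate", new_rate, 0);
    return swr_init(swr); // negative on failure
}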
Example #8
File: rtp.c Project: bwahn/FFmpeg-1
int ff_rtp_get_payload_type(AVFormatContext *fmt,
                            AVCodecContext *codec, int idx)
{
    int i;
    AVOutputFormat *ofmt = fmt ? fmt->oformat : NULL;

    /* Was the payload type already specified for the RTP muxer? */
    if (ofmt && ofmt->priv_class && fmt->priv_data) {
        int64_t payload_type;
        if (av_opt_get_int(fmt->priv_data, "payload_type", 0, &payload_type) >= 0 &&
            payload_type >= 0)
            return (int)payload_type;
    }

    /* static payload type */
    for (i = 0; rtp_payload_types[i].pt >= 0; ++i)
        if (rtp_payload_types[i].codec_id == codec->codec_id) {
            if (codec->codec_id == AV_CODEC_ID_H263 && (!fmt || !fmt->oformat ||
                !fmt->oformat->priv_class || !fmt->priv_data ||
                !av_opt_flag_is_set(fmt->priv_data, "rtpflags", "rfc2190")))
                continue;
            /* G722 has 8000 as nominal rate even if the sample rate is 16000,
             * see section 4.5.2 in RFC 3551. */
            if (codec->codec_id == AV_CODEC_ID_ADPCM_G722 &&
                codec->sample_rate == 16000 && codec->channels == 1)
                return rtp_payload_types[i].pt;
            if (codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                ((rtp_payload_types[i].clock_rate > 0 &&
                  codec->sample_rate != rtp_payload_types[i].clock_rate) ||
                 (rtp_payload_types[i].audio_channels > 0 &&
                  codec->channels != rtp_payload_types[i].audio_channels)))
                continue;
            return rtp_payload_types[i].pt;
        }

    if (idx < 0)
        idx = codec->codec_type == AVMEDIA_TYPE_AUDIO;

    /* dynamic payload type */
    return RTP_PT_PRIVATE + idx;
}
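Both versions of ff_rtp_get_payload_type() first honor a payload type the user set on the RTP muxer. A minimal sketch of how a caller might set that private option beforehand (oc is assumed to be an already-allocated RTP muxer context):

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>

static void set_rtp_payload_type(AVFormatContext *oc, int pt)
{
    // AV_OPT_SEARCH_CHILDREN descends into oc->priv_data,
    // where the muxer's private "payload_type" option lives.
    int ret = av_opt_set_int(oc, "payload_type", pt, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        fprintf(stderr, "could not set payload_type: %s\n", av_err2str(ret));
}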
Example #9
static int opus_decode_subpacket(OpusStreamContext *s,
                                 const uint8_t *buf, int buf_size,
                                 int nb_samples)
{
    int output_samples = 0;
    int flush_needed   = 0;
    int i, j, ret;

    /* check if we need to flush the resampler */
    if (swr_is_initialized(s->swr)) {
        if (buf) {
            int64_t cur_samplerate;
            av_opt_get_int(s->swr, "in_sample_rate", 0, &cur_samplerate);
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }

    if (!buf && !flush_needed)
        return 0;

    /* use dummy output buffers if the channel is not mapped to anything */
    if (!s->out[0] ||
            (s->output_channels == 2 && !s->out[1])) {
        av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size, s->out_size);
        if (!s->out_dummy)
            return AVERROR(ENOMEM);
        if (!s->out[0])
            s->out[0] = s->out_dummy;
        if (!s->out[1])
            s->out[1] = s->out_dummy;
    }

    /* flush the resampler if necessary */
    if (flush_needed) {
        ret = opus_flush_resample(s, s->delayed_samples);
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
            return ret;
        }
        swr_close(s->swr);
        output_samples += s->delayed_samples;
        s->delayed_samples = 0;

        if (!buf)
            goto finish;
    }

    /* decode all the frames in the packet */
    for (i = 0; i < s->packet.frame_count; i++) {
        int size = s->packet.frame_size[i];
        int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);

        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
            if (s->avctx->err_recognition & AV_EF_EXPLODE)
                return samples;

            for (j = 0; j < s->output_channels; j++)
                memset(s->out[j], 0, s->packet.frame_duration * sizeof(float));
            samples = s->packet.frame_duration;
        }
        output_samples += samples;

        for (j = 0; j < s->output_channels; j++)
            s->out[j] += samples;
        s->out_size -= samples * sizeof(float);
    }

finish:
    s->out[0] = s->out[1] = NULL;
    s->out_size = 0;

    return output_samples;
}
Example #10
/**
 * Decode a packet to a frame, run the result through the SwrContext, encode it
 * via an appropriate encoder (if one is configured), and write the results to
 * the Java-side native buffer.
 *
 * @param aio       FFAudioIO context
 * @param cached    non-zero if the packet was cached (only used for debug output)
 * @return number of bytes placed into the Java buffer or a negative value, if something went wrong
 */
static int decode_packet(FFAudioIO *aio, int cached) {
    int res = 0;
    uint8_t **resample_buf = NULL;
    jobject byte_buffer = NULL;
    uint8_t *javaBuffer = NULL;
    uint32_t out_buf_size = 0;
    int out_buf_samples = 0;
    int64_t out_channel_count;
    int64_t out_sample_rate;
    int flush = aio->got_frame;
    enum AVSampleFormat out;
    int bytesConsumed = 0;

    init_ids(aio->env, aio->java_instance);

    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out);

    resample_buf = av_mallocz(sizeof(uint8_t *) * 1); // one plane!

    // make sure we really have an audio packet
    if (aio->decode_packet.stream_index == aio->stream_index) {
        // decode frame
        // got_frame indicates whether we got a frame
        bytesConsumed = avcodec_decode_audio4(aio->decode_context, aio->decode_frame, &aio->got_frame, &aio->decode_packet);
        if (bytesConsumed < 0) {
            throwUnsupportedAudioFileExceptionIfError(aio->env, bytesConsumed, "Failed to decode audio frame.");
            return bytesConsumed;
        }

        if (aio->got_frame) {

            aio->decoded_samples += aio->decode_frame->nb_samples;
            out_buf_samples = aio->decode_frame->nb_samples;
#ifdef DEBUG
            fprintf(stderr, "samples%s n:%" PRIu64 " nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   aio->decoded_samples, aio->decode_frame->nb_samples,
                   av_ts2timestr(aio->decode_frame->pts, &aio->decode_context->time_base));
#endif

            // adjust out sample number for a different sample rate
            // this is an estimate!!
            out_buf_samples = av_rescale_rnd(
                    swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate) + aio->decode_frame->nb_samples,
                    out_sample_rate,
                    aio->stream->codecpar->sample_rate,
                    AV_ROUND_UP
            );

            // allocate new aio->audio_data buffers
            res = av_samples_alloc(aio->audio_data, NULL, av_frame_get_channels(aio->decode_frame),
                                   aio->decode_frame->nb_samples, aio->decode_frame->format, 1);
            if (res < 0) {
                throwIOExceptionIfError(aio->env, res, "Could not allocate audio buffer.");
                return AVERROR(ENOMEM);
            }
            // copy audio data to aio->audio_data
            av_samples_copy(aio->audio_data, aio->decode_frame->data, 0, 0,
                            aio->decode_frame->nb_samples, av_frame_get_channels(aio->decode_frame), aio->decode_frame->format);

            res = resample(aio, resample_buf, out_buf_samples, (const uint8_t **)aio->audio_data, aio->decode_frame->nb_samples);
            if (res < 0) goto bail;
            else out_buf_samples = res;

        } else if (flush && swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate)) {

            res = resample(aio, resample_buf, swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate), NULL, 0);
            if (res < 0) goto bail;
            else out_buf_samples = res;
        } else {
#ifdef DEBUG
            fprintf(stderr, "Got no frame.\n");
#endif
        }

        if (out_buf_samples > 0) {

            res = av_samples_get_buffer_size(NULL, (int)out_channel_count, out_buf_samples, out, 1);
            if (res < 0) goto bail;
            else out_buf_size = res;

            // ensure native buffer capacity
            if (aio->java_buffer_capacity < out_buf_size) {
                aio->java_buffer_capacity = (*aio->env)->CallIntMethod(aio->env, aio->java_instance, setNativeBufferCapacity_MID, (jint)out_buf_size);
            }
            // get java-managed byte buffer reference
            byte_buffer = (*aio->env)->GetObjectField(aio->env, aio->java_instance, nativeBuffer_FID);
            if (!byte_buffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get native buffer.");
                goto bail;
            }

            // we have some samples, let's copy them to the java buffer, using the desired encoding
            javaBuffer = (uint8_t *)(*aio->env)->GetDirectBufferAddress(aio->env, byte_buffer);
            if (!javaBuffer) {
                throwIOExceptionIfError(aio->env, 1, "Failed to get address for native buffer.");
                goto bail;
            }
            if (aio->encode_context) {
                aio->encode_frame->nb_samples = out_buf_samples;
                res = encode_buffer(aio, resample_buf[0], out_buf_size, javaBuffer);
                if (res < 0) {
                    out_buf_size = 0;
                    goto bail;
                }
                out_buf_size = res;
            } else {
                memcpy(javaBuffer, resample_buf[0], out_buf_size);
            }
            // we already wrote to the buffer, now we still need to
            // set new bytebuffer limit and position to 0.
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, rewind_MID);
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, limit_MID, out_buf_size);
        }
    }

    aio->resampled_samples += out_buf_size;

bail:

    if (resample_buf) {
        if (resample_buf[0]) av_freep(&resample_buf[0]);
        av_free(resample_buf);
    }
    if (aio->audio_data[0]) av_freep(&aio->audio_data[0]);

    return res;
}
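The out_buf_samples estimate above follows the standard resampler sizing rule: samples still buffered in the resampler plus the new input, rescaled to the output rate and rounded up, so the destination can never be too small. The same computation in isolation (parameter names assumed):

#include <libavutil/mathematics.h>
#include <libswresample/swresample.h>

// Upper bound on the output sample count of one conversion.
static int64_t max_output_samples(SwrContext *swr, int in_rate,
                                  int out_rate, int in_samples)
{
    return av_rescale_rnd(swr_get_delay(swr, in_rate) + in_samples,
                          out_rate, in_rate, AV_ROUND_UP);
}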
Example #11
static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size)
{
    int samples    = s->packet.frame_duration;
    int redundancy = 0;
    int redundancy_size, redundancy_pos;
    int ret, i, consumed;
    int delayed_samples = s->delayed_samples;

    ret = ff_opus_rc_dec_init(&s->rc, data, size);
    if (ret < 0)
        return ret;

    /* decode the silk frame */
    if (s->packet.mode == OPUS_MODE_SILK || s->packet.mode == OPUS_MODE_HYBRID) {
#if CONFIG_SWRESAMPLE
        if (!swr_is_initialized(s->swr)) {
#elif CONFIG_AVRESAMPLE
        if (!avresample_is_open(s->avr)) {
#endif
            ret = opus_init_resample(s);
            if (ret < 0)
                return ret;
        }

        samples = ff_silk_decode_superframe(s->silk, &s->rc, s->silk_output,
                                            FFMIN(s->packet.bandwidth, OPUS_BANDWIDTH_WIDEBAND),
                                            s->packet.stereo + 1,
                                            silk_frame_duration_ms[s->packet.config]);
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding a SILK frame.\n");
            return samples;
        }
#if CONFIG_SWRESAMPLE
        samples = swr_convert(s->swr,
                              (uint8_t**)s->out, s->packet.frame_duration,
                              (const uint8_t**)s->silk_output, samples);
#elif CONFIG_AVRESAMPLE
        samples = avresample_convert(s->avr, (uint8_t**)s->out, s->out_size,
                                     s->packet.frame_duration,
                                     (uint8_t**)s->silk_output,
                                     sizeof(s->silk_buf[0]),
                                     samples);
#endif
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error resampling SILK data.\n");
            return samples;
        }
        av_assert2((samples & 7) == 0);
        s->delayed_samples += s->packet.frame_duration - samples;
    } else
        ff_silk_flush(s->silk);

    // decode redundancy information
    consumed = opus_rc_tell(&s->rc);
    if (s->packet.mode == OPUS_MODE_HYBRID && consumed + 37 <= size * 8)
        redundancy = ff_opus_rc_dec_log(&s->rc, 12);
    else if (s->packet.mode == OPUS_MODE_SILK && consumed + 17 <= size * 8)
        redundancy = 1;

    if (redundancy) {
        redundancy_pos = ff_opus_rc_dec_log(&s->rc, 1);

        if (s->packet.mode == OPUS_MODE_HYBRID)
            redundancy_size = ff_opus_rc_dec_uint(&s->rc, 256) + 2;
        else
            redundancy_size = size - (consumed + 7) / 8;
        size -= redundancy_size;
        if (size < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid redundancy frame size.\n");
            return AVERROR_INVALIDDATA;
        }

        if (redundancy_pos) {
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;
            ff_celt_flush(s->celt);
        }
    }

    /* decode the CELT frame */
    if (s->packet.mode == OPUS_MODE_CELT || s->packet.mode == OPUS_MODE_HYBRID) {
        float *out_tmp[2] = { s->out[0], s->out[1] };
        float **dst = (s->packet.mode == OPUS_MODE_CELT) ?
                      out_tmp : s->celt_output;
        int celt_output_samples = samples;
        int delay_samples = av_audio_fifo_size(s->celt_delay);

        if (delay_samples) {
            if (s->packet.mode == OPUS_MODE_HYBRID) {
                av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, delay_samples);

                for (i = 0; i < s->output_channels; i++) {
                    s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0,
                                                delay_samples);
                    out_tmp[i] += delay_samples;
                }
                celt_output_samples -= delay_samples;
            } else {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Spurious CELT delay samples present.\n");
                av_audio_fifo_drain(s->celt_delay, delay_samples);
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return AVERROR_BUG;
            }
        }

        ff_opus_rc_dec_raw_init(&s->rc, data + size, size);

        ret = ff_celt_decode_frame(s->celt, &s->rc, dst,
                                   s->packet.stereo + 1,
                                   s->packet.frame_duration,
                                   (s->packet.mode == OPUS_MODE_HYBRID) ? 17 : 0,
                                   ff_celt_band_end[s->packet.bandwidth]);
        if (ret < 0)
            return ret;

        if (s->packet.mode == OPUS_MODE_HYBRID) {
            int celt_delay = s->packet.frame_duration - celt_output_samples;
            void *delaybuf[2] = { s->celt_output[0] + celt_output_samples,
                                  s->celt_output[1] + celt_output_samples };

            for (i = 0; i < s->output_channels; i++) {
                s->fdsp->vector_fmac_scalar(out_tmp[i],
                                            s->celt_output[i], 1.0,
                                            celt_output_samples);
            }

            ret = av_audio_fifo_write(s->celt_delay, delaybuf, celt_delay);
            if (ret < 0)
                return ret;
        }
    } else
        ff_celt_flush(s->celt);

    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->out[i], s->out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }
    if (redundancy) {
        if (!redundancy_pos) {
            ff_celt_flush(s->celt);
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;

            for (i = 0; i < s->output_channels; i++) {
                opus_fade(s->out[i] + samples - 120 + delayed_samples,
                          s->out[i] + samples - 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          ff_celt_window2, 120 - delayed_samples);
                if (delayed_samples)
                    s->redundancy_idx = 120 - delayed_samples;
            }
        } else {
            for (i = 0; i < s->output_channels; i++) {
                memcpy(s->out[i] + delayed_samples, s->redundancy_output[i], 120 * sizeof(float));
                opus_fade(s->out[i] + 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          s->out[i] + 120 + delayed_samples,
                          ff_celt_window2, 120);
            }
        }
    }

    return samples;
}

static int opus_decode_subpacket(OpusStreamContext *s,
                                 const uint8_t *buf, int buf_size,
                                 float **out, int out_size,
                                 int nb_samples)
{
    int output_samples = 0;
    int flush_needed   = 0;
    int i, j, ret;

    s->out[0]   = out[0];
    s->out[1]   = out[1];
    s->out_size = out_size;

    /* check if we need to flush the resampler */
#if CONFIG_SWRESAMPLE
    if (swr_is_initialized(s->swr)) {
        if (buf) {
            int64_t cur_samplerate;
            av_opt_get_int(s->swr, "in_sample_rate", 0, &cur_samplerate);
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }
#elif CONFIG_AVRESAMPLE
    if (avresample_is_open(s->avr)) {
        if (buf) {
            int64_t cur_samplerate;
            av_opt_get_int(s->avr, "in_sample_rate", 0, &cur_samplerate);
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }
#endif

    if (!buf && !flush_needed)
        return 0;

    /* use dummy output buffers if the channel is not mapped to anything */
    if (!s->out[0] ||
        (s->output_channels == 2 && !s->out[1])) {
        av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size, s->out_size);
        if (!s->out_dummy)
            return AVERROR(ENOMEM);
        if (!s->out[0])
            s->out[0] = s->out_dummy;
        if (!s->out[1])
            s->out[1] = s->out_dummy;
    }

    /* flush the resampler if necessary */
    if (flush_needed) {
        ret = opus_flush_resample(s, s->delayed_samples);
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
            return ret;
        }
#if CONFIG_SWRESAMPLE
        swr_close(s->swr);
#elif CONFIG_AVRESAMPLE
        avresample_close(s->avr);
#endif
        output_samples += s->delayed_samples;
        s->delayed_samples = 0;

        if (!buf)
            goto finish;
    }

    /* decode all the frames in the packet */
    for (i = 0; i < s->packet.frame_count; i++) {
        int size = s->packet.frame_size[i];
        int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);

        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
            if (s->avctx->err_recognition & AV_EF_EXPLODE)
                return samples;

            for (j = 0; j < s->output_channels; j++)
                memset(s->out[j], 0, s->packet.frame_duration * sizeof(float));
            samples = s->packet.frame_duration;
        }
        output_samples += samples;

        for (j = 0; j < s->output_channels; j++)
            s->out[j] += samples;
        s->out_size -= samples * sizeof(float);
    }

finish:
    s->out[0] = s->out[1] = NULL;
    s->out_size = 0;

    return output_samples;
}
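Example #11 issues the identical av_opt_get_int() call against either resampler because both libswresample and libavresample expose their configuration through the AVOption system. A minimal sketch of the shared pattern (ctx may be a SwrContext or an AVAudioResampleContext):

#include <libavutil/opt.h>

// Works on any AVOptions-enabled object that defines "in_sample_rate".
static int64_t current_input_rate(void *ctx)
{
    int64_t rate = 0;
    av_opt_get_int(ctx, "in_sample_rate", 0, &rate);
    return rate;
}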
Example #12
bool D2V::printSettings() {
    int stream_type = getStreamType(f->fctx->iformat->name);

    int video_id = video_stream->id;
    int audio_id = 0;
    int64_t ts_packetsize = 0;
    if (stream_type == TRANSPORT_STREAM) {
        const AVStream *audio_stream = nullptr;
        for (unsigned i = 0; i < f->fctx->nb_streams; i++) {
            if (f->fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                audio_stream = f->fctx->streams[i];
                break;
            }
        }

        if (audio_stream)
            audio_id = audio_stream->id;

        if (av_opt_get_int(f->fctx, "ts_packetsize", AV_OPT_SEARCH_CHILDREN, &ts_packetsize) < 0)
            ts_packetsize = 0;
    }

    int mpeg_type = 0;
    if (video_stream->codec->codec_id == AV_CODEC_ID_MPEG1VIDEO)
        mpeg_type = 1;
    else if (video_stream->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        mpeg_type = 2;

    int yuvrgb_scale = input_range == ColourRangeLimited ? 1 : 0;

    int width, height;
    if (av_opt_get_image_size(video_stream->codec, "video_size", 0, &width, &height) < 0)
        width = height = -1;

    AVRational sar;
    if (av_opt_get_q(video_stream->codec, "aspect", 0, &sar) < 0)
        sar = { 1, 1 };
    AVRational dar = av_mul_q(av_make_q(width, height), sar);
    av_reduce(&dar.num, &dar.den, dar.num, dar.den, 1024);

    // No AVOption for framerate?
    AVRational frame_rate = video_stream->codec->framerate;

    std::string settings;

    settings += "Stream_Type=" + std::to_string(stream_type) + "\n";
    if (stream_type == TRANSPORT_STREAM) {
        char pids[100] = { 0 };
        snprintf(pids, 100, "%x,%x,%x", video_id, audio_id, 0);
        settings += "MPEG2_Transport_PID=";
        settings += pids;
        settings += "\n";

        settings += "Transport_Packet_Size=" + std::to_string(ts_packetsize) + "\n";
    }
    settings += "MPEG_Type=" + std::to_string(mpeg_type) + "\n";
    settings += "iDCT_Algorithm=6\n"; // "32-bit SSEMMX (Skal)". No one cares anyway.
    settings += "YUVRGB_Scale=" + std::to_string(yuvrgb_scale) + "\n";
    settings += "Luminance_Filter=0,0\n"; // We don't care.
    settings += "Clipping=0,0,0,0\n"; // We don't crop here.
    settings += "Aspect_Ratio=" + std::to_string(dar.num) + ":" + std::to_string(dar.den) + "\n";
    settings += "Picture_Size=" + std::to_string(width) + "x" + std::to_string(height) + "\n";
    settings += "Field_Operation=0\n"; // Always tell them honor the pulldown flags.
    settings += "Frame_Rate=" + std::to_string((int)((float)frame_rate.num * 1000 / frame_rate.den)) + " (" + std::to_string(frame_rate.num) + "/" + std::to_string(frame_rate.den) + ")\n";
    settings += "Location=0,0,0,0\n"; // Whatever.

    if (fprintf(d2v_file, "%s", settings.c_str()) < 0) {
        error = "Failed to print d2v settings section: fprintf() failed.";
        return false;
    }

    return true;
}
Example #13
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ResampleContext   *s = ctx->priv;
    char buf1[64], buf2[64];
    int ret;

    int64_t resampling_forced;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }

    if (inlink->channel_layout == outlink->channel_layout &&
        inlink->sample_rate    == outlink->sample_rate    &&
        (inlink->format        == outlink->format ||
        (av_get_channel_layout_nb_channels(inlink->channel_layout)  == 1 &&
         av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
         av_get_planar_sample_fmt(inlink->format) ==
         av_get_planar_sample_fmt(outlink->format))))
        return 0;

    if (!(s->avr = avresample_alloc_context()))
        return AVERROR(ENOMEM);

    if (s->options) {
        int ret;
        AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
            av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);

        ret = av_opt_set_dict(s->avr, &s->options);
        if (ret < 0)
            return ret;
    }

    av_opt_set_int(s->avr,  "in_channel_layout", inlink ->channel_layout, 0);
    av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
    av_opt_set_int(s->avr,  "in_sample_fmt",     inlink ->format,         0);
    av_opt_set_int(s->avr, "out_sample_fmt",     outlink->format,         0);
    av_opt_set_int(s->avr,  "in_sample_rate",    inlink ->sample_rate,    0);
    av_opt_set_int(s->avr, "out_sample_rate",    outlink->sample_rate,    0);

    if ((ret = avresample_open(s->avr)) < 0)
        return ret;

    av_opt_get_int(s->avr, "force_resampling", 0, &resampling_forced);
    s->resampling = resampling_forced || (inlink->sample_rate != outlink->sample_rate);

    if (s->resampling) {
        outlink->time_base = (AVRational){ 1, outlink->sample_rate };
        s->next_pts        = AV_NOPTS_VALUE;
        s->next_in_pts     = AV_NOPTS_VALUE;
    } else
        outlink->time_base = inlink->time_base;

    av_get_channel_layout_string(buf1, sizeof(buf1),
                                 -1, inlink ->channel_layout);
    av_get_channel_layout_string(buf2, sizeof(buf2),
                                 -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n",
           av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);

    return 0;
}