Example 1
/**
 * Resample a buffer using FFAudioIO->swr_context.
 * The returned out buffer needs to be freed by the caller.
 *
 * @param aio           FFAudioIO context
 * @param out_buf       out buffer
 * @param out_samples   out samples
 * @param in_buf        in buffer
 * @param in_samples    in samples
 * @return number of samples copied/converted or a negative value, should things go wrong
 */
static int resample(FFAudioIO *aio,  uint8_t **out_buf, int out_samples, const uint8_t **in_buf, const int in_samples) {
    int res = 0;
    int64_t out_channel_count;
    enum AVSampleFormat out_sample_format;

    if (out_samples == 0) goto bail; // nothing to do.

    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out_sample_format);

    #ifdef DEBUG
        fprintf(stderr, "resample: out_samples=%d in_samples=%d, channels=%d sample_format=%d\n",
            out_samples, in_samples, (int)out_channel_count, out_sample_format);
    #endif

    // allocate temp buffer for resampled data
    res = av_samples_alloc(out_buf, NULL, (int)out_channel_count, out_samples, out_sample_format, 1);
    if (res < 0) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(aio->env, res, "Could not allocate resample buffer.");
        goto bail;
    }

    // run the SWR conversion (even if it is not strictly necessary)
    res = swr_convert(aio->swr_context, out_buf, out_samples, in_buf, in_samples);
    if (res < 0) {
        throwIOExceptionIfError(aio->env, res, "Failed to convert audio data.");
        goto bail;
    }

    bail:

    return res;
}
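The doc comment notes that the caller owns the returned buffer. A hypothetical caller sketch (not part of the library; the name resample_and_consume and the single-plane handling are assumptions) that honors that contract could look like this:

static int resample_and_consume(FFAudioIO *aio, const uint8_t **in_buf, int in_samples, int estimated_out_samples) {
    int res;
    uint8_t *out_plane[1] = { NULL }; // packed output, so a single plane is enough
    res = resample(aio, out_plane, estimated_out_samples, in_buf, in_samples);
    if (res > 0) {
        // ... consume out_plane[0] here, e.g. hand it to the encoder or copy it to a Java buffer ...
    }
    if (out_plane[0]) av_freep(&out_plane[0]); // the caller must free the buffer allocated by resample()
    return res;
}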
Example 2
/**
 * Initialize our main context FFAudioIO, so that SwrContext, decode buffers and the encoder are set
 * to reasonable values.
 *
 * @param env       JNIEnv
 * @param aio       our context, FFAudioIO
 * @return a negative value, if something went wrong
 */
int ff_init_audioio(JNIEnv *env, FFAudioIO *aio) {
    int res = 0;
    int nb_planes;
    AVCodec *codec = NULL;

    aio->timestamp = 0;

    // allocate pointer to the audio buffers, i.e. the multiple planes/channels.
    nb_planes = av_sample_fmt_is_planar(aio->stream->codecpar->format)
        ? aio->stream->codecpar->channels
        : 1;

    // always init SWR to keep code simpler
    res = init_swr(env, aio);
    if (res < 0) {
        // exception is already thrown
        goto bail;
    }
    // if for some reason the codec delivers 24bit, we need to encode its output to little endian
    if (aio->stream->codecpar->bits_per_coded_sample == 24) {
        codec = ff_find_encoder(aio->stream->codecpar->format, aio->stream->codecpar->bits_per_coded_sample, ff_big_endian(aio->stream->codecpar->codec_id), 1);
        if (!codec) {
            res = AVERROR(EINVAL);
            throwIOExceptionIfError(env, res, "Could not find suitable encoder codec.");
            goto bail;
        }
        res = ff_init_encoder(env, aio, codec);
        if (res<0) {
            throwIOExceptionIfError(env, res, "Could not initialize encoder codec.");
            goto bail;
        }
    }

    // allocate the buffer the codec decodes to
    aio->audio_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
    if (!aio->audio_data) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate audio data buffers.");
        goto bail;
    }

    aio->decode_frame = av_frame_alloc();
    if (!aio->decode_frame) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate frame.");
        goto bail;
    }

    // initialize packet
    av_init_packet(&(aio->decode_packet));
    aio->decode_packet.data = NULL;
    aio->decode_packet.size = 0;

bail:

    return res;
}
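The nb_planes computation above follows the usual libavutil convention: planar sample formats carry one data plane per channel, packed formats a single interleaved plane. A standalone illustration (a sketch; the helper name plane_count is an assumption):

#include <libavutil/samplefmt.h>

// Planar formats (e.g. AV_SAMPLE_FMT_FLTP) use one plane per channel,
// packed formats (e.g. AV_SAMPLE_FMT_S16) keep all channels interleaved in a single plane.
static int plane_count(enum AVSampleFormat fmt, int channels) {
    return av_sample_fmt_is_planar(fmt) ? channels : 1;
}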
Example 3
/**
 * Allocates and initializes the encoder context and frame in FFAudioIO.
 * The output parameters of the SwrContext in FFAudioIO serve as encoder parameters.
 * Therefore the SwrContext must be set up first for this to be successful.
 *
 * @param env JNIEnv
 * @param aio FFAudioIO (our context)
 * @param encoder AVCodec to use to setup the encoder AVCodecContext
 * @return a negative value, if something goes wrong
 */
int ff_init_encoder(JNIEnv *env, FFAudioIO *aio, AVCodec *encoder) {
    int res = 0;
    int64_t out_sample_rate;
    int64_t out_channel_count;
    int64_t out_channel_layout;
    enum AVSampleFormat out_sample_fmt;

    // make sure we clean up before resetting this
    // in case this is called twice
    if (aio->encode_frame) {
        av_frame_free(&aio->encode_frame);
    }
    if (aio->encode_context) {
        avcodec_close(aio->encode_context);
        av_free(aio->encode_context);
    }

    aio->encode_context = avcodec_alloc_context3(encoder);
    if (!aio->encode_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate codec context.");
        goto bail;
    }

    // init to whatever we have in SwrContext
    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_channel_layout", 0, &out_channel_layout);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out_sample_fmt);

    aio->encode_context->sample_fmt = out_sample_fmt;
    aio->encode_context->sample_rate = out_sample_rate;
    aio->encode_context->channel_layout = out_channel_layout;
    aio->encode_context->channels = out_channel_count;

    res = avcodec_open2(aio->encode_context, encoder, NULL);
    if (res < 0) {
        throwIOExceptionIfError(env, res, "Could not open encoder.");
        goto bail;
    }

    aio->encode_frame = av_frame_alloc();
    if (!aio->encode_frame) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate encoder frame.");
        goto bail;
    }
    aio->encode_frame->nb_samples = aio->encode_context->frame_size; // this will be changed later!!
    aio->encode_frame->format = aio->encode_context->sample_fmt;
    aio->encode_frame->channel_layout = aio->encode_context->channel_layout;

    bail:

    return res;
}
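Because the encoder is opened with whatever output format the SwrContext was configured for, a defensive check against the codec's advertised sample formats can catch a mismatch before avcodec_open2() fails. A possible sketch (the helper name is an assumption; it relies on AVCodec's sample_fmts list, which is NULL when the codec does not restrict formats):

// Returns 1 if the encoder advertises support for fmt, 0 otherwise.
static int encoder_supports_sample_fmt(const AVCodec *encoder, enum AVSampleFormat fmt) {
    const enum AVSampleFormat *p = encoder->sample_fmts;
    if (!p) return 1; // no restriction published by the codec
    for (; *p != AV_SAMPLE_FMT_NONE; p++) {
        if (*p == fmt) return 1;
    }
    return 0;
}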
Example 4
/**
 * Callback read function used by our custom AVIOContext.
 *
 * @param opaque    pointer to the current FFAudioIO
 * @param buf       buffer to write freshly read data to
 * @param size      size of buf
 * @return          number of bytes read or a negative number in case of an error
 */
static int read_callback(void *opaque, uint8_t *buf, int size) {
    int res = 0;
    int available_data;
    jobject read_buffer = NULL;
    uint8_t *java_buffer = NULL;
    FFAudioIO *aio = (FFAudioIO*)opaque;

    // tell java to fill buffer
    available_data = (int) (*aio->env)->CallIntMethod(aio->env, aio->java_instance, fillReadBuffer_MID);
    if (available_data > size) {
        res = -1;
        throwIOExceptionIfError(aio->env, 1, "Available data must not be larger than callback buffer.");
        goto bail;
    }
    if ((*aio->env)->ExceptionCheck(aio->env)) {
        // needed?
        //(*aio->env)->ExceptionDescribe(aio->env);
        res = -1;
        goto bail;
    }

    if (available_data <= 0) {
        res = 0;
        goto bail;
    }

    read_buffer = (*aio->env)->GetObjectField(aio->env, aio->java_instance, readBuffer_FID);
    if (!read_buffer) {
        res = -1;
        throwIOExceptionIfError(aio->env, 1, "Failed to get read buffer.");
        goto bail;
    }

    java_buffer = (uint8_t *)(*aio->env)->GetDirectBufferAddress(aio->env, read_buffer);
    if (!java_buffer) {
        res = -1;
        throwIOExceptionIfError(aio->env, 1, "Failed to get address for read buffer.");
        goto bail;
    }

    // copy to c buffer
    memcpy(buf, (const uint8_t *)java_buffer, available_data);
    // return the number of bytes actually copied
    res = available_data;

bail:

    return res;
}
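Note that newer FFmpeg releases expect custom read callbacks to signal end-of-stream with AVERROR_EOF instead of returning 0. If the library targets such a version, the empty-buffer branch above could be written as follows (a sketch under that assumption):

    if (available_data <= 0) {
        res = AVERROR_EOF; // returning 0 as an EOF indicator is deprecated in recent FFmpeg versions
        goto bail;
    }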
Example 5
/**
 * Callback read function for our custom AVIOContext.
 *
 * @param opaque    pointer passed in with Callback information
 * @param buf       the buffer to write to
 * @param size      size of buf
 * @return          number of bytes written to buf or a negative value in case of an error
 */
static int read_callback(void *opaque, uint8_t *buf, int size) {
    int res = 0;
    int availableData;
    uint8_t *java_buffer = NULL;
    FFCallback *callback = (FFCallback *)opaque;

    if (callback->call_count > 0) goto bail;
    callback->call_count = 1;

    java_buffer = (uint8_t *)(*callback->env)->GetDirectBufferAddress(callback->env, callback->byte_buffer);
    if (!java_buffer) {
        res = -1;
        throwIOExceptionIfError(callback->env, 1, "Failed to get address for byte buffer");
        goto bail;
    }
    // get available data, i.e. the limit of the java buffer
    availableData = (*callback->env)->CallIntMethod(callback->env, callback->byte_buffer, limit_MID);
    if (availableData > size) {
        res = -1;
        throwIOExceptionIfError(callback->env, 1, "Available data must not be larger than callback buffer.");
        goto bail;
    }
    // copy to c buf
    memcpy(buf, (const uint8_t *)java_buffer, availableData);

    res = availableData;

bail:

    return res;
}
Example 6
/**
 * Encodes a buffer to the final format using its FFAudioIO encode_context.
 *
 * @param aio       FFAudioIO context
 * @param in_buf    input buffer, data to encode
 * @param in_size   size of the input buffer
 * @param out_buf   output buffer
 * @return number of bytes written to out_buf or a negative value, should something go wrong
 */
static int encode_buffer(FFAudioIO *aio, const uint8_t *in_buf, int in_size, const uint8_t *out_buf) {
    int res = 0;
    int got_output;

    res = av_samples_get_buffer_size(NULL, aio->encode_context->channels, aio->encode_frame->nb_samples, aio->encode_context->sample_fmt, 1);

#ifdef DEBUG
    fprintf(stderr, "encode_buffer: channels=%d frame->nb_samples=%d in_size=%d\n", aio->encode_context->channels, aio->encode_frame->nb_samples, in_size);
    fprintf(stderr, "encode_buffer: needed buffer=%d available=%d\n", res, in_size);
#endif

    // setup the data pointers in the AVFrame
    res = avcodec_fill_audio_frame(aio->encode_frame,
            aio->encode_context->channels,
            aio->encode_context->sample_fmt,
            in_buf,
            in_size,
            1);
    if (res < 0) {
        throwIOExceptionIfError(aio->env, res, "Failed to fill audio frame.");
        goto bail;
    }
    av_init_packet(&aio->encode_packet);
    aio->encode_packet.data = NULL; // packet data will be allocated by the encoder
    aio->encode_packet.size = 0;
    // encode the samples
    res = avcodec_encode_audio2(aio->encode_context, &aio->encode_packet, aio->encode_frame, &got_output);
    if (res < 0) {
        throwIOExceptionIfError(aio->env, res, "Failed to encode audio frame.");
        goto bail;
    }
    if (got_output) {
        res = aio->encode_packet.size;
        memcpy((char*)out_buf, aio->encode_packet.data, aio->encode_packet.size);
        av_packet_unref(&aio->encode_packet);
    }

    bail:

    return res;
}
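avcodec_encode_audio2() is deprecated in current FFmpeg. With FFmpeg 3.1 or later the same step can be expressed with the send/receive API; a drop-in sketch for the encode section above (assuming, like the original code, that one packet per frame suffices):

    res = avcodec_send_frame(aio->encode_context, aio->encode_frame);
    if (res < 0) {
        throwIOExceptionIfError(aio->env, res, "Failed to send audio frame to encoder.");
        goto bail;
    }
    res = avcodec_receive_packet(aio->encode_context, &aio->encode_packet);
    if (res == AVERROR(EAGAIN) || res == AVERROR_EOF) {
        res = 0; // encoder has buffered the frame, no packet produced yet
    } else if (res < 0) {
        throwIOExceptionIfError(aio->env, res, "Failed to encode audio frame.");
        goto bail;
    } else {
        res = aio->encode_packet.size;
        memcpy((char*)out_buf, aio->encode_packet.data, aio->encode_packet.size);
        av_packet_unref(&aio->encode_packet);
    }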
Example 7
/**
 * Allocates and initializes the SwrContext so that we don't have to deal with planar sample formats.
 *
 * @param env JNIEnv
 * @param aio FFAudioIO
 * @return a negative value should an error occur
 */
static int init_swr(JNIEnv *env, FFAudioIO *aio) {
    int res = 0;

    aio->swr_context = swr_alloc();
    if (!aio->swr_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate swr context.");
        goto bail;
    }

    av_opt_set_sample_fmt(aio->swr_context, "in_sample_fmt",  aio->stream->codecpar->format, 0);
    // make sure we get interleaved/packed output
    av_opt_set_sample_fmt(aio->swr_context, "out_sample_fmt", av_get_packed_sample_fmt(aio->stream->codecpar->format), 0);

    // keep everything else the way it was...
    av_opt_set_int(aio->swr_context, "in_channel_count",  aio->stream->codecpar->channels, 0);
    av_opt_set_int(aio->swr_context, "out_channel_count",  aio->stream->codecpar->channels, 0);
    av_opt_set_int(aio->swr_context, "in_channel_layout",  aio->stream->codecpar->channel_layout, 0);
    av_opt_set_int(aio->swr_context, "out_channel_layout", aio->stream->codecpar->channel_layout, 0);
    av_opt_set_int(aio->swr_context, "in_sample_rate",     aio->stream->codecpar->sample_rate, 0);
    av_opt_set_int(aio->swr_context, "out_sample_rate",    aio->stream->codecpar->sample_rate, 0);

    res = swr_init(aio->swr_context);
    if (res < 0) {
        throwIOExceptionIfError(env, res, "Could not initialize swr context.");
        goto bail;
    }

    //fprintf(stderr, "init_swr: dither context: %d\n", aio->swr_context->dither);
    //fprintf(stderr, "init_swr: output sample bits: %d\n", aio->swr_context->dither.output_sample_bits);

    bail:

    return res;
}
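The same configuration can also be expressed more compactly with swr_alloc_set_opts(); a sketch using the same codecpar fields (this variant only covers the layout/format/rate options and, like the code above, keeps input and output identical except for the packed sample format):

    aio->swr_context = swr_alloc_set_opts(NULL,
        aio->stream->codecpar->channel_layout,                                         // out_ch_layout
        av_get_packed_sample_fmt((enum AVSampleFormat)aio->stream->codecpar->format),  // out_sample_fmt
        aio->stream->codecpar->sample_rate,                                            // out_sample_rate
        aio->stream->codecpar->channel_layout,                                         // in_ch_layout
        (enum AVSampleFormat)aio->stream->codecpar->format,                            // in_sample_fmt
        aio->stream->codecpar->sample_rate,                                            // in_sample_rate
        0, NULL);                                                                      // log_offset, log_ctx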
Example 8
/**
 * Opens the input file/url and allocates an AVFormatContext for it, but does not open the audio stream with an
 * appropriate decoder.
 *
 * @param env JNIEnv
 * @param format_context AVFormatContext
 * @param url URL to open
 * @return negative value, if something went wrong
 */
int ff_open_format_context(JNIEnv *env, AVFormatContext **format_context, const char *url) {
    int res = 0;
    int probe_score = 0;

    res = avformat_open_input(format_context, url, NULL, NULL);
    if (res) {
        if (res == AVERROR(ENOENT) || res == AVERROR_HTTP_NOT_FOUND) {
            throwFileNotFoundExceptionIfError(env, res, url);
        } else if (res == AVERROR_PROTOCOL_NOT_FOUND
                || res == AVERROR_HTTP_BAD_REQUEST
                || res == AVERROR_HTTP_UNAUTHORIZED
                || res == AVERROR_HTTP_FORBIDDEN
                || res == AVERROR_HTTP_OTHER_4XX
                || res == AVERROR_HTTP_SERVER_ERROR
                || res == AVERROR(EIO)) {
            throwIOExceptionIfError(env, res, url);
        } else {
            throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to open audio file");
        }
        goto bail;
    }
    probe_score = av_format_get_probe_score(*format_context);

    #ifdef DEBUG
        fprintf(stderr, "ff_open_format_context(): probe score=%i\n", probe_score);
    #endif

    if (probe_score < MIN_PROBE_SCORE) {
        res = probe_score;
        throwUnsupportedAudioFileExceptionIfError(env, probe_score, "Probe score too low");
        goto bail;
    }

    res = avformat_find_stream_info(*format_context, NULL);
    if (res < 0) {
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to find stream info");
        goto bail;
    }

bail:

    return res;
}
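A hypothetical caller sketch (not part of the library; the file name is made up) showing the lifecycle expected around ff_open_format_context():

    AVFormatContext *format_context = NULL;
    int res = ff_open_format_context(env, &format_context, "some_audio_file.flac");
    if (res == 0) {
        // inspect format_context->nb_streams and streams[i]->codecpar here ...
    }
    // avformat_close_input() is NULL-safe and also covers failure paths where the
    // context survived avformat_open_input() but e.g. stream info could not be found
    if (format_context) avformat_close_input(&format_context);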
Example 9
/**
 * Re-configures SwrContext and Encoder to match the provided target_format.
 *
 * @param env           JNIEnv
 * @param object        stream instance this call stems from, i.e. a FFCodecInputStream
 * @param target_format target AudioFormat
 * @param aio_pointer   Pointer to the FFAudioIO struct of the FFNativePeerInputStream that opened the file/stream
 * @return pointer to the FFAudioIO struct that was given as parameter
 */
JNIEXPORT jlong JNICALL Java_com_tagtraum_ffsampledsp_FFCodecInputStream_open(JNIEnv *env, jobject object, jobject target_format, jlong aio_pointer) {
    int res = 0;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_NONE;
    int64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
    int is_float = 0;
    int is_signed = 0;
    AVCodec *encoder = NULL;
    int dither_method = SWR_DITHER_NONE;
    int output_sample_bits = 0;

    init_ids(env);

    FFAudioIO *aio = (FFAudioIO*)(intptr_t)aio_pointer;

    jfloat sample_rate = (*env)->CallFloatMethod(env, target_format, getSampleRate_MID);
    jint sample_size_in_bits = (*env)->CallIntMethod(env, target_format, getSampleSizeInBits_MID);
    jint channels = (*env)->CallIntMethod(env, target_format, getChannels_MID);
    jboolean big_endian = (*env)->CallBooleanMethod(env, target_format, isBigEndian_MID);
    jobject encoding = (*env)->CallObjectMethod(env, target_format, getEncoding_MID);
    jstring jencoding_name = (jstring)(*env)->CallObjectMethod(env, encoding, toString_MID);

    const char *encoding_name = (*env)->GetStringUTFChars(env, jencoding_name, NULL);
    is_float = strcmp("PCM_FLOAT", encoding_name) == 0;
    is_signed = strcmp("PCM_SIGNED", encoding_name) == 0;
#ifdef DEBUG
    fprintf(stderr, "encoding = %s\n", encoding_name);
    fprintf(stderr, "signed   = %d\n", is_signed);
    fprintf(stderr, "float    = %d\n", is_float);
    fprintf(stderr, "bits     = %d\n", (int)sample_size_in_bits);
#endif

    // release encoding_name only after the debug output above, while the string is still valid
    (*env)->ReleaseStringUTFChars(env, jencoding_name, encoding_name);

    if (sample_size_in_bits <= 8) {
        out_sample_fmt = AV_SAMPLE_FMT_U8;
    } else if (sample_size_in_bits <=16) {
        out_sample_fmt = AV_SAMPLE_FMT_S16;
    } else if (sample_size_in_bits <= 32 && is_float) {
        out_sample_fmt = AV_SAMPLE_FMT_FLT;
    } else if (sample_size_in_bits <=32) {
        out_sample_fmt = AV_SAMPLE_FMT_S32;
    } else if (sample_size_in_bits <= 64 && is_float) {
        out_sample_fmt = AV_SAMPLE_FMT_DBL;
    } else {
        fprintf(stderr, "Will use 64 bit PCM_FLOAT even though it might not have been desired.\n");
        out_sample_fmt = AV_SAMPLE_FMT_DBL;
    }

    if (aio->stream->codecpar->channels == channels) {
        out_channel_layout = aio->stream->codecpar->channel_layout;
    } else if (channels == 1) {
        out_channel_layout = AV_CH_LAYOUT_MONO;
    } else if (channels == 2) {
        out_channel_layout = AV_CH_LAYOUT_STEREO;
    } else {
        fprintf(stderr, "Undetermined channel layout, will use stereo.\n");
        channels = 2;
    }

    if (aio->stream->codecpar->bits_per_coded_sample > sample_size_in_bits) {
        dither_method = SWR_DITHER_TRIANGULAR;
        output_sample_bits = sample_size_in_bits;
    }

#ifdef DEBUG
    fprintf(stderr, "setting out format to: %d\n", out_sample_fmt);
#endif

    // remove default setup
    if (aio->swr_context) {
        swr_free(&aio->swr_context);
    }
    // allocate new
    aio->swr_context = swr_alloc();
    if (!aio->swr_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate swr context.");
        goto bail;
    }

    // standard stuff from input
    av_opt_set_sample_fmt(aio->swr_context, "in_sample_fmt",  aio->stream->codecpar->format, 0);
    av_opt_set_int(aio->swr_context, "in_channel_count",  aio->stream->codecpar->channels, 0);
    av_opt_set_int(aio->swr_context, "in_channel_layout",  aio->stream->codecpar->channel_layout, 0);
    av_opt_set_int(aio->swr_context, "in_sample_rate",     aio->stream->codecpar->sample_rate, 0);
    // custom stuff
    av_opt_set_int(aio->swr_context, "out_channel_layout", out_channel_layout, 0);
    av_opt_set_int(aio->swr_context, "out_channel_count", channels, 0);
    av_opt_set_int(aio->swr_context, "out_sample_rate", (int)round(sample_rate), 0);
    av_opt_set_sample_fmt(aio->swr_context, "out_sample_fmt", out_sample_fmt, 0);
    av_opt_set_int(aio->swr_context, "dither_method", dither_method, 0);
    av_opt_set_int(aio->swr_context, "output_sample_bits", output_sample_bits, 0);

    res = swr_init(aio->swr_context);
    if (res < 0) {
        throwIOExceptionIfError(env, res, "Could not re-initialize swr context.");
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "open codec: dither method     : %d\n", dither_method);
    fprintf(stderr, "open codec: output sample bits: %d\n", aio->swr_context->dither.output_sample_bits);
#endif

    // re-adjust encoder
    encoder = ff_find_encoder(out_sample_fmt, sample_size_in_bits, big_endian, is_signed);
    if (!encoder) {
        res = AVERROR(EINVAL);
        throwIOExceptionIfError(env, res, "Could not find suitable encoder.");
        goto bail;
    }
    res = ff_init_encoder(env, aio, encoder);
    if (res < 0) {
        goto bail;
    }

    bail:

    return (jlong)(intptr_t)aio;
}
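The bit-depth/encoding mapping above can be read as a small decision table; a sketch of the same logic factored into a helper (the name pick_out_sample_fmt is an assumption, not part of the library):

// Maps the requested Java sample size and encoding to a packed FFmpeg sample format.
static enum AVSampleFormat pick_out_sample_fmt(int sample_size_in_bits, int is_float) {
    if (sample_size_in_bits <= 8)              return AV_SAMPLE_FMT_U8;
    if (sample_size_in_bits <= 16)             return AV_SAMPLE_FMT_S16;
    if (sample_size_in_bits <= 32 && is_float) return AV_SAMPLE_FMT_FLT;
    if (sample_size_in_bits <= 32)             return AV_SAMPLE_FMT_S32;
    return AV_SAMPLE_FMT_DBL; // 64 bit double precision float, also the fallback case
}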
Example 10
/**
 * Decode a packet to a frame, run the result through SwrContext, if desired, encode it via an appropriate
 * encoder, and write the results to the Java-side native buffer.
 *
 * @param aio       FFAudioIO context
 * @param cached    non-zero if the data stems from the internal cache (only used for debug output)
 * @return number of bytes placed into java buffer or a negative value, if something went wrong
 */
static int decode_packet(FFAudioIO *aio, int cached) {
    int res = 0;
    uint8_t **resample_buf = NULL;
    jobject byte_buffer = NULL;
    uint8_t *javaBuffer = NULL;
    uint32_t out_buf_size = 0;
    int out_buf_samples = 0;
    int64_t out_channel_count;
    int64_t out_sample_rate;
    int flush = aio->got_frame;
    enum AVSampleFormat out;
    int bytesConsumed = 0;

    init_ids(aio->env, aio->java_instance);

    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out);

    resample_buf = av_mallocz(sizeof(uint8_t *) * 1); // one plane!

    // make sure we really have an audio packet
    if (aio->decode_packet.stream_index == aio->stream_index) {
        // decode frame
        // got_frame indicates whether we got a frame
        bytesConsumed = avcodec_decode_audio4(aio->decode_context, aio->decode_frame, &aio->got_frame, &aio->decode_packet);
        if (bytesConsumed < 0) {
            throwUnsupportedAudioFileExceptionIfError(aio->env, bytesConsumed, "Failed to decode audio frame.");
            res = bytesConsumed;
            goto bail; // make sure resample_buf is freed
        }

        if (aio->got_frame) {

            aio->decoded_samples += aio->decode_frame->nb_samples;
            out_buf_samples = aio->decode_frame->nb_samples;
#ifdef DEBUG
            fprintf(stderr, "samples%s n:%" PRIu64 " nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   aio->decoded_samples, aio->decode_frame->nb_samples,
                   av_ts2timestr(aio->decode_frame->pts, &aio->decode_context->time_base));
#endif

            // adjust out sample number for a different sample rate
            // this is an estimate!!
            out_buf_samples = av_rescale_rnd(
                    swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate) + aio->decode_frame->nb_samples,
                    out_sample_rate,
                    aio->stream->codecpar->sample_rate,
                    AV_ROUND_UP
            );

            // allocate new aio->audio_data buffers
            res = av_samples_alloc(aio->audio_data, NULL, av_frame_get_channels(aio->decode_frame),
                                   aio->decode_frame->nb_samples, aio->decode_frame->format, 1);
            if (res < 0) {
                throwIOExceptionIfError(aio->env, res, "Could not allocate audio buffer.");
                goto bail; // make sure resample_buf is freed
            }
            // copy audio data to aio->audio_data
            av_samples_copy(aio->audio_data, aio->decode_frame->data, 0, 0,
                            aio->decode_frame->nb_samples, av_frame_get_channels(aio->decode_frame), aio->decode_frame->format);

            res = resample(aio, resample_buf, out_buf_samples, (const uint8_t **)aio->audio_data, aio->decode_frame->nb_samples);
            if (res < 0) goto bail;
            else out_buf_samples = res;

        } else if (flush && swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate)) {

            res = resample(aio, resample_buf, swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate), NULL, 0);
            if (res < 0) goto bail;
            else out_buf_samples = res;
        } else {
#ifdef DEBUG
            fprintf(stderr, "Got no frame.\n");
#endif
        }

        if (out_buf_samples > 0) {

            res =  av_samples_get_buffer_size(NULL, (int)out_channel_count, out_buf_samples, out, 1);
            if (res < 0) goto bail;
            else out_buf_size = res;

            // ensure native buffer capacity
            if (aio->java_buffer_capacity < out_buf_size) {
                aio->java_buffer_capacity = (*aio->env)->CallIntMethod(aio->env, aio->java_instance, setNativeBufferCapacity_MID, (jint)out_buf_size);
            }
            // get java-managed byte buffer reference
            byte_buffer = (*aio->env)->GetObjectField(aio->env, aio->java_instance, nativeBuffer_FID);
            if (!byte_buffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get native buffer.");
                goto bail;
            }

            // we have some samples, let's copy them to the java buffer, using the desired encoding
            javaBuffer = (uint8_t *)(*aio->env)->GetDirectBufferAddress(aio->env, byte_buffer);
            if (!javaBuffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get address for native buffer.");
                goto bail;
            }
            if (aio->encode_context) {
                aio->encode_frame->nb_samples = out_buf_samples;
                res = encode_buffer(aio, resample_buf[0], out_buf_size, javaBuffer);
                if (res < 0) {
                    out_buf_size = 0;
                    goto bail;
                }
                out_buf_size = res;
            } else {
                memcpy(javaBuffer, resample_buf[0], out_buf_size);
            }
            // we already wrote to the buffer, now we still need to
            // set new bytebuffer limit and position to 0.
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, rewind_MID);
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, limit_MID, out_buf_size);
        }
    }

    aio->resampled_samples += out_buf_size;

bail:

    if (resample_buf) {
        if (resample_buf[0]) av_freep(&resample_buf[0]);
        av_free(resample_buf);
    }
    if (aio->audio_data[0]) av_freep(&aio->audio_data[0]);

    return res;
}
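decode_packet() is designed to be driven by a packet-reading loop. A simplified sketch of such a driver (hypothetical; the real loop in the library also manages Java-side state and the packet lifecycle in more detail):

    // read packets until some bytes were produced, an error occurred, or EOF was reached
    while ((res = av_read_frame(aio->format_context, &aio->decode_packet)) >= 0) {
        res = decode_packet(aio, 0);
        av_packet_unref(&aio->decode_packet);
        if (res != 0) break; // produced output or failed
    }
    if (res == AVERROR_EOF) {
        // drain decoder and resampler with an empty packet
        aio->decode_packet.data = NULL;
        aio->decode_packet.size = 0;
        res = decode_packet(aio, 1);
    }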
Example 11
/**
 * Opens the byte buffer to determine its AudioFileFormat.
 *
 * @param env JNIEnv
 * @param instance calling FFAudioFileReader instance
 * @param byte_buffer audio data
 * @return array of AudioFileFormat objects
 */
JNIEXPORT jobjectArray JNICALL Java_com_tagtraum_ffsampledsp_FFAudioFileReader_getAudioFileFormatsFromBuffer(JNIEnv *env, jobject instance, jobject byte_buffer) {
    int res = 0;
    AVFormatContext *format_context = NULL;
    //AVStream *stream = NULL;
    jobjectArray array = NULL;

    unsigned char* callbackBuffer = NULL;
    FFCallback *callback = NULL;
    AVIOContext *io_context;

    init_ids(env);

    callback = calloc(1, sizeof(FFCallback));
    if (!callback) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate callback.");
        goto bail;
    }
    callback->env = env;
    callback->byte_buffer = byte_buffer;
    callback->call_count = 0;

    format_context = avformat_alloc_context();
    if (!format_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate format context.");
        goto bail;
    }

    // limit probe to less than what we read in one chunk...
    format_context->probesize = 8*1024; // this corresponds to the Java code!
    format_context->max_analyze_duration = 5*AV_TIME_BASE;

    callbackBuffer = (unsigned char*)av_malloc(CALLBACK_BUFFERSIZE * sizeof(uint8_t));
    if (!callbackBuffer) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate callback buffer.");
        goto bail;
    }

    io_context = avio_alloc_context(
        callbackBuffer,      // IOBuffer
        CALLBACK_BUFFERSIZE, // Buffer Size (32kb corresponds to Java code)
        0,                   // Write flag, only reading, so 0
        callback,            // FFCallback pointer (opaque)
        read_callback,       // Read callback
        NULL,                // Write callback
        NULL                 // Seek callback
    );
    if (!io_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate custom IO context.");
        goto bail;
    }
    // we didn't supply a seek function in avio_alloc_context, so we need to make sure we don't seek..
    io_context->seekable = 0;

    format_context->pb = io_context;

    res = ff_open_format_context(env, &format_context, "MemoryAVIOContext");
    if (res) {
        goto bail;
    }

    res = create_ffaudiofileformats(env, format_context, &array, NULL);
    if (res) {
        goto bail;
    }

bail:

    /*
    if (stream && stream->codec) {
        avcodec_close(stream->codec);
    }
    */
    if (format_context) {
        AVFormatContext *s = format_context;
        if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO)) {
            if (s->pb) {
                avio_flush(s->pb);
                av_free(s->pb->buffer);
                av_free(s->pb);
            }
        }

        avformat_close_input(&format_context);
    }
    if (callback) {
        free(callback);
    }

    return array;
}
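On FFmpeg versions that provide avio_context_free() (3.x and later), the manual release of the custom AVIOContext in the bail section can be expressed like this (a sketch under that version assumption; the buffer is still freed separately because avio_context_free() does not release it):

            if (s->pb) {
                av_freep(&s->pb->buffer);    // free whatever buffer the context currently points to
                avio_context_free(&s->pb);   // replaces the plain av_free(s->pb) used above
            }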
Example 12
/**
 * Creates the FFAudioIO, custom AVIOContext etc for reading data from the stream.
 *
 * @param env       JNIEnv
 * @param stream        calling FFStreamInputStream instance
 * @param streamIndex   index of the stream to open
 * @return              pointer to the created FFAudioIO
 */
JNIEXPORT jlong JNICALL Java_com_tagtraum_ffsampledsp_FFStreamInputStream_open(JNIEnv *env, jobject stream, jint streamIndex) {

    int res = 0;
    FFAudioIO *aio;
    AVIOContext *io_context;
    unsigned char* callback_buffer = NULL;

    init_ids(env, stream);

    aio = calloc(1, sizeof(FFAudioIO));
    if (!aio) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate audio IO.");
        goto bail;
    }
    aio->env = env;
    aio->java_instance = stream;

    aio->format_context = avformat_alloc_context();
    if (!aio->format_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate format context.");
        goto bail;
    }

    // limit probe to less than what we read in one chunk...
    aio->format_context->probesize = 8*1024;
    aio->format_context->max_analyze_duration = 5*AV_TIME_BASE;

    callback_buffer = (unsigned char*)av_malloc(CALLBACK_BUFFERSIZE * sizeof(uint8_t));
    if (!callback_buffer) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate callback buffer.");
        goto bail;
    }

    io_context = avio_alloc_context(
        callback_buffer,      // IOBuffer
        CALLBACK_BUFFERSIZE, // Buffer Size (32kb corresponds to Java code)
        0,                   // Write flag, only reading, so 0
        aio,                 // FFAudioIO pointer (opaque)
        read_callback,       // Read callback
        NULL,                // Write callback
        NULL                 // Seek callback
    );
    if (!io_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate custom IO context.");
        goto bail;
    }
    // we didn't supply a seek function in avio_alloc_context,
    // so we need to make sure we don't seek...
    io_context->seekable = 0;

    aio->format_context->pb = io_context;
    aio->stream_index = (int)streamIndex;

    res = ff_open_file(env, &aio->format_context, &aio->stream, &aio->decode_context, &aio->stream_index, "MemoryAVIOContext");
    if (res) {
        // exception is already thrown
        goto bail;
    }

    res = ff_init_audioio(env, aio);
    if (res) {
        // exception is already thrown
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "stream->codecpar->bits_per_coded_sample: %i\n", aio->stream->codecpar->bits_per_coded_sample);
    fprintf(stderr, "stream->codecpar->bits_per_raw_sample  : %i\n", aio->stream->codecpar->bits_per_raw_sample);
    fprintf(stderr, "stream->codecpar->bit_rate             : %i\n", aio->stream->codecpar->bit_rate);
    fprintf(stderr, "frames     : %" PRId64 "\n", aio->stream->nb_frames);
    fprintf(stderr, "sample_rate: %i\n", aio->stream->codecpar->sample_rate);
    fprintf(stderr, "channels   : %i\n", aio->stream->codecpar->channels);
    fprintf(stderr, "frame_size : %i\n", aio->stream->codecpar->frame_size);
    fprintf(stderr, "codec_id   : %i\n", aio->stream->codecpar->codec_id);
#endif

bail:

    if (res) ff_audioio_free(aio);
    return (jlong)(intptr_t)aio;
}