Code Example #1
/**
 * Opens the given stream, i.e. sets up a decoder.
 *
 * @param env JNIEnv
 * @param stream AVStream to open
 * @param context AVCodecContext to allocate and set up (out parameter)
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_open_stream(JNIEnv *env, AVStream *stream, AVCodecContext **context) {
#ifdef DEBUG
    fprintf(stderr, "Opening stream...\n");
#endif

    int res = 0;
    AVCodec *decoder = NULL;
    AVDictionary *opts = NULL;
    int refcount = 0; // 0: do not use reference-counted frames

    decoder = avcodec_find_decoder(stream->codecpar->codec_id);
    if (!decoder) {
        fprintf(stderr, "Failed to find %s codec\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        res = AVERROR(EINVAL);
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to find codec.");
        goto bail;
    }

    *context = avcodec_alloc_context3(decoder);
    if (!*context) {
        fprintf(stderr, "Failed to allocate context\n");
        res = AVERROR(EINVAL);
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to allocate codec context.");
        goto bail;
    }

    /* Copy codec parameters from input stream to output codec context */
    if ((res = avcodec_parameters_to_context(*context, stream->codecpar)) < 0) {
        fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to copy codec parameters.");
        goto bail;
    }

    /* Init the decoders, with or without reference counting */
    av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
    if ((res = avcodec_open2(*context, decoder, &opts)) < 0) {
        fprintf(stderr, "Failed to open %s codec\n", av_get_media_type_string(AVMEDIA_TYPE_AUDIO));
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to open codec.");
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "Stream was opened.\n");
#endif
    return res;

bail:
    return res;
}
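
A minimal calling sketch (not part of the project) for ff_open_stream: it assumes 'env' comes from the surrounding JNI method and that 'format_context' was opened beforehand with stream 0 being an audio stream; avcodec_free_context pairs with the avcodec_alloc_context3 call above.

AVCodecContext *codec_context = NULL;
AVStream *audio_stream = format_context->streams[0]; // assumes stream 0 is audio

if (ff_open_stream(env, audio_stream, &codec_context) < 0) {
    // a Java exception has already been thrown inside ff_open_stream
} else {
    // ... decode packets using codec_context ...
    avcodec_free_context(&codec_context); // pairs with avcodec_alloc_context3 above
}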
Code Example #2
File: MFUtils.cpp  Project: hendriks73/mfsampledsp
/**
 * Creates a Media Source Reader.
 *
 * @param env JNI env
 * @param path path
 * @param ppMediaSrcReader media source reader
 * @return HRESULT
 */
HRESULT mf_createMediaSourceReader(JNIEnv *env, jstring path, IMFSourceReader **ppMediaSrcReader) {

    HRESULT res = S_OK;
    const LPWSTR pwszFilePath = (LPWSTR)env->GetStringChars(path, NULL);

    res = MFCreateSourceReaderFromURL(
        pwszFilePath, 
        NULL, 
        ppMediaSrcReader);
    if (HRESULT_CODE(res) == ERROR_FILE_NOT_FOUND
            || HRESULT_CODE(res) == ERROR_PATH_NOT_FOUND
            || HRESULT_CODE(res) == ERROR_NOT_DOS_DISK
            || HRESULT_CODE(res) == ERROR_BAD_NETPATH) {
        const char *filePath = env->GetStringUTFChars(path, NULL);
        throwFileNotFoundExceptionIfError(env, res, filePath);
        env->ReleaseStringUTFChars(path, filePath);
        goto bail;
    }
    if (res != S_OK) {
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to create source reader from url");
        goto bail;
    }

bail:

    env->ReleaseStringChars(path, (jchar *)pwszFilePath);

    return res;
}
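
A minimal calling sketch (not part of the project) for mf_createMediaSourceReader: it assumes Media Foundation has already been started via MFStartup and that 'env' and 'path' come from the surrounding JNI method.

IMFSourceReader *pReader = NULL;
HRESULT hr = mf_createMediaSourceReader(env, path, &pReader);
if (SUCCEEDED(hr) && pReader != NULL) {
    // ... negotiate an output media type and pull audio via pReader->ReadSample(...) ...
    pReader->Release();
}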
Code Example #3
/**
 * Opens the input file/url and allocates an AVFormatContext for it, but does not open the audio stream with an
 * appropriate decoder.
 *
 * @param env JNIEnv
 * @param format_context AVFormatContext
 * @param url URL to open
 * @return negative value, if something went wrong
 */
int ff_open_format_context(JNIEnv *env, AVFormatContext **format_context, const char *url) {
    int res = 0;
    int probe_score = 0;

    res = avformat_open_input(format_context, url, NULL, NULL);
    if (res) {
        if (res == AVERROR(ENOENT) || res == AVERROR_HTTP_NOT_FOUND) {
            throwFileNotFoundExceptionIfError(env, res, url);
        } else if (res == AVERROR_PROTOCOL_NOT_FOUND
                || res == AVERROR_HTTP_BAD_REQUEST
                || res == AVERROR_HTTP_UNAUTHORIZED
                || res == AVERROR_HTTP_FORBIDDEN
                || res == AVERROR_HTTP_OTHER_4XX
                || res == AVERROR_HTTP_SERVER_ERROR
                || res == AVERROR(EIO)) {
            throwIOExceptionIfError(env, res, url);
        } else {
            throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to open audio file");
        }
        goto bail;
    }
    probe_score = av_format_get_probe_score(*format_context);

    #ifdef DEBUG
        fprintf(stderr, "ff_open_format_context(): probe score=%i\n", probe_score);
    #endif

    if (probe_score < MIN_PROBE_SCORE) {
        res = probe_score;
        throwUnsupportedAudioFileExceptionIfError(env, probe_score, "Probe score too low");
        goto bail;
    }

    res = avformat_find_stream_info(*format_context, NULL);
    if (res < 0) {
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to find stream info");
        goto bail;
    }

bail:

    return res;
}
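
A minimal calling sketch (not part of the project) for ff_open_format_context; the file name is hypothetical, and avformat_close_input both closes and frees the context.

AVFormatContext *format_context = NULL;

if (ff_open_format_context(env, &format_context, "/tmp/test.wav") == 0) { // hypothetical path
    // ... select and open an audio stream, e.g. via ff_open_stream above ...
    avformat_close_input(&format_context); // closes and frees the context
} // on error, a Java exception has already been thrown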
Code Example #4
File: MFUtils.cpp  Project: hendriks73/mfsampledsp
/**
 * Creates a Media Source.
 *
 * @param env JNI env
 * @param path path
 * @param ppMediaSrc media source
 * @return HRESULT
 */
HRESULT mf_createMediaSource(JNIEnv *env, jstring path, IMFMediaSource **ppMediaSrc) {

    HRESULT res = S_OK;
    const LPWSTR pwszFilePath = (LPWSTR)env->GetStringChars(path, NULL);
    IUnknown *pUnk = NULL;
    IMFSourceResolver *pResolver = NULL;
    MF_OBJECT_TYPE ObjectType = MF_OBJECT_INVALID;


    *ppMediaSrc = NULL;
    res = MFCreateSourceResolver(&pResolver);
    if (res != S_OK || pResolver == NULL) {
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to create source resolver");
        goto bail;
    }
    // File format may not match its extension so we ignore the extension
    res = pResolver->CreateObjectFromURL(
        pwszFilePath,
        MF_RESOLUTION_MEDIASOURCE | MF_RESOLUTION_READ | MF_RESOLUTION_CONTENT_DOES_NOT_HAVE_TO_MATCH_EXTENSION_OR_MIME_TYPE,
        NULL,
        &ObjectType,
        &pUnk);
    if (res != S_OK || pUnk == NULL) {
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to create object from url");
        goto bail;
    }
    res = pUnk->QueryInterface(
            IID_IMFMediaSource, 
            (void**)(ppMediaSrc));
    if (res != S_OK) {
        throwUnsupportedAudioFileExceptionIfError(env, res, "Failed get media source interface");
        goto bail;
    }

bail:

    SAFE_RELEASE(pResolver)
    SAFE_RELEASE(pUnk)
    env->ReleaseStringChars(path, (jchar *)pwszFilePath);

    return res;
}
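
A minimal calling sketch (not part of the project) for mf_createMediaSource: an IMFMediaSource should be shut down before it is released, following the usual Media Foundation lifecycle.

IMFMediaSource *pMediaSrc = NULL;
HRESULT hr = mf_createMediaSource(env, path, &pMediaSrc);
if (SUCCEEDED(hr) && pMediaSrc != NULL) {
    // ... hand the source to a source reader or a topology ...
    pMediaSrc->Shutdown(); // release the resources held by the source
    pMediaSrc->Release();
}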
Code Example #5
/**
 * Decodes one packet to a frame, runs the result through SwrContext, encodes it via an appropriate
 * encoder (if one is configured), and writes the results to the Java-side native buffer.
 *
 * @param aio       FFAudioIO context
 * @param cached    non-zero for cached/flush packets (only affects debug output)
 * @return number of bytes placed into the Java buffer, or a negative value if something went wrong
 */
static int decode_packet(FFAudioIO *aio, int cached) {
    int res = 0;
    uint8_t **resample_buf = NULL;
    jobject byte_buffer = NULL;
    uint8_t *javaBuffer = NULL;
    uint32_t out_buf_size = 0;
    int out_buf_samples = 0;
    int64_t out_channel_count;
    int64_t out_sample_rate;
    int flush = aio->got_frame;
    enum AVSampleFormat out;
    int bytesConsumed = 0;

    init_ids(aio->env, aio->java_instance);

    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out);

    resample_buf = av_mallocz(sizeof(uint8_t *) * 1); // one plane!

    // make sure we really have an audio packet
    if (aio->decode_packet.stream_index == aio->stream_index) {
        // decode frame
        // got_frame indicates whether we got a frame
        bytesConsumed = avcodec_decode_audio4(aio->decode_context, aio->decode_frame, &aio->got_frame, &aio->decode_packet);
        if (bytesConsumed < 0) {
            throwUnsupportedAudioFileExceptionIfError(aio->env, bytesConsumed, "Failed to decode audio frame.");
            res = bytesConsumed;
            goto bail; // free resample_buf before returning
        }

        if (aio->got_frame) {

            aio->decoded_samples += aio->decode_frame->nb_samples;
            out_buf_samples = aio->decode_frame->nb_samples;
#ifdef DEBUG
            fprintf(stderr, "samples%s n:%" PRIu64 " nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   aio->decoded_samples, aio->decode_frame->nb_samples,
                   av_ts2timestr(aio->decode_frame->pts, &aio->decode_context->time_base));
#endif

            // adjust out sample number for a different sample rate
            // this is an estimate!!
            out_buf_samples = av_rescale_rnd(
                    swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate) + aio->decode_frame->nb_samples,
                    out_sample_rate,
                    aio->stream->codecpar->sample_rate,
                    AV_ROUND_UP
            );

            // allocate new aio->audio_data buffers
            res = av_samples_alloc(aio->audio_data, NULL, av_frame_get_channels(aio->decode_frame),
                                   aio->decode_frame->nb_samples, aio->decode_frame->format, 1);
            if (res < 0) {
                throwIOExceptionIfError(aio->env, res, "Could not allocate audio buffer.");
                res = AVERROR(ENOMEM);
                goto bail; // free resample_buf before returning
            }
            // copy audio data to aio->audio_data
            av_samples_copy(aio->audio_data, aio->decode_frame->data, 0, 0,
                            aio->decode_frame->nb_samples, av_frame_get_channels(aio->decode_frame), aio->decode_frame->format);

            res = resample(aio, resample_buf, out_buf_samples, (const uint8_t **)aio->audio_data, aio->decode_frame->nb_samples);
            if (res < 0) goto bail;
            else out_buf_samples = res;

        } else if (flush && swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate)) {

            res = resample(aio, resample_buf, swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate), NULL, 0);
            if (res < 0) goto bail;
            else out_buf_samples = res;
        } else {
#ifdef DEBUG
            fprintf(stderr, "Got no frame.\n");
#endif
        }

        if (out_buf_samples > 0) {

            res = av_samples_get_buffer_size(NULL, (int)out_channel_count, out_buf_samples, out, 1);
            if (res < 0) goto bail;
            else out_buf_size = res;

            // ensure native buffer capacity
            if (aio->java_buffer_capacity < out_buf_size) {
                aio->java_buffer_capacity = (*aio->env)->CallIntMethod(aio->env, aio->java_instance, setNativeBufferCapacity_MID, (jint)out_buf_size);
            }
            // get java-managed byte buffer reference
            byte_buffer = (*aio->env)->GetObjectField(aio->env, aio->java_instance, nativeBuffer_FID);
            if (!byte_buffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get native buffer.");
                goto bail;
            }

            // we have some samples, let's copy them to the java buffer, using the desired encoding
            javaBuffer = (uint8_t *)(*aio->env)->GetDirectBufferAddress(aio->env, byte_buffer);
            if (!javaBuffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get address for native buffer.");
                goto bail;
            }
            if (aio->encode_context) {
                aio->encode_frame->nb_samples = out_buf_samples;
                res = encode_buffer(aio, resample_buf[0], out_buf_size, javaBuffer);
                if (res < 0) {
                    out_buf_size = 0;
                    goto bail;
                }
                out_buf_size = res;
            } else {
                memcpy(javaBuffer, resample_buf[0], out_buf_size);
            }
            // we already wrote to the buffer, now we still need to
            // set new bytebuffer limit and position to 0.
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, rewind_MID);
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, limit_MID, out_buf_size);
        }
    }

    aio->resampled_samples += out_buf_size;

bail:

    if (resample_buf) {
        if (resample_buf[0]) av_freep(&resample_buf[0]);
        av_free(resample_buf);
    }
    if (aio->audio_data[0]) av_freep(&aio->audio_data[0]);

    return res;
}
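
A sketch (not part of the project) of the kind of read loop that typically drives decode_packet; the 'format_context' field on FFAudioIO is an assumption, the other fields follow the code above.

// hypothetical read loop; aio->format_context is assumed to exist on FFAudioIO
while (av_read_frame(aio->format_context, &aio->decode_packet) >= 0) {
    int bytes = decode_packet(aio, 0); // 0: not a cached/flush packet
    av_packet_unref(&aio->decode_packet);
    if (bytes != 0) break; // negative: error (exception already thrown), positive: data is in the Java buffer
}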
Code Example #6
/**
 * Opens the input file/url, allocates an AVFormatContext for it and opens the audio stream with an
 * appropriate decoder.
 *
 * @param env JNIEnv
 * @param format_context AVFormatContext
 * @param openedStream opened audio AVStream
 * @param context AVCodecContext for the opened stream
 * @param[in,out] stream_index on input, the index of the desired <em>audio</em> stream;
 *                             on output, the index of the selected stream (among <em>all</em> streams)
 * @param url URL to open
 * @return negative value, if something went wrong
 */
int ff_open_file(JNIEnv *env, AVFormatContext **format_context, AVStream **openedStream, AVCodecContext **context, int *stream_index, const char *url) {
    int res = 0;
    res = ff_open_format_context(env, format_context, url);
    if (res) {
        // exception has already been thrown
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "Desired audio stream index: %i.\n", *stream_index);
#endif

    if (*stream_index < 0) {
        // use best audio stream
        res = open_codec_context(stream_index, *format_context, *context, AVMEDIA_TYPE_AUDIO);
        if (res) {
            throwUnsupportedAudioFileExceptionIfError(env, res, "Failed to open codec context.");
            goto bail;
        }
        *openedStream = (*format_context)->streams[*stream_index];
    } else {
        // find xth audio stream
        // count possible audio streams
        int i;
        int audio_stream_number = 0;
        AVStream* stream = NULL;

        AVFormatContext* deref_format_context = *format_context;
        for (i=0; i<deref_format_context->nb_streams; i++) {
            stream = deref_format_context->streams[i];
            if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                if (audio_stream_number == *stream_index) {
                    *stream_index = i;
                #ifdef DEBUG
                    fprintf(stderr, "Found desired audio stream at index: %i.\n", i);
                #endif
                    break;
                }
                audio_stream_number++;
            }
            stream = NULL;
        }
        if (stream == NULL) {
            // we didn't find a stream with the given index
            res = -1;
            throwIndexOutOfBoundsExceptionIfError(env, res, *stream_index);
            goto bail;
        }
        res = ff_open_stream(env, stream, context);
        if (res) {
            goto bail;
        }
        *openedStream = stream;
    }

#ifdef DEBUG
    fprintf(stderr, "Opened stream index: %i.\n", *stream_index);
    fprintf(stderr, "Opened stream: %ld.\n", (long) *openedStream);
#endif

bail:

    return res;
}
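
A minimal calling sketch (not part of the project) for ff_open_file: a negative stream_index selects the best audio stream; the file name is hypothetical.

AVFormatContext *format_context = NULL;
AVCodecContext *codec_context = NULL;
AVStream *stream = NULL;
int stream_index = -1; // negative: pick the best audio stream

if (ff_open_file(env, &format_context, &stream, &codec_context, &stream_index, "/tmp/test.flac") == 0) { // hypothetical path
    // ... decode from 'stream' using 'codec_context' ...
    avcodec_free_context(&codec_context);
    avformat_close_input(&format_context);
} // on error, a Java exception has already been thrown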
Code Example #7
static int create_ffaudiofileformats(JNIEnv *env, AVFormatContext *format_context, jobjectArray *array, jstring url) {
    int res = 0;
    jlong duration_in_microseconds = -1;
    jfloat frame_rate = -1;
    jobject vbr = NULL;
    jboolean big_endian = 1;
    jobject audio_format = NULL;
    jint frame_size = -1;
    jint sample_size = -1;
    int audio_stream_count = 0;
    int audio_stream_number = 0;

    // count possible audio streams
    int i;
    for (i=0; i<format_context->nb_streams; i++) {
        AVStream* stream = format_context->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_count++;
        }
    }

#ifdef DEBUG
    fprintf(stderr, "Found %i audio streams.\n", audio_stream_count);
#endif

    // are there any audio streams at all?
    if (audio_stream_count == 0) {
        throwUnsupportedAudioFileExceptionIfError(env, -1, "Failed to find audio stream");
        goto bail;
    }

    // create output array
    *array = (*env)->NewObjectArray(env, audio_stream_count, (*env)->FindClass(env, "javax/sound/sampled/AudioFileFormat"), NULL);
    if (*array == NULL) {
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "Created audio file format array.\n");
#endif

    // iterate over audio streams
    for (i=0; i<format_context->nb_streams; i++) {
        AVStream* stream = format_context->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            /*
            res = ff_open_stream(env, stream);
            if (res) {
                goto bail;
            }
            */

            // create object
            duration_in_microseconds = duration(format_context, stream);
            frame_rate = get_frame_rate(stream, duration_in_microseconds);
            big_endian = ff_big_endian(stream->codecpar->codec_id);
            if (is_pcm(stream->codecpar->codec_id)) {
                frame_size = (stream->codecpar->bits_per_coded_sample / 8) * stream->codecpar->channels;
            }
            // TODO: Support VBR.

            sample_size = stream->codecpar->bits_per_raw_sample
                ? stream->codecpar->bits_per_raw_sample
                : stream->codecpar->bits_per_coded_sample;

            #ifdef DEBUG
                fprintf(stderr, "stream->codecpar->bits_per_coded_sample: %i\n", stream->codecpar->bits_per_coded_sample);
                fprintf(stderr, "stream->codecpar->bits_per_raw_sample  : %i\n", stream->codecpar->bits_per_raw_sample);
                fprintf(stderr, "stream->codecpar->bit_rate             : %i\n", stream->codecpar->bit_rate);
                fprintf(stderr, "format_context->packet_size         : %i\n", format_context->packet_size);
                fprintf(stderr, "frames     : %" PRId64 "\n", stream->nb_frames);
                fprintf(stderr, "sample_rate: %i\n", stream->codecpar->sample_rate);
                fprintf(stderr, "sampleSize : %i\n", stream->codecpar->bits_per_coded_sample);
                fprintf(stderr, "channels   : %i\n", stream->codecpar->channels);
                fprintf(stderr, "frame_size : %i\n", (int)frame_size);
                fprintf(stderr, "codec_id   : %i\n", stream->codecpar->codec_id);
                fprintf(stderr, "duration   : %" PRId64 "\n", (int64_t)duration_in_microseconds);
                fprintf(stderr, "frame_rate : %f\n", frame_rate);
                if (big_endian) {
                    fprintf(stderr, "big_endian  : true\n");
                } else {
                    fprintf(stderr, "big_endian  : false\n");
                }
            #endif
            audio_format = create_ffaudiofileformat(env, url,
                                                           stream->codecpar->codec_id,
                                                           (jfloat)stream->codecpar->sample_rate,
                                                           sample_size,
                                                           stream->codecpar->channels,
                                                           frame_size,
                                                           frame_rate,
                                                           big_endian,
                                                           duration_in_microseconds,
                                                           stream->codecpar->bit_rate,
                                                           vbr);

            (*env)->SetObjectArrayElement(env, *array, audio_stream_number, audio_format);
            audio_stream_number++;

            // clean up
            /*
            if (stream && stream->codec) {
                avcodec_close(stream->codec);
            }
            */
        }
    }

bail:
    return res;
}
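
A hypothetical caller (not part of the project) for create_ffaudiofileformats: it assumes the surrounding JNI method already holds 'env', a jstring 'jurl' and its UTF-8 form 'url'.

jobjectArray array = NULL;
AVFormatContext *format_context = NULL;

if (ff_open_format_context(env, &format_context, url) == 0) {
    create_ffaudiofileformats(env, format_context, &array, jurl);
    avformat_close_input(&format_context);
}
// 'array' stays NULL if an exception was thrown along the way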