/**
 * Opens the given URL to determine its AudioFileFormat.
 *
 * @param env JNIEnv
 * @param instance calling FFAudioFileReader instance
 * @param url URL (as jstring)
 * @return AudioFileFormat objects (or NULL, if an exception was thrown)
 */
JNIEXPORT jobjectArray JNICALL Java_com_tagtraum_ffsampledsp_FFAudioFileReader_getAudioFileFormatsFromURL(JNIEnv *env, jobject instance, jstring url) {

#ifdef DEBUG
    fprintf(stderr, "openFromUrl_1\n");
#endif

    int res = 0;
    AVFormatContext *format_context = NULL;
    jobjectArray array = NULL;
    const char *input_url = NULL;

    init_ids(env);

    input_url = (*env)->GetStringUTFChars(env, url, NULL);
    // BUGFIX: GetStringUTFChars may fail and return NULL (OutOfMemoryError
    // is then already pending); the original passed NULL on to
    // ff_open_format_context and unconditionally called
    // ReleaseStringUTFChars with the NULL chars pointer.
    if (!input_url) {
        goto bail;
    }

    res = ff_open_format_context(env, &format_context, input_url);
    if (res) {
        // exception has already been thrown
        goto bail;
    }

    res = create_ffaudiofileformats(env, format_context, &array, url);
    if (res) {
        // exception has already been thrown
        goto bail;
    }

bail:
    if (format_context) {
        avformat_close_input(&format_context);
    }
    if (input_url) {
        (*env)->ReleaseStringUTFChars(env, url, input_url);
    }
    return array;
}
/*
 * Resets the given cmdline_options struct to its default state:
 * zeroes the whole struct, runs init_ids() on it (presumably initializes
 * per-option id state — confirm against its definition), and selects the
 * default source encoding.
 *
 * Returns the same pointer that was passed in, for call chaining.
 */
static struct cmdline_options *
cmdline_options_init(struct cmdline_options *opt)
{
    MEMZERO(opt, *opt, 1);
    init_ids(opt);
    opt->src.enc.index = src_encoding_index;  /* default source encoding */
    return opt;
}
/*
 * Class:     mapnik_Mapnik
 * Method:    nativeInit
 * Signature: ()V
 *
 * One-time native initialization. Idempotent: returns immediately if
 * initialization already succeeded.
 */
JNIEXPORT void JNICALL Java_mapnik_Mapnik_nativeInit
  (JNIEnv *env, jclass c)
{
    PREAMBLE;
    // already initialized — nothing to do
    if (initialized) return;
    // NOTE(review): init_ids presumably caches JNI class/method/field IDs —
    // confirm. The flag is only set on success, so a failed lookup can be
    // retried on a later call.
    if (init_ids(env)) initialized=true;
    TRAILER_VOID;
}
/*
 * Called by the JVM when this native library is loaded.
 * Caches the JavaVM pointer in the global `jvm` and resolves cached
 * JNI ids/functions up front.
 *
 * @param vm       the loading JavaVM
 * @param reserved unused
 * @return the JNI version this library requires, or JNI_ERR when the
 *         running JVM cannot provide a JNI 1.6 environment
 */
jint JNI_OnLoad(JavaVM *vm, void *reserved) {
    JNIEnv *env = NULL;
    const jint rc = (*vm)->GetEnv(vm, (void **) &env, JNI_VERSION_1_6);
    if (rc != JNI_OK) {
        /* JNI version not supported */
        return JNI_ERR;
    }
    jvm = vm;
    init_ids(env);
    init_functions(env);
    return JNI_VERSION_1_6;
}
/*
 * Resets the given cmdline_options struct to its default state:
 * zeroes the whole struct, runs init_ids() on it (presumably initializes
 * per-option id state — confirm against its definition), selects the
 * default source encoding and marks the external/internal encodings as
 * not yet chosen. Optionally disables rubygems at build time.
 *
 * Returns the same pointer that was passed in, for call chaining.
 */
static struct cmdline_options *
cmdline_options_init(struct cmdline_options *opt)
{
    MEMZERO(opt, *opt, 1);
    init_ids(opt);
    opt->src.enc.index = src_encoding_index;  /* default source encoding */
    opt->ext.enc.index = -1;      /* -1: no external encoding chosen yet */
    opt->intern.enc.index = -1;   /* -1: no internal encoding chosen yet */
#if defined DISABLE_RUBYGEMS && DISABLE_RUBYGEMS
    /* build-time opt-out of rubygems */
    opt->disable |= DISABLE_BIT(gems);
#endif
    return opt;
}
/**
 * Re-configures SwrContext and Encoder to match the provided target_format.
 *
 * @param env JNIEnv
 * @param object stream instance this call stems from, i.e. a FFCodecInputStream
 * @param target_format target AudioFormat
 * @param aio_pointer Pointer to the FFAudioIO struct of the FFNativePeerInputStream that opened the file/stream
 * @return pointer to the FFAudioIO struct that was given as parameter
 */
JNIEXPORT jlong JNICALL Java_com_tagtraum_ffsampledsp_FFCodecInputStream_open(JNIEnv *env, jobject object, jobject target_format, jlong aio_pointer) {

    int res = 0;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_NONE;
    // BUGFIX: channel layouts are 64-bit bitmasks; was a plain int
    int64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
    int is_float = 0;
    int is_signed = 0;
    AVCodec *encoder = NULL;
    int dither_method = SWR_DITHER_NONE;
    int output_sample_bits = 0;

    init_ids(env);

    FFAudioIO *aio = (FFAudioIO*)(intptr_t)aio_pointer;

    // read the desired target format from the Java AudioFormat object
    jfloat sample_rate = (*env)->CallFloatMethod(env, target_format, getSampleRate_MID);
    jint sample_size_in_bits = (*env)->CallIntMethod(env, target_format, getSampleSizeInBits_MID);
    jint channels = (*env)->CallIntMethod(env, target_format, getChannels_MID);
    jboolean big_endian = (*env)->CallBooleanMethod(env, target_format, isBigEndian_MID);
    jobject encoding = (*env)->CallObjectMethod(env, target_format, getEncoding_MID);
    jstring jencoding_name = (jstring)(*env)->CallObjectMethod(env, encoding, toString_MID);

    const char *encoding_name = (*env)->GetStringUTFChars(env, jencoding_name, NULL);
    is_float = strcmp("PCM_FLOAT", encoding_name) == 0;
    is_signed = strcmp("PCM_SIGNED", encoding_name) == 0;

#ifdef DEBUG
    // BUGFIX: this debug output used to run *after* ReleaseStringUTFChars,
    // reading encoding_name after it had been released (use-after-free).
    fprintf(stderr, "encoding = %s\n", encoding_name);
    fprintf(stderr, "signed = %d\n", is_signed);
    fprintf(stderr, "float = %d\n", is_float);
    fprintf(stderr, "bits = %d\n", (int)sample_size_in_bits);
#endif

    (*env)->ReleaseStringUTFChars(env, jencoding_name, encoding_name);

    // map bit depth + signed/float flags to an FFmpeg sample format
    if (sample_size_in_bits <= 8) {
        out_sample_fmt = AV_SAMPLE_FMT_U8;
    } else if (sample_size_in_bits <= 16) {
        out_sample_fmt = AV_SAMPLE_FMT_S16;
    } else if (sample_size_in_bits <= 32 && is_float) {
        out_sample_fmt = AV_SAMPLE_FMT_FLT;
    } else if (sample_size_in_bits <= 32) {
        out_sample_fmt = AV_SAMPLE_FMT_S32;
    } else if (sample_size_in_bits <= 64 && is_float) {
        out_sample_fmt = AV_SAMPLE_FMT_DBL;
    } else {
        fprintf(stderr, "Will use 64 bit PCM_FLOAT even though it might not have been desired.\n");
        out_sample_fmt = AV_SAMPLE_FMT_DBL;
    }

    // choose the output channel layout: keep the source layout when the
    // channel count matches, otherwise pick a sensible default
    if (aio->stream->codecpar->channels == channels) {
        out_channel_layout = aio->stream->codecpar->channel_layout;
    } else if (channels == 1) {
        out_channel_layout = AV_CH_LAYOUT_MONO;
    } else if (channels == 2) {
        out_channel_layout = AV_CH_LAYOUT_STEREO;
    } else {
        fprintf(stderr, "Undetermined channel layout, will use stereo.\n");
        channels = 2;
    }

    // when reducing bit depth, add triangular dither
    if (aio->stream->codecpar->bits_per_coded_sample > sample_size_in_bits) {
        dither_method = SWR_DITHER_TRIANGULAR;
        output_sample_bits = sample_size_in_bits;
    }

#ifdef DEBUG
    fprintf(stderr, "setting out format to: %d\n", out_sample_fmt);
#endif

    // remove default setup
    if (aio->swr_context) {
        swr_free(&aio->swr_context);
    }
    // allocate new
    aio->swr_context = swr_alloc();
    if (!aio->swr_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate swr context.");
        goto bail;
    }

    // standard stuff from input
    av_opt_set_sample_fmt(aio->swr_context, "in_sample_fmt", aio->stream->codecpar->format, 0);
    av_opt_set_int(aio->swr_context, "in_channel_count", aio->stream->codecpar->channels, 0);
    av_opt_set_int(aio->swr_context, "in_channel_layout", aio->stream->codecpar->channel_layout, 0);
    av_opt_set_int(aio->swr_context, "in_sample_rate", aio->stream->codecpar->sample_rate, 0);
    // custom stuff
    av_opt_set_int(aio->swr_context, "out_channel_layout", out_channel_layout, 0);
    av_opt_set_int(aio->swr_context, "out_channel_count", channels, 0);
    av_opt_set_int(aio->swr_context, "out_sample_rate", (int)round(sample_rate), 0);
    av_opt_set_sample_fmt(aio->swr_context, "out_sample_fmt", out_sample_fmt, 0);
    av_opt_set_int(aio->swr_context, "dither_method", dither_method, 0);
    av_opt_set_int(aio->swr_context, "output_sample_bits", output_sample_bits, 0);

    res = swr_init(aio->swr_context);
    if (res < 0) {
        // BUGFIX: used to overwrite the real swr_init error with
        // AVERROR(ENOMEM), hiding the actual cause of the failure.
        throwIOExceptionIfError(env, res, "Could not re-initialize swr context.");
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "open codec: dither method : %d\n", dither_method);
    fprintf(stderr, "open codec: output sample bits: %d\n", aio->swr_context->dither.output_sample_bits);
#endif

    // re-adjust encoder
    encoder = ff_find_encoder(out_sample_fmt, sample_size_in_bits, big_endian, is_signed);
    if (!encoder) {
        res = AVERROR(EINVAL);
        throwIOExceptionIfError(env, res, "Could not find suitable encoder.");
        goto bail;
    }
    res = ff_init_encoder(env, aio, encoder);
    if (res < 0) {
        // exception is already thrown
        goto bail;
    }

bail:
    return (jlong)(intptr_t)aio;
}
/**
 * Decode a frame to a packet, run the result through SwrContext, if desired, encode it via an appropriate
 * encoder, and write the results to the Java-side native buffer.
 *
 * @param aio FFAudio context
 * @param cached true or false
 * @return number of bytes placed into java buffer or a negative value, if something went wrong
 */
static int decode_packet(FFAudioIO *aio, int cached) {
    int res = 0;
    uint8_t **resample_buf = NULL;
    jobject byte_buffer = NULL;
    uint8_t *javaBuffer = NULL;
    uint32_t out_buf_size = 0;
    int out_buf_samples = 0;
    int64_t out_channel_count;
    int64_t out_sample_rate;
    int flush = aio->got_frame;
    enum AVSampleFormat out;
    int bytesConsumed = 0;

    init_ids(aio->env, aio->java_instance);

    // read the configured output parameters from the resample context
    av_opt_get_int(aio->swr_context, "out_channel_count", 0, &out_channel_count);
    av_opt_get_int(aio->swr_context, "out_sample_rate", 0, &out_sample_rate);
    av_opt_get_sample_fmt(aio->swr_context, "out_sample_fmt", 0, &out);

    resample_buf = av_mallocz(sizeof(uint8_t *) * 1); // one plane!
    // BUGFIX: the allocation result used to be unchecked
    if (!resample_buf) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(aio->env, res, "Could not allocate resample buffer.");
        goto bail;
    }

    // make sure we really have an audio packet
    if (aio->decode_packet.stream_index == aio->stream_index) {
        // decode frame
        // got_frame indicates whether we got a frame
        bytesConsumed = avcodec_decode_audio4(aio->decode_context, aio->decode_frame, &aio->got_frame, &aio->decode_packet);
        if (bytesConsumed < 0) {
            throwUnsupportedAudioFileExceptionIfError(aio->env, bytesConsumed, "Failed to decode audio frame.");
            // BUGFIX: used to `return bytesConsumed` directly, leaking resample_buf
            res = bytesConsumed;
            goto bail;
        }

        if (aio->got_frame) {
            aio->decoded_samples += aio->decode_frame->nb_samples;
            out_buf_samples = aio->decode_frame->nb_samples;
#ifdef DEBUG
            fprintf(stderr, "samples%s n:%" PRIu64 " nb_samples:%d pts:%s\n",
                    cached ? "(cached)" : "",
                    aio->decoded_samples, aio->decode_frame->nb_samples,
                    av_ts2timestr(aio->decode_frame->pts, &aio->decode_context->time_base));
#endif
            // adjust out sample number for a different sample rate
            // this is an estimate!!
            out_buf_samples = av_rescale_rnd(
                    swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate) + aio->decode_frame->nb_samples,
                    out_sample_rate,
                    aio->stream->codecpar->sample_rate,
                    AV_ROUND_UP
            );

            // allocate new aio->audio_data buffers
            res = av_samples_alloc(aio->audio_data, NULL,
                    av_frame_get_channels(aio->decode_frame),
                    aio->decode_frame->nb_samples, aio->decode_frame->format, 1);
            if (res < 0) {
                throwIOExceptionIfError(aio->env, res, "Could not allocate audio buffer.");
                // BUGFIX: used to `return AVERROR(ENOMEM)` directly, leaking resample_buf
                res = AVERROR(ENOMEM);
                goto bail;
            }
            // copy audio data to aio->audio_data
            av_samples_copy(aio->audio_data, aio->decode_frame->data, 0, 0,
                    aio->decode_frame->nb_samples, av_frame_get_channels(aio->decode_frame), aio->decode_frame->format);

            res = resample(aio, resample_buf, out_buf_samples, (const uint8_t **)aio->audio_data, aio->decode_frame->nb_samples);
            if (res < 0) goto bail;
            else out_buf_samples = res;

        } else if (flush && swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate)) {
            // no frame, but the resampler still buffers samples: flush them
            res = resample(aio, resample_buf, swr_get_delay(aio->swr_context, aio->stream->codecpar->sample_rate), NULL, 0);
            if (res < 0) goto bail;
            else out_buf_samples = res;
        } else {
#ifdef DEBUG
            fprintf(stderr, "Got no frame.\n");
#endif
        }

        if (out_buf_samples > 0) {
            // determine the byte size of the resampled output
            res = av_samples_get_buffer_size(NULL, (int)out_channel_count, out_buf_samples, out, 1);
            if (res < 0) goto bail;
            else out_buf_size = res;

            // ensure native buffer capacity
            if (aio->java_buffer_capacity < out_buf_size) {
                aio->java_buffer_capacity = (*aio->env)->CallIntMethod(aio->env, aio->java_instance, setNativeBufferCapacity_MID, (jint)out_buf_size);
            }
            // get java-managed byte buffer reference
            byte_buffer = (*aio->env)->GetObjectField(aio->env, aio->java_instance, nativeBuffer_FID);
            if (!byte_buffer) {
                res = -1;
                throwIOExceptionIfError(aio->env, 1, "Failed to get native buffer.");
                goto bail;
            }

            // we have some samples, let's copy them to the java buffer, using the desired encoding
            javaBuffer = (uint8_t *)(*aio->env)->GetDirectBufferAddress(aio->env, byte_buffer);
            if (!javaBuffer) {
                throwIOExceptionIfError(aio->env, 1, "Failed to get address for native buffer.");
                goto bail;
            }
            if (aio->encode_context) {
                aio->encode_frame->nb_samples = out_buf_samples;
                res = encode_buffer(aio, resample_buf[0], out_buf_size, javaBuffer);
                if (res < 0) {
                    out_buf_size = 0;
                    goto bail;
                }
                out_buf_size = res;
            } else {
                memcpy(javaBuffer, resample_buf[0], out_buf_size);
            }
            // we already wrote to the buffer, now we still need to
            // set new bytebuffer limit and position to 0.
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, rewind_MID);
            (*aio->env)->CallObjectMethod(aio->env, byte_buffer, limit_MID, out_buf_size);
        }
    }

    aio->resampled_samples += out_buf_size;

bail:
    if (resample_buf) {
        if (resample_buf[0]) av_freep(&resample_buf[0]);
        av_free(resample_buf);
    }
    if (aio->audio_data[0]) av_freep(&aio->audio_data[0]);

    return res;
}
/**
 * Opens the byte buffer to determine its AudioFileFormat.
 *
 * @param env JNIEnv
 * @param instance calling FFAudioFileReader instance
 * @param byte_buffer audio data
 * @return AudioFileFormat objects (or NULL, if an exception was thrown)
 */
JNIEXPORT jobjectArray JNICALL Java_com_tagtraum_ffsampledsp_FFAudioFileReader_getAudioFileFormatsFromBuffer(JNIEnv *env, jobject instance, jobject byte_buffer) {
    int res = 0;
    AVFormatContext *format_context = NULL;
    jobjectArray array = NULL;
    unsigned char* callbackBuffer = NULL;
    FFCallback *callback = NULL;
    AVIOContext *io_context = NULL; // BUGFIX: was uninitialized

    init_ids(env);

    callback = calloc(1, sizeof(FFCallback));
    if (!callback) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate callback.");
        goto bail;
    }
    callback->env = env;
    callback->byte_buffer = byte_buffer;
    callback->call_count = 0;

    format_context = avformat_alloc_context();
    if (!format_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate format context.");
        goto bail;
    }
    // limit probe to less than what we read in one chunk...
    format_context->probesize = 8*1024; // this corresponds to the Java code!
    format_context->max_analyze_duration = 5*AV_TIME_BASE;

    callbackBuffer = (unsigned char*)av_malloc(CALLBACK_BUFFERSIZE * sizeof(uint8_t));
    if (!callbackBuffer) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate callback buffer.");
        goto bail;
    }

    io_context = avio_alloc_context(
        callbackBuffer,      // IOBuffer
        CALLBACK_BUFFERSIZE, // Buffer Size (32kb corresponds to Java code)
        0,                   // Write flag, only reading, so 0
        callback,            // FFCallback pointer (opaque)
        read_callback,       // Read callback
        NULL,                // Write callback
        NULL                 // Seek callback
    );
    if (!io_context) {
        res = AVERROR(ENOMEM);
        // BUGFIX: callbackBuffer used to leak here — the buffer is only
        // owned by the IO context once avio_alloc_context succeeds.
        av_freep(&callbackBuffer);
        throwIOExceptionIfError(env, res, "Could not allocate custom IO context.");
        goto bail;
    }
    // we didn't supply a seek function in avio_alloc_context,
    // so we need to make sure we don't seek...
    io_context->seekable = 0;

    format_context->pb = io_context;
    res = ff_open_format_context(env, &format_context, "MemoryAVIOContext");
    if (res) {
        // exception is already thrown
        goto bail;
    }

    res = create_ffaudiofileformats(env, format_context, &array, NULL);
    if (res) {
        // exception is already thrown
        goto bail;
    }

bail:
    if (format_context) {
        AVFormatContext *s = format_context;
        // custom IO: the buffer and the AVIOContext are ours to free,
        // avformat_close_input won't do it for us
        if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO)) {
            if (s->pb) {
                avio_flush(s->pb);
                av_free(s->pb->buffer);
                av_free(s->pb);
            }
        }
        avformat_close_input(&format_context);
    } else if (io_context) {
        // BUGFIX: if opening failed, the format context may already have
        // been freed (avformat_open_input frees it on failure) — the
        // custom IO context and its buffer used to leak on that path.
        av_free(io_context->buffer);
        av_free(io_context);
    }
    if (callback) {
        free(callback);
    }
    return array;
}
/**
 * Creates the FFAudioIO, custom AVIOContext etc for reading data from the stream.
 *
 * @param env JNIEnv
 * @param stream calling FFStreamInputStream instance
 * @param streamIndex index of the desired audio stream
 * @return pointer to the created FFAudioIO, or 0 if opening failed
 *         (an exception has then already been thrown)
 */
JNIEXPORT jlong JNICALL Java_com_tagtraum_ffsampledsp_FFStreamInputStream_open(JNIEnv *env, jobject stream, jint streamIndex) {
    int res = 0;
    FFAudioIO *aio = NULL;
    AVIOContext *io_context = NULL;
    unsigned char* callback_buffer = NULL;

    init_ids(env, stream);

    aio = calloc(1, sizeof(FFAudioIO));
    if (!aio) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate audio IO.");
        goto bail;
    }
    aio->env = env;
    aio->java_instance = stream;

    aio->format_context = avformat_alloc_context();
    if (!aio->format_context) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate format context.");
        goto bail;
    }
    // limit probe to less than what we read in one chunk...
    aio->format_context->probesize = 8*1024;
    aio->format_context->max_analyze_duration = 5*AV_TIME_BASE;

    callback_buffer = (unsigned char*)av_malloc(CALLBACK_BUFFERSIZE * sizeof(uint8_t));
    if (!callback_buffer) {
        res = AVERROR(ENOMEM);
        throwIOExceptionIfError(env, res, "Could not allocate callback buffer.");
        goto bail;
    }

    io_context = avio_alloc_context(
        callback_buffer,     // IOBuffer
        CALLBACK_BUFFERSIZE, // Buffer Size (32kb corresponds to Java code)
        0,                   // Write flag, only reading, so 0
        aio,                 // FFAudioIO pointer (opaque)
        read_callback,       // Read callback
        NULL,                // Write callback
        NULL                 // Seek callback
    );
    if (!io_context) {
        res = AVERROR(ENOMEM);
        // BUGFIX: callback_buffer used to leak here — the buffer is only
        // owned by the IO context once avio_alloc_context succeeds.
        av_freep(&callback_buffer);
        throwIOExceptionIfError(env, res, "Could not allocate custom IO context.");
        goto bail;
    }
    // we didn't supply a seek function in avio_alloc_context,
    // so we need to make sure we don't seek...
    io_context->seekable = 0;

    aio->format_context->pb = io_context;
    aio->stream_index = (int)streamIndex;

    res = ff_open_file(env, &aio->format_context, &aio->stream, &aio->decode_context, &aio->stream_index, "MemoryAVIOContext");
    if (res) {
        // exception is already thrown
        goto bail;
    }

    res = ff_init_audioio(env, aio);
    if (res) {
        // exception is already thrown
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "stream->codecpar->bits_per_coded_sample: %i\n", aio->stream->codecpar->bits_per_coded_sample);
    fprintf(stderr, "stream->codecpar->bits_per_raw_sample : %i\n", aio->stream->codecpar->bits_per_raw_sample);
    // BUGFIX: bit_rate is a 64-bit field — printing it with %i was
    // undefined behavior
    fprintf(stderr, "stream->codecpar->bit_rate : %" PRId64 "\n", aio->stream->codecpar->bit_rate);
    fprintf(stderr, "frames : %" PRId64 "\n", aio->stream->nb_frames);
    fprintf(stderr, "sample_rate: %i\n", aio->stream->codecpar->sample_rate);
    fprintf(stderr, "channels : %i\n", aio->stream->codecpar->channels);
    fprintf(stderr, "frame_size : %i\n", aio->stream->codecpar->frame_size);
    fprintf(stderr, "codec_id : %i\n", aio->stream->codecpar->codec_id);
#endif

bail:
    if (res) {
        // BUGFIX: guard against aio == NULL (calloc failure) and do not
        // return a pointer to freed memory — return 0 instead. An
        // exception is pending in every res != 0 path.
        if (aio) ff_audioio_free(aio);
        aio = NULL;
    }
    return (jlong)(intptr_t)aio;
}