static av_cold int libshine_encode_init(AVCodecContext *avctx)
{
    SHINEContext *s = avctx->priv_data;

    if (avctx->channels <= 0 || avctx->channels > 2){
        av_log(avctx, AV_LOG_ERROR, "only mono or stereo is supported\n");
        return AVERROR(EINVAL);
    }

    shine_set_config_mpeg_defaults(&s->config.mpeg);
    if (avctx->bit_rate)
        s->config.mpeg.bitr = avctx->bit_rate / 1000;
    s->config.mpeg.mode        = avctx->channels == 2 ? STEREO : MONO;
    s->config.wave.samplerate  = avctx->sample_rate;
    s->config.wave.channels    = avctx->channels == 2 ? PCM_STEREO : PCM_MONO;

    if (shine_check_config(s->config.wave.samplerate, s->config.mpeg.bitr) < 0) {
        av_log(avctx, AV_LOG_ERROR, "invalid configuration\n");
        return AVERROR(EINVAL);
    }

    s->shine = shine_initialise(&s->config);
    if (!s->shine)
        return AVERROR(ENOMEM);

    avctx->frame_size = shine_samples_per_pass(s->shine);
    ff_af_queue_init(avctx, &s->afq);

    return 0;
}
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
    AACEncContext *s = avctx->priv_data;
    int i, ret = 0;
    const uint8_t *sizes[2];
    uint8_t grouping[AAC_MAX_CHANNELS];
    int lengths[2];

    s->channels = avctx->channels;
    s->chan_map = aac_chan_configs[s->channels-1];
    s->lambda   = avctx->global_quality > 0 ? avctx->global_quality : 120;
    s->last_frame_pb_count = 0;
    avctx->extradata_size   = 5;
    avctx->frame_size       = 1024;
    avctx->initial_padding  = 1024;
    avctx->bit_rate = (int)FFMIN(
        6144 * s->channels / 1024.0 * avctx->sample_rate,
        avctx->bit_rate);
    avctx->profile = avctx->profile == FF_PROFILE_UNKNOWN ? FF_PROFILE_AAC_LOW :
                     avctx->profile;

    for (i = 0; i < 16; i++)
        if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
            break;
    s->samplerate_index = i;
    ERROR_IF(s->samplerate_index == 16 ||
             s->samplerate_index >= ff_aac_swb_size_1024_len ||
             s->samplerate_index >= ff_aac_swb_size_128_len,
             "Unsupported sample rate %d\n", avctx->sample_rate);
    ERROR_IF(s->channels > AAC_MAX_CHANNELS || s->channels == 7,
             "Unsupported number of channels: %d\n", s->channels);
    WARN_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels,
            "Too many bits %f > %d per frame requested, clamping to max\n",
            1024.0 * avctx->bit_rate / avctx->sample_rate,
            6144 * s->channels);

    for (i = 0; i < FF_ARRAY_ELEMS(aacenc_profiles); i++)
        if (avctx->profile == aacenc_profiles[i])
            break;
    ERROR_IF(i == FF_ARRAY_ELEMS(aacenc_profiles),
             "Unsupported encoding profile: %d\n", avctx->profile);
    if (avctx->profile == FF_PROFILE_MPEG2_AAC_LOW) {
        avctx->profile = FF_PROFILE_AAC_LOW;
        ERROR_IF(s->options.pred,
                 "Main prediction unavailable in the \"mpeg2_aac_low\" profile\n");
        ERROR_IF(s->options.ltp,
                 "LTP prediction unavailable in the \"mpeg2_aac_low\" profile\n");
        WARN_IF(s->options.pns,
                "PNS unavailable in the \"mpeg2_aac_low\" profile, turning off\n");
        s->options.pns = 0;
    } else if (avctx->profile == FF_PROFILE_AAC_LTP) {
        s->options.ltp = 1;
        ERROR_IF(s->options.pred,
                 "Main prediction unavailable in the \"aac_ltp\" profile\n");
    } else if (avctx->profile == FF_PROFILE_AAC_MAIN) {
        s->options.pred = 1;
        ERROR_IF(s->options.ltp,
                 "LTP prediction unavailable in the \"aac_main\" profile\n");
    } else if (s->options.ltp) {
        avctx->profile = FF_PROFILE_AAC_LTP;
        WARN_IF(1, "Changing profile to \"aac_ltp\"\n");
        ERROR_IF(s->options.pred,
                 "Main prediction unavailable in the \"aac_ltp\" profile\n");
    } else if (s->options.pred) {
        avctx->profile = FF_PROFILE_AAC_MAIN;
        WARN_IF(1, "Changing profile to \"aac_main\"\n");
        ERROR_IF(s->options.ltp,
                 "LTP prediction unavailable in the \"aac_main\" profile\n");
    }
    s->profile = avctx->profile;
    s->coder = &ff_aac_coders[s->options.coder];

    if (s->options.coder != AAC_CODER_TWOLOOP) {
        ERROR_IF(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL,
                 "Coders other than twoloop require -strict -2 and some may be removed in the future\n");
        WARN_IF(s->options.coder == AAC_CODER_FAAC,
                "The FAAC-like coder will be removed in the near future, please use twoloop!\n");
        s->options.intensity_stereo = 0;
        s->options.pns = 0;
    }

    if ((ret = dsp_init(avctx, s)) < 0)
        goto fail;

    if ((ret = alloc_buffers(avctx, s)) < 0)
        goto fail;

    put_audio_specific_config(avctx);

    sizes[0]   = ff_aac_swb_size_1024[s->samplerate_index];
    sizes[1]   = ff_aac_swb_size_128[s->samplerate_index];
    lengths[0] = ff_aac_num_swb_1024[s->samplerate_index];
    lengths[1] = ff_aac_num_swb_128[s->samplerate_index];
    for (i = 0; i < s->chan_map[0]; i++)
        grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
    if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
                           s->chan_map[0], grouping)) < 0)
        goto fail;
    s->psypp = ff_psy_preprocess_init(avctx);
    ff_lpc_init(&s->lpc, 2*avctx->frame_size, TNS_MAX_ORDER, FF_LPC_TYPE_LEVINSON);
    av_lfg_init(&s->lfg, 0x72adca55);

    if (HAVE_MIPSDSP)
        ff_aac_coder_init_mips(s);

    if ((ret = ff_thread_once(&aac_table_init, &aac_encode_init_tables)) != 0)
        return AVERROR_UNKNOWN;

    ff_af_queue_init(avctx, &s->afq);

    return 0;
fail:
    aac_encode_end(avctx);
    return ret;
}
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
    AACEncContext *s = avctx->priv_data;
    int i, ret = 0;
    const uint8_t *sizes[2];
    uint8_t grouping[AAC_MAX_CHANNELS];
    int lengths[2];

    avctx->frame_size = 1024;

    for (i = 0; i < 16; i++)
        if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
            break;

    s->channels = avctx->channels;

    ERROR_IF(i == 16,
             "Unsupported sample rate %d\n", avctx->sample_rate);
    ERROR_IF(s->channels > AAC_MAX_CHANNELS,
             "Unsupported number of channels: %d\n", s->channels);
    ERROR_IF(avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW,
             "Unsupported profile %d\n", avctx->profile);
    ERROR_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels,
             "Too many bits per frame requested\n");

    s->samplerate_index = i;

    s->chan_map = aac_chan_configs[s->channels-1];

    if (ret = dsp_init(avctx, s))
        goto fail;

    if (ret = alloc_buffers(avctx, s))
        goto fail;

    avctx->extradata_size = 5;
    put_audio_specific_config(avctx);

    sizes[0]   = swb_size_1024[i];
    sizes[1]   = swb_size_128[i];
    lengths[0] = ff_aac_num_swb_1024[i];
    lengths[1] = ff_aac_num_swb_128[i];
    for (i = 0; i < s->chan_map[0]; i++)
        grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
    if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
                          s->chan_map[0], grouping))
        goto fail;
    s->psypp = ff_psy_preprocess_init(avctx);
    s->coder = &ff_aac_coders[s->options.aac_coder];

    if (HAVE_MIPSDSPR1)
        ff_aac_coder_init_mips(s);

    s->lambda = avctx->global_quality ? avctx->global_quality : 120;

    ff_aac_tableinit();

    for (i = 0; i < 428; i++)
        ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i]));

    avctx->delay = 1024;
    ff_af_queue_init(avctx, &s->afq);

    return 0;
fail:
    aac_encode_end(avctx);
    return ret;
}
static av_cold int Faac_encode_init(AVCodecContext *avctx)
{
    FaacAudioContext *s = avctx->priv_data;
    faacEncConfigurationPtr faac_cfg;
    unsigned long samples_input, max_bytes_output;
    int ret;

    /* number of channels */
    if (avctx->channels < 1 || avctx->channels > 6) {
        av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed\n", avctx->channels);
        ret = AVERROR(EINVAL);
        goto error;
    }

    s->faac_handle = faacEncOpen(avctx->sample_rate,
                                 avctx->channels,
                                 &samples_input, &max_bytes_output);
    if (!s->faac_handle) {
        av_log(avctx, AV_LOG_ERROR, "error in faacEncOpen()\n");
        ret = AVERROR_UNKNOWN;
        goto error;
    }

    /* check faac version */
    faac_cfg = faacEncGetCurrentConfiguration(s->faac_handle);
    if (faac_cfg->version != FAAC_CFG_VERSION) {
        av_log(avctx, AV_LOG_ERROR, "wrong libfaac version (compiled for: %d, using %d)\n",
               FAAC_CFG_VERSION, faac_cfg->version);
        ret = AVERROR(EINVAL);
        goto error;
    }

    /* put the options in the configuration struct */
    switch(avctx->profile) {
    case FF_PROFILE_AAC_MAIN:
        faac_cfg->aacObjectType = MAIN;
        break;
    case FF_PROFILE_UNKNOWN:
    case FF_PROFILE_AAC_LOW:
        faac_cfg->aacObjectType = LOW;
        break;
    case FF_PROFILE_AAC_SSR:
        faac_cfg->aacObjectType = SSR;
        break;
    case FF_PROFILE_AAC_LTP:
        faac_cfg->aacObjectType = LTP;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "invalid AAC profile\n");
        ret = AVERROR(EINVAL);
        goto error;
    }
    faac_cfg->mpegVersion   = MPEG4;
    faac_cfg->useTns        = 0;
    faac_cfg->allowMidside  = 1;
    faac_cfg->bitRate       = avctx->bit_rate / avctx->channels;
    faac_cfg->bandWidth     = avctx->cutoff;
    if(avctx->flags & CODEC_FLAG_QSCALE) {
        faac_cfg->bitRate = 0;
        faac_cfg->quantqual = avctx->global_quality / FF_QP2LAMBDA;
    }
    faac_cfg->outputFormat  = 1;
    faac_cfg->inputFormat   = FAAC_INPUT_16BIT;
    if (avctx->channels > 2)
        memcpy(faac_cfg->channel_map, channel_maps[avctx->channels-3],
               avctx->channels * sizeof(int));

    avctx->frame_size = samples_input / avctx->channels;

#if FF_API_OLD_ENCODE_AUDIO
    avctx->coded_frame= avcodec_alloc_frame();
    if (!avctx->coded_frame) {
        ret = AVERROR(ENOMEM);
        goto error;
    }
#endif

    /* Set decoder specific info */
    avctx->extradata_size = 0;
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {

        unsigned char *buffer = NULL;
        unsigned long decoder_specific_info_size;

        if (!faacEncGetDecoderSpecificInfo(s->faac_handle, &buffer,
                                           &decoder_specific_info_size)) {
            avctx->extradata = av_malloc(decoder_specific_info_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata) {
                ret = AVERROR(ENOMEM);
                goto error;
            }
            avctx->extradata_size = decoder_specific_info_size;
            memcpy(avctx->extradata, buffer, avctx->extradata_size);
            faac_cfg->outputFormat = 0;
        }
        free(buffer);
    }

    if (!faacEncSetConfiguration(s->faac_handle, faac_cfg)) {
        av_log(avctx, AV_LOG_ERROR, "libfaac doesn't support this output format!\n");
        ret = AVERROR(EINVAL);
        goto error;
    }

    avctx->delay = FAAC_DELAY_SAMPLES;
    ff_af_queue_init(avctx, &s->afq);

    return 0;
error:
    Faac_encode_close(avctx);
    return ret;
}
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
    AACEncContext *s = avctx->priv_data;
    int i, ret = 0;
    const uint8_t *sizes[2];
    uint8_t grouping[AAC_MAX_CHANNELS];
    int lengths[2];

    /* Constants */
    s->last_frame_pb_count = 0;
    avctx->extradata_size  = 5;
    avctx->frame_size      = 1024;
    avctx->initial_padding = 1024;
    s->lambda = avctx->global_quality > 0 ? avctx->global_quality : 120;

    /* Channel map and unspecified bitrate guessing */
    s->channels = avctx->channels;
    ERROR_IF(s->channels > AAC_MAX_CHANNELS || s->channels == 7,
             "Unsupported number of channels: %d\n", s->channels);
    s->chan_map = aac_chan_configs[s->channels-1];
    if (!avctx->bit_rate) {
        for (i = 1; i <= s->chan_map[0]; i++) {
            avctx->bit_rate += s->chan_map[i] == TYPE_CPE ? 128000 : /* Pair */
                               s->chan_map[i] == TYPE_LFE ? 16000  : /* LFE  */
                                                            69000  ; /* SCE  */
        }
    }

    /* Samplerate */
    for (i = 0; i < 16; i++)
        if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
            break;
    s->samplerate_index = i;
    ERROR_IF(s->samplerate_index == 16 ||
             s->samplerate_index >= ff_aac_swb_size_1024_len ||
             s->samplerate_index >= ff_aac_swb_size_128_len,
             "Unsupported sample rate %d\n", avctx->sample_rate);

    /* Bitrate limiting */
    WARN_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels,
            "Too many bits %f > %d per frame requested, clamping to max\n",
            1024.0 * avctx->bit_rate / avctx->sample_rate,
            6144 * s->channels);
    avctx->bit_rate = (int64_t)FFMIN(6144 * s->channels / 1024.0 * avctx->sample_rate,
                                     avctx->bit_rate);

    /* Profile and option setting */
    avctx->profile = avctx->profile == FF_PROFILE_UNKNOWN ? FF_PROFILE_AAC_LOW :
                     avctx->profile;
    for (i = 0; i < FF_ARRAY_ELEMS(aacenc_profiles); i++)
        if (avctx->profile == aacenc_profiles[i])
            break;
    if (avctx->profile == FF_PROFILE_MPEG2_AAC_LOW) {
        avctx->profile = FF_PROFILE_AAC_LOW;
        ERROR_IF(s->options.pred,
                 "Main prediction unavailable in the \"mpeg2_aac_low\" profile\n");
        ERROR_IF(s->options.ltp,
                 "LTP prediction unavailable in the \"mpeg2_aac_low\" profile\n");
        WARN_IF(s->options.pns,
                "PNS unavailable in the \"mpeg2_aac_low\" profile, turning off\n");
        s->options.pns = 0;
    } else if (avctx->profile == FF_PROFILE_AAC_LTP) {
        s->options.ltp = 1;
        ERROR_IF(s->options.pred,
                 "Main prediction unavailable in the \"aac_ltp\" profile\n");
    } else if (avctx->profile == FF_PROFILE_AAC_MAIN) {
        s->options.pred = 1;
        ERROR_IF(s->options.ltp,
                 "LTP prediction unavailable in the \"aac_main\" profile\n");
    } else if (s->options.ltp) {
        avctx->profile = FF_PROFILE_AAC_LTP;
        WARN_IF(1, "Changing profile to \"aac_ltp\"\n");
        ERROR_IF(s->options.pred,
                 "Main prediction unavailable in the \"aac_ltp\" profile\n");
    } else if (s->options.pred) {
        avctx->profile = FF_PROFILE_AAC_MAIN;
        WARN_IF(1, "Changing profile to \"aac_main\"\n");
        ERROR_IF(s->options.ltp,
                 "LTP prediction unavailable in the \"aac_main\" profile\n");
    }
    s->profile = avctx->profile;

    /* Coder limitations */
    s->coder = &ff_aac_coders[s->options.coder];
    if (s->options.coder == AAC_CODER_ANMR) {
        ERROR_IF(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL,
                 "The ANMR coder is considered experimental, add -strict -2 to enable!\n");
        s->options.intensity_stereo = 0;
        s->options.pns = 0;
    }
    ERROR_IF(s->options.ltp && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL,
             "The LTP profile requires experimental compliance, add -strict -2 to enable!\n");

    /* M/S introduces horrible artifacts with multichannel files, this is temporary */
    if (s->channels > 3)
        s->options.mid_side = 0;

    if ((ret = dsp_init(avctx, s)) < 0)
        goto fail;

    if ((ret = alloc_buffers(avctx, s)) < 0)
        goto fail;

    put_audio_specific_config(avctx);

    sizes[0]   = ff_aac_swb_size_1024[s->samplerate_index];
    sizes[1]   = ff_aac_swb_size_128[s->samplerate_index];
    lengths[0] = ff_aac_num_swb_1024[s->samplerate_index];
    lengths[1] = ff_aac_num_swb_128[s->samplerate_index];
    for (i = 0; i < s->chan_map[0]; i++)
        grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
    if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
                           s->chan_map[0], grouping)) < 0)
        goto fail;
    s->psypp = ff_psy_preprocess_init(avctx);
    ff_lpc_init(&s->lpc, 2*avctx->frame_size, TNS_MAX_ORDER, FF_LPC_TYPE_LEVINSON);
    s->random_state = 0x1f2e3d4c;

    s->abs_pow34   = abs_pow34_v;
    s->quant_bands = quantize_bands;

    if (ARCH_X86)
        ff_aac_dsp_init_x86(s);

    if (HAVE_MIPSDSP)
        ff_aac_coder_init_mips(s);

    if ((ret = ff_thread_once(&aac_table_init, &aac_encode_init_tables)) != 0)
        return AVERROR_UNKNOWN;

    ff_af_queue_init(avctx, &s->afq);

    return 0;
fail:
    aac_encode_end(avctx);
    return ret;
}
static av_cold int ffat_init_encoder(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    OSStatus status;

    AudioStreamBasicDescription in_format = {
        .mSampleRate = avctx->sample_rate,
        .mFormatID = kAudioFormatLinearPCM,
        .mFormatFlags = ((avctx->sample_fmt == AV_SAMPLE_FMT_FLT ||
                          avctx->sample_fmt == AV_SAMPLE_FMT_DBL) ? kAudioFormatFlagIsFloat
                        : avctx->sample_fmt == AV_SAMPLE_FMT_U8 ? 0
                        : kAudioFormatFlagIsSignedInteger)
                        | kAudioFormatFlagIsPacked,
        .mBytesPerPacket = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->channels,
        .mFramesPerPacket = 1,
        .mBytesPerFrame = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->channels,
        .mChannelsPerFrame = avctx->channels,
        .mBitsPerChannel = av_get_bytes_per_sample(avctx->sample_fmt) * 8,
    };
    AudioStreamBasicDescription out_format = {
        .mSampleRate = avctx->sample_rate,
        .mFormatID = ffat_get_format_id(avctx->codec_id, avctx->profile),
        .mChannelsPerFrame = in_format.mChannelsPerFrame,
    };
    UInt32 layout_size = sizeof(AudioChannelLayout) +
                         sizeof(AudioChannelDescription) * avctx->channels;
    AudioChannelLayout *channel_layout = av_malloc(layout_size);

    if (!channel_layout)
        return AVERROR(ENOMEM);

    if (avctx->codec_id == AV_CODEC_ID_ILBC) {
        int mode = get_ilbc_mode(avctx);
        out_format.mFramesPerPacket = 8000 * mode / 1000;
        out_format.mBytesPerPacket  = (mode == 20 ? 38 : 50);
    }

    status = AudioConverterNew(&in_format, &out_format, &at->converter);

    if (status != 0) {
        av_log(avctx, AV_LOG_ERROR, "AudioToolbox init error: %i\n", (int)status);
        av_free(channel_layout);
        return AVERROR_UNKNOWN;
    }

    if (!avctx->channel_layout)
        avctx->channel_layout = av_get_default_channel_layout(avctx->channels);

    if ((status = remap_layout(channel_layout, avctx->channel_layout, avctx->channels)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid channel layout\n");
        av_free(channel_layout);
        return status;
    }

    if (AudioConverterSetProperty(at->converter, kAudioConverterInputChannelLayout,
                                  layout_size, channel_layout)) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported input channel layout\n");
        av_free(channel_layout);
        return AVERROR(EINVAL);
    }
    if (avctx->codec_id == AV_CODEC_ID_AAC) {
        int tag = get_aac_tag(avctx->channel_layout);
        if (tag) {
            channel_layout->mChannelLayoutTag = tag;
            channel_layout->mNumberChannelDescriptions = 0;
        }
    }
    if (AudioConverterSetProperty(at->converter, kAudioConverterOutputChannelLayout,
                                  layout_size, channel_layout)) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported output channel layout\n");
        av_free(channel_layout);
        return AVERROR(EINVAL);
    }
    av_free(channel_layout);

    if (avctx->bits_per_raw_sample)
        AudioConverterSetProperty(at->converter,
                                  kAudioConverterPropertyBitDepthHint,
                                  sizeof(avctx->bits_per_raw_sample),
                                  &avctx->bits_per_raw_sample);

#if !TARGET_OS_IPHONE
    if (at->mode == -1)
        at->mode = (avctx->flags & AV_CODEC_FLAG_QSCALE) ?
                   kAudioCodecBitRateControlMode_Variable :
                   kAudioCodecBitRateControlMode_Constant;

    AudioConverterSetProperty(at->converter, kAudioCodecPropertyBitRateControlMode,
                              sizeof(at->mode), &at->mode);

    if (at->mode == kAudioCodecBitRateControlMode_Variable) {
        int q = avctx->global_quality / FF_QP2LAMBDA;
        if (q < 0 || q > 14) {
            av_log(avctx, AV_LOG_WARNING,
                   "VBR quality %d out of range, should be 0-14\n", q);
            q = av_clip(q, 0, 14);
        }
        q = 127 - q * 9;
        AudioConverterSetProperty(at->converter, kAudioCodecPropertySoundQualityForVBR,
                                  sizeof(q), &q);
    } else
#endif
    if (avctx->bit_rate > 0) {
        UInt32 rate = avctx->bit_rate;
        UInt32 size;
        status = AudioConverterGetPropertyInfo(at->converter,
                                               kAudioConverterApplicableEncodeBitRates,
                                               &size, NULL);
        if (!status && size) {
            UInt32 new_rate = rate;
            int count;
            int i;
            AudioValueRange *ranges = av_malloc(size);
            if (!ranges)
                return AVERROR(ENOMEM);
            AudioConverterGetProperty(at->converter,
                                      kAudioConverterApplicableEncodeBitRates,
                                      &size, ranges);
            count = size / sizeof(AudioValueRange);
            for (i = 0; i < count; i++) {
                AudioValueRange *range = &ranges[i];
                if (rate >= range->mMinimum && rate <= range->mMaximum) {
                    new_rate = rate;
                    break;
                } else if (rate > range->mMaximum) {
                    new_rate = range->mMaximum;
                } else {
                    new_rate = range->mMinimum;
                    break;
                }
            }
            if (new_rate != rate) {
                av_log(avctx, AV_LOG_WARNING,
                       "Bitrate %u not allowed; changing to %u\n", rate, new_rate);
                rate = new_rate;
            }
            av_free(ranges);
        }
        AudioConverterSetProperty(at->converter, kAudioConverterEncodeBitRate,
                                  sizeof(rate), &rate);
    }

    at->quality = 96 - at->quality * 32;
    AudioConverterSetProperty(at->converter, kAudioConverterCodecQuality,
                              sizeof(at->quality), &at->quality);

    if (!AudioConverterGetPropertyInfo(at->converter, kAudioConverterCompressionMagicCookie,
                                       &avctx->extradata_size, NULL) &&
        avctx->extradata_size) {
        int extradata_size = avctx->extradata_size;
        uint8_t *extradata;
        if (!(avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE)))
            return AVERROR(ENOMEM);
        if (avctx->codec_id == AV_CODEC_ID_ALAC) {
            avctx->extradata_size = 0x24;
            AV_WB32(avctx->extradata,     0x24);
            AV_WB32(avctx->extradata + 4, MKBETAG('a','l','a','c'));
            extradata = avctx->extradata + 12;
            avctx->extradata_size = 0x24;
        } else {
            extradata = avctx->extradata;
        }
        status = AudioConverterGetProperty(at->converter,
                                           kAudioConverterCompressionMagicCookie,
                                           &extradata_size, extradata);
        if (status != 0) {
            av_log(avctx, AV_LOG_ERROR, "AudioToolbox cookie error: %i\n", (int)status);
            return AVERROR_UNKNOWN;
        } else if (avctx->codec_id == AV_CODEC_ID_AAC) {
            GetByteContext gb;
            int tag, len;
            bytestream2_init(&gb, extradata, extradata_size);
            do {
                len = read_descr(&gb, &tag);
                if (tag == MP4DecConfigDescrTag) {
                    bytestream2_skip(&gb, 13);
                    len = read_descr(&gb, &tag);
                    if (tag == MP4DecSpecificDescrTag) {
                        len = FFMIN(gb.buffer_end - gb.buffer, len);
                        memmove(extradata, gb.buffer, len);
                        avctx->extradata_size = len;
                        break;
                    }
                } else if (tag == MP4ESDescrTag) {
                    int flags;
                    bytestream2_skip(&gb, 2);
                    flags = bytestream2_get_byte(&gb);
                    if (flags & 0x80) //streamDependenceFlag
                        bytestream2_skip(&gb, 2);
                    if (flags & 0x40) //URL_Flag
                        bytestream2_skip(&gb, bytestream2_get_byte(&gb));
                    if (flags & 0x20) //OCRstreamFlag
                        bytestream2_skip(&gb, 2);
                }
            } while (bytestream2_get_bytes_left(&gb));
        } else if (avctx->codec_id != AV_CODEC_ID_ALAC) {
            avctx->extradata_size = extradata_size;
        }
    }

    ffat_update_ctx(avctx);

#if !TARGET_OS_IPHONE && defined(__MAC_10_9)
    if (at->mode == kAudioCodecBitRateControlMode_Variable && avctx->rc_max_rate) {
        UInt32 max_size = avctx->rc_max_rate * avctx->frame_size / avctx->sample_rate;
        if (max_size)
            AudioConverterSetProperty(at->converter, kAudioCodecPropertyPacketSizeLimitForVBR,
                                      sizeof(max_size), &max_size);
    }
#endif

    ff_af_queue_init(avctx, &at->afq);

    return 0;
}

static OSStatus ffat_encode_callback(AudioConverterRef converter, UInt32 *nb_packets,
                                     AudioBufferList *data,
                                     AudioStreamPacketDescription **packets,
                                     void *inctx)
{
    AVCodecContext *avctx = inctx;
    ATDecodeContext *at = avctx->priv_data;
    AVFrame *frame;

    if (!at->frame_queue.available) {
        if (at->eof) {
            *nb_packets = 0;
            return 0;
        } else {
            *nb_packets = 0;
            return 1;
        }
    }

    frame = ff_bufqueue_get(&at->frame_queue);

    data->mNumberBuffers              = 1;
    data->mBuffers[0].mNumberChannels = avctx->channels;
    data->mBuffers[0].mDataByteSize   = frame->nb_samples *
                                        av_get_bytes_per_sample(avctx->sample_fmt) *
                                        avctx->channels;
    data->mBuffers[0].mData           = frame->data[0];
    if (*nb_packets > frame->nb_samples)
        *nb_packets = frame->nb_samples;

    ff_bufqueue_add(avctx, &at->used_frame_queue, frame);

    return 0;
}

static int ffat_encode(AVCodecContext *avctx, AVPacket *avpkt,
                       const AVFrame *frame, int *got_packet_ptr)
{
    ATDecodeContext *at = avctx->priv_data;
    OSStatus ret;

    AudioBufferList out_buffers = {
        .mNumberBuffers = 1,
        .mBuffers = {
            {
                .mNumberChannels = avctx->channels,
                .mDataByteSize = at->pkt_size,
            }
        }
    };