/* Called whenever necessary by AudioConverterFillComplexBuffer.
 *
 * Pulls up to (*npackets * pv->isamplesiz) bytes of interleaved float
 * samples out of pv->list into a freshly allocated buffer (owned by
 * pv->buf, freed on the next invocation), remapping channels to the
 * QuickTime layout when the input map differs.
 *
 * On success: fills buffers->mBuffers[0], sets *npackets to the number
 * of packets actually delivered, decrements pv->ibytes, returns noErr.
 * On no/insufficient data or allocation failure: sets *npackets to 0
 * and returns a non-zero status so the converter stops pulling.
 */
static OSStatus inInputDataProc( AudioConverterRef converter, UInt32 *npackets,
                                 AudioBufferList *buffers,
                                 AudioStreamPacketDescription **ignored,
                                 void *userdata )
{
    hb_work_private_t *pv = userdata;

    if( pv->ibytes == 0 )
    {
        *npackets = 0;
        hb_log( "CoreAudio: no data to use in inInputDataProc" );
        return 1;
    }

    /* release the buffer handed out on the previous call;
     * free(NULL) is a no-op, so no guard is needed */
    free( pv->buf );
    pv->buf = NULL;

    uint64_t pts, pos;
    /* never hand the converter more than we have buffered */
    buffers->mBuffers[0].mDataByteSize = MIN( *npackets * pv->isamplesiz,
                                              pv->ibytes );
    pv->buf = calloc( 1, buffers->mBuffers[0].mDataByteSize );
    if( pv->buf == NULL )
    {
        hb_log( "CoreAudio: calloc failed in inInputDataProc" );
        *npackets = 0;
        return 1;
    }
    buffers->mBuffers[0].mData = pv->buf;

    if( hb_list_bytes( pv->list ) >= buffers->mBuffers[0].mDataByteSize )
    {
        hb_list_getbytes( pv->list, buffers->mBuffers[0].mData,
                          buffers->mBuffers[0].mDataByteSize, &pts, &pos );
    }
    else
    {
        hb_log( "CoreAudio: Not enough data, exiting inInputDataProc" );
        *npackets = 0;
        return 1;
    }

    /* CoreAudio wants the QuickTime channel order; remap in place if the
     * source uses a different layout */
    if( pv->ichanmap != &hb_qt_chan_map )
    {
        hb_layout_remap( pv->ichanmap, &hb_qt_chan_map, pv->layout,
                         (float*)buffers->mBuffers[0].mData,
                         buffers->mBuffers[0].mDataByteSize / pv->isamplesiz );
    }

    *npackets = buffers->mBuffers[0].mDataByteSize / pv->isamplesiz;
    pv->ibytes -= buffers->mBuffers[0].mDataByteSize;

    return noErr;
}
/* Called whenever necessary by AudioConverterFillComplexBuffer.
 *
 * Delivers up to (*npackets * pv->isamplesiz) bytes of buffered float
 * samples to the converter in a freshly allocated buffer (owned by
 * pv->buf, freed on the next call), remapping channels to the QuickTime
 * layout when needed. pts/pos from the fifo are discarded here.
 *
 * On success: sets *npackets to the packet count delivered, decrements
 * pv->ibytes, returns noErr. On no/insufficient data or allocation
 * failure: sets *npackets to 0 and returns non-zero to stop the pull.
 */
static OSStatus inInputDataProc(AudioConverterRef converter, UInt32 *npackets,
                                AudioBufferList *buffers,
                                AudioStreamPacketDescription **ignored,
                                void *userdata)
{
    hb_work_private_t *pv = userdata;

    if (!pv->ibytes)
    {
        *npackets = 0;
        return 1;
    }

    /* release the buffer handed out on the previous call;
     * free(NULL) is a no-op, so no guard is needed */
    free(pv->buf);
    pv->buf = NULL;

    /* never hand the converter more than we have buffered */
    buffers->mBuffers[0].mDataByteSize = MIN(pv->ibytes,
                                             pv->isamplesiz * *npackets);
    pv->buf = calloc(1, buffers->mBuffers[0].mDataByteSize);
    if (pv->buf == NULL)
    {
        *npackets = 0;
        return 1;
    }
    buffers->mBuffers[0].mData = pv->buf;

    if (hb_list_bytes(pv->list) >= buffers->mBuffers[0].mDataByteSize)
    {
        hb_list_getbytes(pv->list, buffers->mBuffers[0].mData,
                         buffers->mBuffers[0].mDataByteSize, NULL, NULL);
    }
    else
    {
        *npackets = 0;
        return 1;
    }

    *npackets = buffers->mBuffers[0].mDataByteSize / pv->isamplesiz;
    pv->ibytes -= buffers->mBuffers[0].mDataByteSize;

    /* CoreAudio wants the QuickTime channel order; remap in place if the
     * source uses a different layout */
    if (pv->ichanmap != &hb_qt_chan_map)
    {
        hb_layout_remap(pv->ichanmap, &hb_qt_chan_map, pv->layout,
                        (float*)buffers->mBuffers[0].mData, *npackets);
    }

    return noErr;
}
/* Pull one AC-3 frame's worth of float samples from the input fifo,
 * remap/convert them, and encode them with libavcodec.
 *
 * Returns an encoded hb_buffer_t with pts/duration filled in, NULL when
 * there is not yet enough buffered input (or on encode error). When the
 * encoder produces an empty frame (codec delay), recurses to try the
 * next frame. */
static hb_buffer_t * Encode( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    uint64_t pts, pos;
    hb_audio_t * audio = w->audio;
    hb_buffer_t * buf;
    int ii;

    /* need a full frame of input before we can encode anything */
    if( hb_list_bytes( pv->list ) < pv->input_samples * sizeof( float ) )
    {
        return NULL;
    }

    hb_list_getbytes( pv->list, pv->buf, pv->input_samples * sizeof( float ),
                      &pts, &pos);

    /* AC-3 and DCA sources arrive in codec-native channel order; pick the
     * map we must remap FROM (NULL means input is already SMPTE order) */
    hb_chan_map_t *map = NULL;
    if ( audio->config.in.codec == HB_ACODEC_AC3 )
    {
        map = &hb_ac3_chan_map;
    }
    else if ( audio->config.in.codec == HB_ACODEC_DCA )
    {
        map = &hb_qt_chan_map;
    }
    if ( map )
    {
        /* derive the channel layout of the (already mixed-down) data so
         * hb_layout_remap knows how many/which channels to shuffle */
        int layout;
        switch (audio->config.out.mixdown)
        {
            case HB_AMIXDOWN_MONO:
                layout = HB_INPUT_CH_LAYOUT_MONO;
                break;
            case HB_AMIXDOWN_STEREO:
            case HB_AMIXDOWN_DOLBY:
            case HB_AMIXDOWN_DOLBYPLII:
                layout = HB_INPUT_CH_LAYOUT_STEREO;
                break;
            case HB_AMIXDOWN_6CH:
            default:
                layout = HB_INPUT_CH_LAYOUT_3F2R | HB_INPUT_CH_LAYOUT_HAS_LFE;
                break;
        }
        hb_layout_remap( map, &hb_smpte_chan_map, layout,
                         (float*)pv->buf, AC3_SAMPLES_PER_FRAME);
    }

    for (ii = 0; ii < pv->input_samples; ii++)
    {
        // ffmpeg float samples are -1.0 to 1.0
        /* NOTE(review): this assumes HandBrake's internal float samples are
         * scaled to +/-32768 and rescales them to ffmpeg's range — confirm
         * against the upstream sample pipeline */
        pv->samples[ii] = ((float*)pv->buf)[ii] / 32768.0;
    }

    buf = hb_buffer_init( pv->output_bytes );
    buf->size = avcodec_encode_audio( pv->context, buf->data, buf->alloc,
                                      (short*)pv->samples );
    /* pts of this frame = fifo pts + 90kHz offset of `pos` bytes
     * (bytes -> samples-per-channel -> seconds -> 90kHz ticks) */
    buf->start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
    buf->stop  = buf->start + 90000 * AC3_SAMPLES_PER_FRAME / audio->config.out.samplerate;
    buf->frametype = HB_FRAME_AUDIO;

    if ( !buf->size )
    {
        /* encoder buffered the input (codec delay) — feed it more */
        hb_buffer_close( &buf );
        return Encode( w );
    }
    else if (buf->size < 0)
    {
        hb_log( "encac3: avcodec_encode_audio failed" );
        hb_buffer_close( &buf );
        return NULL;
    }

    return buf;
}
/* Pull one frame's worth of float samples from the input fifo, remap
 * channels for the target codec, convert the sample format if the codec
 * does not take floats, and encode with libavcodec.
 *
 * Returns an encoded hb_buffer_t with pts/duration filled in, NULL when
 * there is not yet enough buffered input (or on error). When the encoder
 * produces an empty frame (codec delay), recurses to try the next frame. */
static hb_buffer_t * Encode( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    uint64_t pts, pos;
    hb_audio_t * audio = w->audio;
    hb_buffer_t * buf;

    /* need a full frame of input before we can encode anything */
    if( hb_list_bytes( pv->list ) < pv->input_samples * sizeof( float ) )
    {
        return NULL;
    }

    hb_list_getbytes( pv->list, pv->buf, pv->input_samples * sizeof( float ),
                      &pts, &pos);

    // XXX: ffaac fails to remap from the internal libav* channel map (SMPTE)
    // to the native AAC channel map - do it here;
    // this hack should be removed if Libav fixes the bug
    hb_chan_map_t * out_map = ( w->codec_param == CODEC_ID_AAC ) ?
                              &hb_qt_chan_map : &hb_smpte_chan_map;
    if ( audio->config.in.channel_map != out_map )
    {
        hb_layout_remap( audio->config.in.channel_map, out_map, pv->layout,
                         (float*)pv->buf, pv->samples_per_frame );
    }

    // Do we need to convert our internal float format?
    if ( pv->context->sample_fmt != AV_SAMPLE_FMT_FLT )
    {
        int isamp, osamp;
        AVAudioConvert *ctx;

        isamp = av_get_bytes_per_sample( AV_SAMPLE_FMT_FLT );
        osamp = av_get_bytes_per_sample( pv->context->sample_fmt );
        ctx = av_audio_convert_alloc( pv->context->sample_fmt, 1,
                                      AV_SAMPLE_FMT_FLT, 1, NULL, 0 );
        /* av_audio_convert_alloc can fail (e.g. OOM or unsupported
         * format pair); passing a NULL context onward would crash */
        if ( ctx == NULL )
        {
            hb_log( "encavcodeca: av_audio_convert_alloc failed" );
            return NULL;
        }

        // we're doing straight sample format conversion which behaves as if
        // there were only one channel, converting in place in pv->buf
        const void * const ibuf[6] = { pv->buf };
        void * const obuf[6] = { pv->buf };
        const int istride[6] = { isamp };
        const int ostride[6] = { osamp };

        av_audio_convert( ctx, obuf, ostride, ibuf, istride, pv->input_samples );
        av_audio_convert_free( ctx );
    }

    buf = hb_buffer_init( pv->output_bytes );
    buf->size = avcodec_encode_audio( pv->context, buf->data, buf->alloc,
                                      (short*)pv->buf );
    /* pts of this frame = fifo pts + 90kHz offset of `pos` bytes
     * (bytes -> samples-per-channel -> seconds -> 90kHz ticks) */
    buf->start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
    buf->stop  = buf->start + 90000 * pv->samples_per_frame / audio->config.out.samplerate;
    buf->frametype = HB_FRAME_AUDIO;

    if ( !buf->size )
    {
        /* encoder buffered the input (codec delay) — feed it more */
        hb_buffer_close( &buf );
        return Encode( w );
    }
    else if (buf->size < 0)
    {
        hb_log( "encavcodeca: avcodec_encode_audio failed" );
        hb_buffer_close( &buf );
        return NULL;
    }

    return buf;
}