Example #1
static
void put_tool_tag(m4af_ctx_t *m4af, const aacenc_param_ex_t *params,
                  HANDLE_AACENCODER encoder)
{
    char tool_info[256];
    char *p = tool_info;
    LIB_INFO *lib_info = 0;

    p += sprintf(p, PROGNAME " %s, ", fdkaac_version);

    lib_info = calloc(FDK_MODULE_LAST, sizeof(LIB_INFO));
    if (lib_info && aacEncGetLibInfo(lib_info) == AACENC_OK) {
        int i;
        for (i = 0; i < FDK_MODULE_LAST; ++i)
            if (lib_info[i].module_id == FDK_AACENC)
                break;
        if (i < FDK_MODULE_LAST) /* only print a version if the encoder module was found */
            p += sprintf(p, "libfdk-aac %s, ", lib_info[i].versionStr);
    }
    free(lib_info);
    if (params->bitrate_mode)
        sprintf(p, "VBR mode %d", params->bitrate_mode);
    else
        sprintf(p, "CBR %dkbps",
                aacEncoder_GetParam(encoder, AACENC_BITRATE) / 1000);

    m4af_add_itmf_string_tag(m4af, M4AF_TAG_TOOL, tool_info);
}
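The example above only extracts the encoder module's version string from the table filled by aacEncGetLibInfo(). As a variation, the sketch below (assuming the header is installed as <fdk-aac/aacenc_lib.h>) walks the same table and prints every module the library reports.

#include <stdio.h>
#include <stdlib.h>
#include <fdk-aac/aacenc_lib.h>   /* assumed install path; provides aacEncGetLibInfo(), LIB_INFO */

/* Print the title and version of every FDK module reported by the library. */
static void print_fdk_modules(void)
{
    LIB_INFO *info = calloc(FDK_MODULE_LAST, sizeof(LIB_INFO));
    int i;

    if (info && aacEncGetLibInfo(info) == AACENC_OK)
        for (i = 0; i < FDK_MODULE_LAST; ++i)
            if (info[i].module_id != FDK_NONE)
                printf("%s %s\n", info[i].title, info[i].versionStr);
    free(info);
}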
Example #2
File: aacenc.c  Project: nu774/fdkaac
int aac_encode_frame(HANDLE_AACENCODER encoder,
                     const pcm_sample_description_t *format,
                     const int16_t *input, unsigned iframes,
                     aacenc_frame_t *output)
{
    uint32_t ilen = iframes * format->channels_per_frame;
    AACENC_BufDesc ibdesc = { 0 }, obdesc = { 0 };
    AACENC_InArgs iargs = { 0 };
    AACENC_OutArgs oargs = { 0 };
    void *ibufs[] = { (void*)input };
    void *obufs[1];
    INT ibuf_ids[] = { IN_AUDIO_DATA };
    INT obuf_ids[] = { OUT_BITSTREAM_DATA };
    INT ibuf_sizes[] = { ilen * sizeof(int16_t) };
    INT obuf_sizes[1];
    INT ibuf_el_sizes[] = { sizeof(int16_t) };
    INT obuf_el_sizes[] = { 1 };
    AACENC_ERROR err;
    unsigned channel_mode, obytes;

    channel_mode = aacEncoder_GetParam(encoder, AACENC_CHANNELMODE);
    obytes = 6144 / 8 * channel_mode;  /* 6144 bits is the worst-case AAC frame size per channel */
    if (!output->data || output->capacity < obytes) {
        uint8_t *p = realloc(output->data, obytes);
        if (!p) return -1;
        output->capacity = obytes;
        output->data = p;
    }
    obufs[0] = output->data;
    obuf_sizes[0] = obytes;

    iargs.numInSamples = ilen ? ilen : -1; /* -1 for signaling EOF */
    ibdesc.numBufs = 1;
    ibdesc.bufs = ibufs;
    ibdesc.bufferIdentifiers = ibuf_ids;
    ibdesc.bufSizes = ibuf_sizes;
    ibdesc.bufElSizes = ibuf_el_sizes;
    obdesc.numBufs = 1;
    obdesc.bufs = obufs;
    obdesc.bufferIdentifiers = obuf_ids;
    obdesc.bufSizes = obuf_sizes;
    obdesc.bufElSizes = obuf_el_sizes;

    err = aacEncEncode(encoder, &ibdesc, &obdesc, &iargs, &oargs);
    if (err != AACENC_ENCODE_EOF && err != AACENC_OK) {
        fprintf(stderr, "ERROR: aacEncEncode() failed\n");
        return -1;
    }
    output->size = oargs.numOutBytes;
    return oargs.numInSamples / format->channels_per_frame;
}
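aac_encode_frame() accepts up to iframes PCM frames per call and returns the number of input frames the encoder consumed (-1 on error); passing iframes == 0 signals end of input (numInSamples = -1) so the encoder's delayed frames can be flushed. A hypothetical driver loop might look like the sketch below, where pcm_read_frames() and write_frame() are placeholders for the caller's own PCM source and output sink.

/* Hypothetical driver loop for aac_encode_frame(); reader, sink, encoder
 * and format are assumed to come from the caller's setup. */
aacenc_frame_t frame = { 0 };
int16_t pcm[1024 * 8];               /* room for 1024 frames of up to 8 channels */
int nread, consumed;

do {
    nread = pcm_read_frames(reader, pcm, 1024);          /* 0 once the input is exhausted */
    consumed = aac_encode_frame(encoder, format, pcm, nread, &frame);
    if (consumed < 0)
        break;                                           /* encoding error */
    if (frame.size > 0)
        write_frame(sink, frame.data, frame.size);       /* one AAC access unit */
} while (nread > 0 || frame.size > 0);                   /* keep flushing the encoder delay */

free(frame.data);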
Example #3
File: main.c  Project: John-He-928/fdkaac
static
void put_tool_tag(m4af_ctx_t *m4af, const aacenc_param_ex_t *params,
                  HANDLE_AACENCODER encoder)
{
    char tool_info[256];
    char *p = tool_info;
    LIB_INFO lib_info;

    p += sprintf(p, PROGNAME " %s, ", fdkaac_version);
    aacenc_get_lib_info(&lib_info);
    p += sprintf(p, "libfdk-aac %s, ", lib_info.versionStr);
    if (params->bitrate_mode)
        sprintf(p, "VBR mode %d", params->bitrate_mode);
    else
        sprintf(p, "CBR %dkbps",
                aacEncoder_GetParam(encoder, AACENC_BITRATE) / 1000);

    m4af_add_itmf_string_tag(m4af, M4AF_TAG_TOOL, tool_info);
}
Example #4
void SoftAACEncoder2::onQueueFilled(OMX_U32 /* portIndex */) {
    if (mSignalledError) {
        return;
    }

    List<BufferInfo *> &inQueue = getPortQueue(0);
    List<BufferInfo *> &outQueue = getPortQueue(1);

    if (!mSentCodecSpecificData) {
        // The very first thing we want to output is the codec specific
        // data. It does not require any input data but we will need an
        // output buffer to store it in.

        if (outQueue.empty()) {
            return;
        }

        if (AACENC_OK != aacEncEncode(mAACEncoder, NULL, NULL, NULL, NULL)) {
            ALOGE("Unable to initialize encoder for profile / sample-rate / bit-rate / channels");
            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
            mSignalledError = true;
            return;
        }

        OMX_U32 actualBitRate  = aacEncoder_GetParam(mAACEncoder, AACENC_BITRATE);
        if (mBitRate != actualBitRate) {
            ALOGW("Requested bitrate %u unsupported, using %u", mBitRate, actualBitRate);
        }

        AACENC_InfoStruct encInfo;
        if (AACENC_OK != aacEncInfo(mAACEncoder, &encInfo)) {
            ALOGE("Failed to get AAC encoder info");
            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
            mSignalledError = true;
            return;
        }

        BufferInfo *outInfo = *outQueue.begin();
        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;

        if (outHeader->nOffset + encInfo.confSize > outHeader->nAllocLen) {
            ALOGE("b/34617444");
            android_errorWriteLog(0x534e4554,"34617444");
            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
            mSignalledError = true;
            return;
        }

        outHeader->nFilledLen = encInfo.confSize;
        outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;

        uint8_t *out = outHeader->pBuffer + outHeader->nOffset;
        memcpy(out, encInfo.confBuf, encInfo.confSize);

        outQueue.erase(outQueue.begin());
        outInfo->mOwnedByUs = false;
        notifyFillBufferDone(outHeader);

        mSentCodecSpecificData = true;
    }

    size_t numBytesPerInputFrame =
        mNumChannels * kNumSamplesPerFrame * sizeof(int16_t);

    // Limit input size so we only get one ELD frame
    if (mAACProfile == OMX_AUDIO_AACObjectELD && numBytesPerInputFrame > 512) {
        numBytesPerInputFrame = 512;
    }

    for (;;) {
        // We do the following until we run out of buffers.

        while (mInputSize < numBytesPerInputFrame) {
            // As long as there's still input data to be read we
            // will drain "kNumSamplesPerFrame * mNumChannels" samples
            // into the "mInputFrame" buffer and then encode those
            // as a unit into an output buffer.

            if (mSawInputEOS || inQueue.empty()) {
                return;
            }

            BufferInfo *inInfo = *inQueue.begin();
            OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;

            const void *inData = inHeader->pBuffer + inHeader->nOffset;

            size_t copy = numBytesPerInputFrame - mInputSize;
            if (copy > inHeader->nFilledLen) {
                copy = inHeader->nFilledLen;
            }

            if (mInputFrame == NULL) {
                mInputFrame = new int16_t[numBytesPerInputFrame / sizeof(int16_t)];
                mAllocatedFrameSize = numBytesPerInputFrame;
            } else if (mAllocatedFrameSize != numBytesPerInputFrame) {
                ALOGE("b/34621073: changed size from %d to %d",
                        (int)mAllocatedFrameSize, (int)numBytesPerInputFrame);
                android_errorWriteLog(0x534e4554,"34621073");
                delete[] mInputFrame;  // allocated with new[], so release with delete[]
                mInputFrame = new int16_t[numBytesPerInputFrame / sizeof(int16_t)];
                mAllocatedFrameSize = numBytesPerInputFrame;

            }

            if (mInputSize == 0) {
                mInputTimeUs = inHeader->nTimeStamp;
            }

            memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy);
            mInputSize += copy;

            inHeader->nOffset += copy;
            inHeader->nFilledLen -= copy;

            // "Time" on the input buffer has in effect advanced by the
            // number of audio frames we just advanced nOffset by.
            inHeader->nTimeStamp +=
                (copy * 1000000ll / mSampleRate)
                    / (mNumChannels * sizeof(int16_t));

            if (inHeader->nFilledLen == 0) {
                if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
                    mSawInputEOS = true;

                    // Pad any remaining data with zeroes.
                    memset((uint8_t *)mInputFrame + mInputSize,
                           0,
                           numBytesPerInputFrame - mInputSize);

                    mInputSize = numBytesPerInputFrame;
                }

                inQueue.erase(inQueue.begin());
                inInfo->mOwnedByUs = false;
                notifyEmptyBufferDone(inHeader);

                inData = NULL;
                inHeader = NULL;
                inInfo = NULL;
            }
        }

        // At this point we have all the input data necessary to encode
        // a single frame; all we need is an output buffer to store the result
        // in.

        if (outQueue.empty()) {
            return;
        }

        BufferInfo *outInfo = *outQueue.begin();
        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;

        uint8_t *outPtr = (uint8_t *)outHeader->pBuffer + outHeader->nOffset;
        size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset;

        AACENC_InArgs inargs;
        AACENC_OutArgs outargs;
        memset(&inargs, 0, sizeof(inargs));
        memset(&outargs, 0, sizeof(outargs));
        inargs.numInSamples = numBytesPerInputFrame / sizeof(int16_t);

        void* inBuffer[]        = { (unsigned char *)mInputFrame };
        INT   inBufferIds[]     = { IN_AUDIO_DATA };
        INT   inBufferSize[]    = { (INT)numBytesPerInputFrame };
        INT   inBufferElSize[]  = { sizeof(int16_t) };

        AACENC_BufDesc inBufDesc;
        inBufDesc.numBufs           = sizeof(inBuffer) / sizeof(void*);
        inBufDesc.bufs              = (void**)&inBuffer;
        inBufDesc.bufferIdentifiers = inBufferIds;
        inBufDesc.bufSizes          = inBufferSize;
        inBufDesc.bufElSizes        = inBufferElSize;

        void* outBuffer[]       = { outPtr };
        INT   outBufferIds[]    = { OUT_BITSTREAM_DATA };
        INT   outBufferSize[]   = { 0 };
        INT   outBufferElSize[] = { sizeof(UCHAR) };

        AACENC_BufDesc outBufDesc;
        outBufDesc.numBufs           = sizeof(outBuffer) / sizeof(void*);
        outBufDesc.bufs              = (void**)&outBuffer;
        outBufDesc.bufferIdentifiers = outBufferIds;
        outBufDesc.bufSizes          = outBufferSize;
        outBufDesc.bufElSizes        = outBufferElSize;

        // Encode the mInputFrame, which is treated as a modulo buffer
        AACENC_ERROR encoderErr = AACENC_OK;
        size_t nOutputBytes = 0;

        do {
            memset(&outargs, 0, sizeof(outargs));

            outBuffer[0] = outPtr;
            outBufferSize[0] = outAvailable - nOutputBytes;

            encoderErr = aacEncEncode(mAACEncoder,
                                      &inBufDesc,
                                      &outBufDesc,
                                      &inargs,
                                      &outargs);

            if (encoderErr == AACENC_OK) {
                outPtr += outargs.numOutBytes;
                nOutputBytes += outargs.numOutBytes;

                if (outargs.numInSamples > 0) {
                    int numRemainingSamples = inargs.numInSamples - outargs.numInSamples;
                    if (numRemainingSamples > 0) {
                        memmove(mInputFrame,
                                &mInputFrame[outargs.numInSamples],
                                sizeof(int16_t) * numRemainingSamples);
                    }
                    inargs.numInSamples -= outargs.numInSamples;
                }
            }
        } while (encoderErr == AACENC_OK && inargs.numInSamples > 0);

        outHeader->nFilledLen = nOutputBytes;

        outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;

        if (mSawInputEOS) {
            // We also tag this output buffer with EOS if it corresponds
            // to the final input buffer.
            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
        }

        outHeader->nTimeStamp = mInputTimeUs;

#if 0
        ALOGI("sending %d bytes of data (time = %lld us, flags = 0x%08lx)",
              nOutputBytes, mInputTimeUs, outHeader->nFlags);

        hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen);
#endif

        outQueue.erase(outQueue.begin());
        outInfo->mOwnedByUs = false;
        notifyFillBufferDone(outHeader);

        outHeader = NULL;
        outInfo = NULL;

        mInputSize = 0;
    }
}
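The first branch of onQueueFilled() illustrates a general fdk-aac pattern: an all-NULL aacEncEncode() call commits the configured parameters, after which aacEncInfo() exposes the AudioSpecificConfig (confBuf/confSize) that containers need as codec specific data. A minimal C sketch of just that step, assuming an already configured HANDLE_AACENCODER, could look like this:

#include <stdint.h>
#include <string.h>
#include <fdk-aac/aacenc_lib.h>   /* assumed install path */

/* Minimal sketch: fetch the AudioSpecificConfig from an encoder whose
 * AACENC_* parameters have already been set. */
static int get_codec_specific_data(HANDLE_AACENCODER enc,
                                   uint8_t *csd, size_t csd_max, size_t *csd_size)
{
    AACENC_InfoStruct info;

    /* A NULL-argument encode call finalizes encoder initialization. */
    if (aacEncEncode(enc, NULL, NULL, NULL, NULL) != AACENC_OK)
        return -1;
    if (aacEncInfo(enc, &info) != AACENC_OK)
        return -1;
    if (info.confSize > csd_max)
        return -1;
    memcpy(csd, info.confBuf, info.confSize);
    *csd_size = info.confSize;
    return 0;
}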
Example #5
int main(int argc, char **argv)
{
    wav_io_context_t wav_io = { read_callback, seek_callback, tell_callback };
    m4af_io_callbacks_t
        m4af_io = { read_callback, write_callback, seek_callback, tell_callback };
    aacenc_param_ex_t params = { 0 };

    int result = 2;
    FILE *ifp = 0;
    FILE *ofp = 0;
    char *output_filename = 0;
#ifdef USE_LIBSNDFILE
    SNDFILE* snd = NULL;
    SF_INFO snd_info;
    pcm_sample_description_t snd_desc = { 0 };
#else
    wav_reader_t *wavf = 0;
#endif
    HANDLE_AACENCODER encoder = 0;
    AACENC_InfoStruct aacinfo = { 0 };
    m4af_ctx_t *m4af = 0;
    const pcm_sample_description_t *sample_format;
    int downsampled_timescale = 0;
    int frame_count = 0;
    struct stat stb = { 0 };

    setlocale(LC_CTYPE, "");
    setbuf(stderr, 0);

    if (parse_options(argc, argv, &params) < 0)
        return 1;

#ifdef USE_LIBSNDFILE
    if ((snd = sf_open (params.input_filename, SFM_READ, &snd_info)) == NULL) {
        fprintf(stderr, "ERROR: broken / unsupported input file\n");
        goto END;
    }
#ifdef USE_LIBSAMPLERATE
    if(params.resample) {
        snd_desc.sample_rate = params.resample;
        printf("resampling to %dhz\n", snd_desc.sample_rate);
    } else {
        snd_desc.sample_rate = snd_info.samplerate;
    }
    snd_desc.sample_type = PCM_TYPE_FLOAT; // always -- libsndfile does the conversion for us
    snd_desc.bits_per_channel = sizeof(float)*8;
#else
    snd_desc.sample_rate = snd_info.samplerate;
    snd_desc.sample_type = PCM_TYPE_SINT; // always -- libsndfile does the conversion for us
    snd_desc.bits_per_channel = sizeof(short)*8;
#endif
    snd_desc.channels_per_frame = snd_info.channels;
    snd_desc.bytes_per_frame = snd_info.channels * (snd_desc.bits_per_channel / 8);
    snd_desc.channel_mask = 0;

    sample_format = &snd_desc;
#else
    if ((ifp = aacenc_fopen(params.input_filename, "rb")) == 0) {
        aacenc_fprintf(stderr, "ERROR: %s: %s\n", params.input_filename,
                       strerror(errno));
        goto END;
    }
    
    if (fstat(fileno(ifp), &stb) == 0 && (stb.st_mode & S_IFMT) != S_IFREG) {
        wav_io.seek = 0;
        wav_io.tell = 0;
    }
    
    if (!params.is_raw) {
        if ((wavf = wav_open(&wav_io, ifp, params.ignore_length)) == 0) {
            fprintf(stderr, "ERROR: broken / unsupported input file\n");
            goto END;
        }
    } else {
        int bytes_per_channel;
        pcm_sample_description_t desc = { 0 };
        if (parse_raw_spec(params.raw_format, &desc) < 0) {
            fprintf(stderr, "ERROR: invalid raw-format spec\n");
            goto END;
        }
        desc.sample_rate = params.raw_rate;
        desc.channels_per_frame = params.raw_channels;
        bytes_per_channel = (desc.bits_per_channel + 7) / 8;
        desc.bytes_per_frame = params.raw_channels * bytes_per_channel;
        if ((wavf = raw_open(&wav_io, ifp, &desc)) == 0) {
            fprintf(stderr, "ERROR: failed to open raw input\n");
            goto END;
        }
    }

    sample_format = wav_get_format(wavf);
#endif

    if (aacenc_init(&encoder, (aacenc_param_t*)&params, sample_format,
                    &aacinfo) < 0)
        goto END;

    if (!params.output_filename) {
        const char *ext = params.transport_format ? ".aac" : ".m4a";
        output_filename = generate_output_filename(params.input_filename, ext);
        params.output_filename = output_filename;
    }

    if ((ofp = aacenc_fopen(params.output_filename, "wb+")) == 0) {
        aacenc_fprintf(stderr, "ERROR: %s: %s\n", params.output_filename,
                       strerror(errno));
        goto END;
    }
    handle_signals();
    if (!params.transport_format) {
        uint32_t scale;
        unsigned framelen = aacinfo.frameLength;
        int sbr_mode = aacenc_is_sbr_active((aacenc_param_t*)&params);
        int sig_mode = aacEncoder_GetParam(encoder, AACENC_SIGNALING_MODE);
        if (sbr_mode && !sig_mode)
            downsampled_timescale = 1;
        scale = sample_format->sample_rate >> downsampled_timescale;
        if ((m4af = m4af_create(M4AF_CODEC_MP4A, scale, &m4af_io, ofp)) == 0)
            goto END;
        m4af_set_decoder_specific_info(m4af, 0, aacinfo.confBuf,
                                       aacinfo.confSize);
        m4af_set_fixed_frame_duration(m4af, 0,
                                      framelen >> downsampled_timescale);
        m4af_set_priming_mode(m4af, params.gapless_mode + 1);
        m4af_begin_write(m4af);
    }