/*
/ Packet Video Audio MIO component
/
/ This implementation routes audio to AudioFlinger. Audio buffers are
/ enqueued in a message queue to a separate audio output thread. Once
/ the buffers have been successfully written, they are returned through
/ another message queue to the MIO and from there back to the engine.
/ This separation is necessary because most of the PV API is not
/ thread-safe.
*/
OSCL_EXPORT_REF AndroidAudioOutput::AndroidAudioOutput() :
    AndroidAudioMIO("AndroidAudioOutput"),
    iAudioThreadCreated(false),
    iExitAudioThread(false),
    iActiveTiming(NULL)
{
    iClockTimeOfWriting_ns = 0;
    iInputFrameSizeInBytes = 0;

    // semaphores used to communicate between this MIO and the audio output thread
    iAudioThreadSem = new OsclSemaphore();
    iAudioThreadSem->Create(0);
    iAudioThreadTermSem = new OsclSemaphore();
    iAudioThreadTermSem->Create(0);
    iAudioThreadReturnSem = new OsclSemaphore();
    iAudioThreadReturnSem->Create(0);
    iAudioThreadCreatedSem = new OsclSemaphore();
    iAudioThreadCreatedSem->Create(0);

    // lock used by this MIO and the audio output thread to guard access to the queues
    iOSSRequestQueueLock.Create();
    iOSSRequestQueue.reserve(iWriteResponseQueue.capacity());

    // create active timing object
    OsclMemAllocator alloc;
    OsclAny* ptr = alloc.allocate(sizeof(AndroidAudioMIOActiveTimingSupport));
    if (ptr) {
        iActiveTiming = new(ptr) AndroidAudioMIOActiveTimingSupport(kMaxClockDriftInMsecs, kMaxClockCorrection);
        iActiveTiming->setThreadSemaphore(iAudioThreadSem);
    }
}
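
/*
/ Illustration (not part of the PV sources): the header comment above describes
/ write requests being queued under a lock, a semaphore waking the audio output
/ thread, and completed buffers travelling back to the MIO on a response queue.
/ The sketch below shows that same hand-off with standard C++11 primitives
/ instead of the OSCL semaphore/mutex types; every name in it is hypothetical
/ and only meant to make the threading pattern concrete.
*/
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>
#include <thread>
#include <utility>
#include <vector>

struct WriteRequestSketch { std::vector<uint8_t> data; int cmdId; };

class AudioWriterSketch {
public:
    AudioWriterSketch() : exitThread(false), worker(&AudioWriterSketch::threadFunc, this) {}
    ~AudioWriterSketch() {
        { std::lock_guard<std::mutex> lk(lock); exitThread = true; }
        wake.notify_one();      // wake the thread so it can exit (cf. iAudioThreadSem)
        worker.join();          // wait for termination (cf. iAudioThreadTermSem)
    }
    // engine/MIO side: enqueue a buffer and wake the audio thread, the same role
    // the request queue, its lock, and iAudioThreadSem play in the real MIO
    void enqueue(WriteRequestSketch req) {
        { std::lock_guard<std::mutex> lk(lock); requests.push_back(std::move(req)); }
        wake.notify_one();
    }
private:
    void threadFunc() {
        for (;;) {
            WriteRequestSketch req;
            {
                std::unique_lock<std::mutex> lk(lock);
                wake.wait(lk, [this] { return exitThread || !requests.empty(); });
                if (exitThread) return;     // a real implementation would drain/return buffers first
                req = std::move(requests.front());
                requests.pop_front();
            }
            // write req.data to the audio sink here, then hand the buffer back
            // to the engine thread (the response-queue leg of the design)
        }
    }
    std::mutex lock;
    std::condition_variable wake;
    std::deque<WriteRequestSketch> requests;
    bool exitThread;
    std::thread worker;
};
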
OSCL_EXPORT_REF AndroidAudioOutput::~AndroidAudioOutput()
{
    LOGV("destructor");

    // make sure output thread has exited
    RequestAndWaitForThreadExit();

    // cleanup active timing object
    if (iActiveTiming) {
        iActiveTiming->~AndroidAudioMIOActiveTimingSupport();
        OsclMemAllocator alloc;
        alloc.deallocate(iActiveTiming);
    }

    // clean up some thread interface objects
    iAudioThreadSem->Close();
    delete iAudioThreadSem;
    iAudioThreadTermSem->Close();
    delete iAudioThreadTermSem;
    iAudioThreadReturnSem->Close();
    delete iAudioThreadReturnSem;
    iAudioThreadCreatedSem->Close();
    delete iAudioThreadCreatedSem;

    iOSSRequestQueueLock.Close();
}
OSCL_EXPORT_REF CPVInterfaceProxy * CPVInterfaceProxy::NewL(
    PVProxiedEngine& app
    , Oscl_DefAlloc *alloc
    , int32 stacksize
    , uint32 nreserve1
    , uint32 nreserve2
    , int32 handlerPri
    , int32 notifierPri)
//called under app thread context
{
    OsclMemAllocator defallocL;
    OsclAny *ptr = NULL;
    if (alloc)
    {
        ptr = alloc->ALLOCATE(sizeof(CPVInterfaceProxy));
        OsclError::LeaveIfNull(ptr);
    }
    else
    {
        ptr = defallocL.ALLOCATE(sizeof(CPVInterfaceProxy));
    }
    CPVInterfaceProxy *self = OSCL_PLACEMENT_NEW(ptr, CPVInterfaceProxy(app, alloc, stacksize));
    int32 err;
    err = self->CPVIConstructL(nreserve1, nreserve2, handlerPri, notifierPri);
    if (err != OSCL_ERR_NONE)
    {
        self->Delete();
        return NULL;
    }
    return self;
}
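
/*
/ Usage sketch (illustrative, not from the PV sources): NewL() placement-constructs
/ the proxy either in memory from a caller-supplied Oscl_DefAlloc or from the
/ default OsclMemAllocator, and Delete() (shown further below) releases it with
/ whichever allocator was chosen. The helper name and parameter values here are
/ hypothetical, and since NewL() can leave on allocation failure a real caller
/ would typically wrap it in OSCL_TRY.
*/
static CPVInterfaceProxy* CreateProxyForEngineSketch(PVProxiedEngine& aEngine)
{
    // NULL allocator selects the default OsclMemAllocator path in NewL() above;
    // the stack size, queue reserves, and priorities are illustrative values only.
    CPVInterfaceProxy* proxy = CPVInterfaceProxy::NewL(aEngine, NULL, 0x2000, 10, 10, 0, 0);
    return proxy;   // the caller eventually releases it with proxy->Delete()
}
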
bool
PVMFOMXEncPort::pvmiSetPortFormatSpecificInfoSync(OsclRefCounterMemFrag& aMemFrag)
{
    if ((iConnectedPort) &&
            (iTag == PVMF_OMX_ENC_NODE_PORT_TYPE_OUTPUT))
    {
        OsclAny* temp = NULL;
        iConnectedPort->QueryInterface(PVMI_CAPABILITY_AND_CONFIG_PVUUID, temp);
        PvmiCapabilityAndConfig *config = (PvmiCapabilityAndConfig*) temp;

        /*
         * Create PvmiKvp for capability settings
         */
        if ((config) && (aMemFrag.getMemFragSize() > 0))
        {
            OsclMemAllocator alloc;
            PvmiKvp kvp;
            kvp.key = NULL;
            kvp.length = oscl_strlen(PVMF_FORMAT_SPECIFIC_INFO_KEY) + 1; // +1 for \0
            kvp.key = (PvmiKeyType)alloc.ALLOCATE(kvp.length);
            if (kvp.key == NULL)
            {
                return false;
            }
            oscl_strncpy(kvp.key, PVMF_FORMAT_SPECIFIC_INFO_KEY, kvp.length);

            kvp.value.key_specific_value = (OsclAny*)(aMemFrag.getMemFragPtr());
            kvp.capacity = aMemFrag.getMemFragSize();
            kvp.length = aMemFrag.getMemFragSize();
            PvmiKvp* retKvp = NULL; // for return value
            int32 err;
            OSCL_TRY(err, config->setParametersSync(NULL, &kvp, 1, retKvp););
            /* any leave from the peer port is ignored here */
            // release the key string allocated above
            alloc.deallocate((OsclAny*)(kvp.key));
        }
        return true;
    }
    return false;
}

bool
PVMFSocketPort::pvmiGetPortInPlaceDataProcessingInfoSync(const char* aFormatValType,
        PvmiKvp*& aKvp)
{
    /*
     * Create PvmiKvp for capability settings
     */
    aKvp = NULL;
    OsclMemAllocator alloc;
    uint32 strLen = oscl_strlen(aFormatValType) + 1;
    uint8* ptr = (uint8*)alloc.allocate(sizeof(PvmiKvp) + strLen);
    if (!ptr)
    {
        PVLOGGER_LOGMSG(PVLOGMSG_INST_MLDBG, iLogger, PVLOGMSG_ERR, (0, "PVMFSocketPort::pvmiGetPortInPlaceDataProcessingInfoSync: Error - No memory. Cannot allocate PvmiKvp"));
        return false;
    }
    aKvp = new(ptr) PvmiKvp;
    ptr += sizeof(PvmiKvp);
    aKvp->key = (PvmiKeyType)ptr;
    oscl_strncpy(aKvp->key, aFormatValType, strLen);
    aKvp->length = aKvp->capacity = strLen;

#if SNODE_ENABLE_UDP_MULTI_PACKET
    if (iTag == PVMF_SOCKET_NODE_PORT_TYPE_SOURCE)
        aKvp->value.bool_value = false; // for the multiple UDP recv feature
    else
#endif
        aKvp->value.bool_value = true;

    return true;
}
OSCL_EXPORT_REF AndroidAudioStream::~AndroidAudioStream()
{
    LOGV("destructor");
    // cleanup active timing object
    if (iActiveTiming) {
        iActiveTiming->~AndroidAudioMIOActiveTimingSupport();
        OsclMemAllocator alloc;
        alloc.deallocate(iActiveTiming);
    }
}
 virtual void destruct_and_dealloc(OsclAny* ptr)
 {
     uint8* tmp_ptr = (uint8*) ptr;
     // the PVMFMediaCmd was placement-constructed just past the aligned
     // refcount header, so step over the header to reach it
     uint aligned_refcnt_size =
         oscl_mem_aligned_size(sizeof(OsclRefCounterSA<MediaCmdCleanupSA>));
     tmp_ptr += aligned_refcnt_size;
     PVMFMediaCmd* mcmd_ptr = reinterpret_cast<PVMFMediaCmd*>(tmp_ptr);
     mcmd_ptr->~PVMFMediaCmd();   // destroy in place, then free the whole block below
     OsclMemAllocator alloc;
     alloc.deallocate(ptr);
 }
/*
/ Packet Video Audio MIO component
/
/ This implementation routes audio to a stream interface
*/
OSCL_EXPORT_REF AndroidAudioStream::AndroidAudioStream() :
    AndroidAudioMIO("AndroidAudioStream"),
    iActiveTiming(NULL), mClockUpdated(false)
{
    // create active timing object
    LOGV("constructor");
    OsclMemAllocator alloc;
    OsclAny* ptr = alloc.allocate(sizeof(AndroidAudioMIOActiveTimingSupport));
    if (ptr) {
        iActiveTiming = new(ptr) AndroidAudioMIOActiveTimingSupport(0, 0);
    }
}
OSCL_EXPORT_REF void CPVInterfaceProxy::Delete()
//called under app thread context
{
    Oscl_DefAlloc *alloc = this->iAlloc;
    bool default_alloc = (this->iAlloc == &this->iDefAlloc);
    this->~CPVInterfaceProxy();
    if (default_alloc)
    {
        OsclMemAllocator defalloc;
        defalloc.deallocate(this);
    }
    else
    {
        alloc->deallocate(this);
    }
}
PVMFStatus PVMFSocketPort::releaseParameters(PvmiMIOSession aSession,
        PvmiKvp* aParameters,
        int num_elements)
{
    OSCL_UNUSED_ARG(aSession);
    PVLOGGER_LOGMSG(PVLOGMSG_INST_MLDBG, iLogger, PVLOGMSG_INFO, (0, "PVMFSocketPort::releaseParameters: aSession=0x%x, aParameters=0x%x, num_elements=%d",
                    aSession, aParameters, num_elements));

    if ((num_elements != 1) ||
            (pv_mime_strcmp(aParameters->key, PVMI_PORT_CONFIG_INPLACE_DATA_PROCESSING_VALUE) != 0))
    {
        PVLOGGER_LOGMSG(PVLOGMSG_INST_MLDBG, iLogger, PVLOGMSG_ERR, (0, "PVMFSocketPort::releaseParameters: Error - Not a PvmiKvp created by this port"));
        return PVMFFailure;
    }
    OsclMemAllocator alloc;
    alloc.deallocate((OsclAny*)(aParameters));
    return PVMFSuccess;
}
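
/*
/ Usage sketch (illustrative, not from the PV sources): because
/ pvmiGetPortInPlaceDataProcessingInfoSync() above carves the PvmiKvp and its key
/ string out of a single allocation, releaseParameters() can hand the whole block
/ back with one deallocate of the KVP pointer. The helper below assumes the port
/ reference is available and that the query method is reachable from the caller;
/ in the shipped node it is normally driven through the port's capability-and-config
/ interface rather than called directly.
*/
static bool QueryInPlaceProcessingSketch(PVMFSocketPort& aPort)
{
    PvmiKvp* kvp = NULL;
    // the key string is the same one releaseParameters() checks for above
    if (!aPort.pvmiGetPortInPlaceDataProcessingInfoSync(
                PVMI_PORT_CONFIG_INPLACE_DATA_PROCESSING_VALUE, kvp) || !kvp)
    {
        return false;
    }
    bool inPlaceOk = kvp->value.bool_value;   // false only for the multi-packet UDP source case
    aPort.releaseParameters(NULL, kvp, 1);    // one deallocate frees the KVP and its key string
    return inPlaceOk;
}
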
//////	INetURI implementation
////////////////////////////////////////////////////////////////////////////////////
bool INetURI::setURI(OSCL_wString &aUri, const bool aRedirectURI)
{
    if (aUri.get_size() == 0) return false;

    OsclMemAllocator alloc;
    char *buf = (char*)alloc.allocate(aUri.get_size() + 1);
    if (!buf) return false;
    uint32 size = oscl_UnicodeToUTF8(aUri.get_cstr(), aUri.get_size(), buf, aUri.get_size() + 1);
    if (size == 0)
    {
        alloc.deallocate(buf);
        return false;
    }
    iURI = OSCL_HeapString<OsclMemAllocator> (buf, size);
    alloc.deallocate(buf);
    // clear iHost
    iHostName.set(NULL, 0);
    iRedirectURI = aRedirectURI;
    return true;
}
/////////////////////////////////// Function to decode the StreamMuxConfig
// Note: this function should ideally also get a reference to an object that holds the
// StreamMuxConfig values; they are also needed in the mediaInfo class (to pass to
// the parser constructor) and could be captured here. For now it just extracts the
// AudioSpecificConfig.
OSCL_EXPORT_REF uint8 * PV_LATM_Parser::ParseStreamMuxConfig(uint8* decoderSpecificConfig, int32 * size)
{
    uint32 SMC_SUCCESS = 0;
    uint32 SMC_INVALID_MUX_VERSION = 1;
    uint32 SMC_INVALID_NUM_PROGRAM = 2;
    uint32 SMC_INVALID_NUM_LAYER = 4;
    uint32 SMC_INVALID_OBJECT_TYPE = 8;
    uint32 SMC_USED_RESERVED_SAMPLING_FREQ = 16;

    uint32 samplingFreqTable[] =
    {
        96000, 88200, 64000, 48000, 44100,
        32000, 24000, 22050, 16000, 12000,
        11025, 8000, 7350
    };

    if (*size == 0)
    {
        // means there is nothing to parse
        return NULL;
    }


    // *size is the length of the decoderSpecificConfig. The AudioSpecificConfig can't
    // be larger than that, so just allocate that number of bytes;
    // we won't know how big it actually is until it has been parsed.
    OsclMemAllocator alloc;
    uint8* ASCPtr = (uint8*)(alloc.allocate(sizeof(uint8) * (*size)));
    if (ASCPtr == NULL)
    {
        // memory allocation problem?
        *size = 0;
        return NULL;
    }
    oscl_memset(ASCPtr, 0, *size);

    OsclExclusivePtrA<uint8, OsclMemAllocator> ascAutoPtr;
    ascAutoPtr.set(ASCPtr);

    //streamMuxConfig * sMC;
    sMC = (streamMuxConfig *) oscl_calloc(1, sizeof(streamMuxConfig));
    if (sMC == NULL)
    {		// unlikely: calloc failure
        return NULL;
    }


    sMC->parseResult = SMC_SUCCESS;  // set default result

    uint32 bitPos = 0;
    uint32 ASCPos = 0;

    int32 temp;
    int32 numProgram = 0;
    int32 prog, lay;
    int32 numLayer;
    int32 count;
    int32 dependsOnCoreCoder;

    // audio mux version
    sMC->audioMuxVersion = BufferReadBits(decoderSpecificConfig, &bitPos, 1);
    if (sMC->audioMuxVersion == 0)
    {
        // audioMuxVersion should never be anything other than 0 (RFC 3016); only that case is parsed

        // all streams same time framing
        sMC->allStreamsSameTimeFraming = BufferReadBits(decoderSpecificConfig, &bitPos, 1);

        /*
         *  numSubFrames -- how many payloadmux() are multiplexed
         */
        sMC->numSubFrames = BufferReadBits(decoderSpecificConfig, &bitPos, 6);

        /*
         *  numPrograms  -- how many programs are multiplexed
         */
        numProgram = BufferReadBits(decoderSpecificConfig, &bitPos, 4);

        if (numProgram != 0)
        {
            sMC->parseResult |= SMC_INVALID_NUM_PROGRAM;
            //numProgram = 0;
            // really should exit
            *size = 0;
            return NULL;
        }

        // loop through programs -- happens only once now
        for (prog = 0; prog <= numProgram; prog++)
        {
            // can only be one numProgram (RFC3016)
            numLayer = BufferReadBits(decoderSpecificConfig, &bitPos, 3);
            /*
             *  Number of scalable layers, only one is indicated in rfc3016
             */
            if (numLayer != 0)
            {
                sMC->parseResult |= SMC_INVALID_NUM_LAYER;
                //numLayer = 0;
                // really should exit
                *size = 0;
                return NULL;
            }

            for (lay = 0; lay <= numLayer; lay++)
            {
                //  can only be one numLayer (RFC3016)
                if (prog == 0 && lay == 0)
                {
                    /*
                     *  audioSpecificConfig
                     *
                     * it starts at byte 1's last (lsb) bit
                     * basically copy all the rest of the bytes into the ASCPtr
                     * then shift these over to be byte aligned
                     */
                    ASCPos = bitPos;

                    sMC->audioObjectType = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_OBJ_TYPE);

                    if (sMC->audioObjectType != MP4AUDIO_AAC_LC &&
                            sMC->audioObjectType != MP4AUDIO_LTP &&
                            sMC->audioObjectType != MP4AUDIO_PS &&
                            sMC->audioObjectType != MP4AUDIO_SBR)
                    {
                        sMC->parseResult |= SMC_INVALID_OBJECT_TYPE;
                        *size = 0;
                        return NULL;
                    }


                    // SamplingFrequencyIndex -- see audio spec for meanings
                    temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_SAMP_RATE_IDX);

                    if (temp == 13 || temp == 14)
                    {
                        sMC->parseResult |= SMC_USED_RESERVED_SAMPLING_FREQ;
                    }


                    if (temp <= 12)
                    {
                        sMC->samplingFrequency = samplingFreqTable[temp];
                    }

                    if (temp == 0xf)
                    {
                        // means the sampling frequency is specified directly in the next 24 bits
                        temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_SAMP_RATE);
                    }

                    // ChannelConfiguration
                    sMC->channelConfiguration = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_CHAN_CONFIG);

                    sMC->sbrPresentFlag = -1;

                    if (sMC->audioObjectType == MP4AUDIO_SBR ||
                            sMC->audioObjectType == MP4AUDIO_PS)
                    {
                        /* to disable explicit backward compatibility check */
                        sMC->extensionAudioObjectType = sMC->audioObjectType;

                        sMC->sbrPresentFlag = 1;

                        sMC->extensionSamplingFrequencyIndex = /* extensionSamplingFrequencyIndex */
                            BufferReadBits(decoderSpecificConfig, &bitPos, LEN_SAMP_RATE_IDX);

                        if (sMC->extensionSamplingFrequencyIndex == 0x0f)
                        {
                            /*
                             * sampling rate not listed in Table 1.6.2,
                             * this release does not support this
                             */
                            sMC->extensionSamplingFrequency =  /* extensionSamplingFrequency */
                                BufferReadBits(decoderSpecificConfig, &bitPos, LEN_SAMP_RATE);
                        }


                        sMC->audioObjectType =  BufferReadBits(decoderSpecificConfig, &bitPos, LEN_OBJ_TYPE);
                    }


                    if (sMC->audioObjectType == MP4AUDIO_AAC_LC || sMC->audioObjectType == MP4AUDIO_LTP)
                    {
                        //  GASpecificConfig

                        // frameLengthFlag
                        temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_FRAME_LEN_FLAG);

                        // dependsOnCoreCoder
                        dependsOnCoreCoder = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_DEPEND_ON_CORE);


                        if (dependsOnCoreCoder == 1)
                        {
                            // means there are 14 more bits of coreCoderDelay
                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_CORE_DELAY);
                        }
                        // ExtensionFlag
                        int extensionFlag = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_EXT_FLAG);

                        if (sMC->channelConfiguration == 0)
                        {
                            // there should be a program_config_element
                            // defined in 4.4.1.1 of 3995 sp4
                            // note, since we are only parsing this to get the size of the
                            // audioSpecificConfig, we don't care about the values except to know
                            // how many loops to do in the parsing process... save these loop
                            // variables in an array
                            uint32 loopVars[6] = {0, 0, 0, 0, 0, 0};

                            // don't actually need these values, just advance the bit pointer
                            bitPos += LEN_TAG; //temp = BufferReadBits(ASCPtr, &bitPos, 4); // element_instance_tag
                            bitPos += LEN_PROFILE; //temp = BufferReadBits(ASCPtr, &bitPos, 2); // object_type
                            bitPos += LEN_SAMP_IDX; //temp = BufferReadBits(ASCPtr, &bitPos, 4); // sampling frequency index
                            loopVars[0] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_ELE); // num front channel elems
                            loopVars[1] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_ELE); // num side channel elems
                            loopVars[2] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_ELE); // num back channel elems
                            loopVars[3] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_LFE); // num lfe channel elems
                            loopVars[4] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_DAT); // num assoc data elems
                            loopVars[5] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_CCE); // num valid cc elems

                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_MIX_PRES); // mono mixdown present
                            if (temp)
                            {
                                bitPos += LEN_NUM_ELE;
                            }
                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_MIX_PRES); // stereo mixdown present
                            if (temp)
                            {
                                bitPos += LEN_NUM_ELE;
                            }
                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_MIX_PRES); // matrix mixdown present
                            if (temp)
                            {
                                bitPos += LEN_NUM_DAT;
                            }

                            bitPos += (loopVars[0] * 5);  // front channel info
                            bitPos += (loopVars[1] * 5);  // side channel info
                            bitPos += (loopVars[2] * 5);  // back channel info
                            bitPos += (loopVars[3] * 4);  // lfe channel info
                            bitPos += (loopVars[4] * 4);  // assoc data info
                            bitPos += (loopVars[5] * 5);  // valid cc info

                            // the spec then calls for byte_alignment(): round bitPos up to the
                            // next byte boundary by dividing by 8, adding 1, and multiplying by 8
                            // (e.g. bitPos 17 -> 24). That formula over-advances when bitPos is
                            // already byte aligned, hence the mod-8 check below.
                            if (bitPos % 8 != 0)
                            {
                                bitPos = ((bitPos >> 3) + 1) << 3;
                            }

                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_COMMENT_BYTES); // comment field bytes
                            bitPos += (temp << 3);

                        }

                        // the cases below obviously can't happen at this point, but are shown for clarity's sake
                        if (sMC->audioObjectType == MP4AUDIO_AAC_SCALABLE ||
                                sMC->audioObjectType == MP4AUDIO_ER_AAC_SCALABLE)
                        {
                        }

                        if (extensionFlag)
                        {
                            if (sMC->audioObjectType == MP4AUDIO_ER_BSAC)
                            {
                                // can't ever happen here
                            }
                            if (sMC->audioObjectType == MP4AUDIO_ER_AAC_LC ||
                                    sMC->audioObjectType == 18 ||
                                    sMC->audioObjectType == MP4AUDIO_ER_AAC_LTP ||
                                    sMC->audioObjectType == MP4AUDIO_ER_AAC_SCALABLE ||
                                    sMC->audioObjectType == MP4AUDIO_ER_TWINVQ ||
                                    sMC->audioObjectType == MP4AUDIO_ER_AAC_LD)
                            {
                                // can't ever happen here
                            }
                            // extensionFlag3 -- theoretically possible -- but should only see in future, if ever
                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, 1);
                            if (temp)
                            {
                                // tbd in version 3
                            }
                        }
                    }