/*
 / Packet Video Audio MIO component
 /
 / This implementation routes audio to AudioFlinger. Audio buffers are
 / enqueued in a message queue to a separate audio output thread. Once
 / the buffers have been successfully written, they are returned through
 / another message queue to the MIO and from there back to the engine.
 / This separation is necessary because most of the PV API is not
 / thread-safe.
*/

OSCL_EXPORT_REF AndroidAudioOutput::AndroidAudioOutput() :
    AndroidAudioMIO("AndroidAudioOutput"),
    iAudioThreadCreated(false),
    iExitAudioThread(false),
    iActiveTiming(NULL)
{
    iClockTimeOfWriting_ns = 0;
    iInputFrameSizeInBytes = 0;

    // semaphores used to communicate between this mio and the audio output thread
    iAudioThreadSem = new OsclSemaphore();
    iAudioThreadSem->Create(0);
    iAudioThreadTermSem = new OsclSemaphore();
    iAudioThreadTermSem->Create(0);

    // lock to access the queues shared by this mio and the audio output thread
    iOSSRequestQueueLock.Create();
    iOSSRequestQueue.reserve(iWriteResponseQueue.capacity());

    // create active timing object
    OsclMemAllocator alloc;
    OsclAny* ptr = alloc.allocate(sizeof(AndroidAudioMIOActiveTimingSupport));
    if (ptr) {
        iActiveTiming = new(ptr) AndroidAudioMIOActiveTimingSupport(kMaxClockDriftInMsecs, kMaxClockCorrection);
        iActiveTiming->setThreadSemaphore(iAudioThreadSem);
    }
}
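/*
 * Illustrative sketch only -- not part of the PV sources. The constructor above sets up the
 * request/response queues and semaphores that hand audio buffers from the MIO thread to a
 * dedicated writer thread. The standalone C++11 sketch below shows the same two-queue
 * producer/consumer pattern with standard primitives (std::mutex/std::condition_variable
 * stand in for OsclMutex/OsclSemaphore; AudioRequest and writeToSink are made-up names);
 * the real MIO differs in detail.
 */
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>

struct AudioRequest { std::vector<char> data; };   // hypothetical buffer handed to the writer

class AudioWriterThread {
public:
    AudioWriterThread() : iExit(false), iThread(&AudioWriterThread::run, this) {}
    ~AudioWriterThread() {
        {
            std::lock_guard<std::mutex> lk(iLock);
            iExit = true;                            // like setting iExitAudioThread
        }
        iCond.notify_one();
        iThread.join();
    }
    // MIO side: enqueue a buffer and wake the writer (like signalling iAudioThreadSem)
    void enqueue(AudioRequest req) {
        {
            std::lock_guard<std::mutex> lk(iLock);
            iRequests.push(std::move(req));
        }
        iCond.notify_one();
    }
    // MIO side: collect buffers the writer has finished with (the "response queue")
    std::queue<AudioRequest> drainResponses() {
        std::lock_guard<std::mutex> lk(iLock);
        std::queue<AudioRequest> done;
        done.swap(iResponses);
        return done;
    }
private:
    void run() {
        for (;;) {
            AudioRequest req;
            {
                std::unique_lock<std::mutex> lk(iLock);
                iCond.wait(lk, [this] { return iExit || !iRequests.empty(); });
                if (iExit && iRequests.empty())
                    return;
                req = std::move(iRequests.front());
                iRequests.pop();
            }
            writeToSink(req);                        // blocking write, done off the MIO thread
            std::lock_guard<std::mutex> lk(iLock);
            iResponses.push(std::move(req));         // return the buffer to the MIO side
        }
    }
    static void writeToSink(const AudioRequest&) { /* stands in for the blocking AudioFlinger write */ }

    std::mutex iLock;                  // plays the role of iOSSRequestQueueLock
    std::condition_variable iCond;     // plays the role of iAudioThreadSem
    bool iExit;
    std::queue<AudioRequest> iRequests;
    std::queue<AudioRequest> iResponses;
    std::thread iThread;
};

// Usage sketch: AudioWriterThread writer; writer.enqueue(AudioRequest{std::vector<char>(4096)});
// the MIO side later calls writer.drainResponses() to recover buffers that have been written.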
bool PVMFSocketPort::pvmiGetPortInPlaceDataProcessingInfoSync(const char* aFormatValType, PvmiKvp*& aKvp)
{
    /*
     * Create PvmiKvp for capability settings
     */
    aKvp = NULL;
    OsclMemAllocator alloc;
    uint32 strLen = oscl_strlen(aFormatValType) + 1;
    uint8* ptr = (uint8*)alloc.allocate(sizeof(PvmiKvp) + strLen);
    if (!ptr)
    {
        PVLOGGER_LOGMSG(PVLOGMSG_INST_MLDBG, iLogger, PVLOGMSG_ERR,
                        (0, "PVMFSocketPort::pvmiGetPortInPlaceDataProcessingInfoSync: Error - No memory. Cannot allocate PvmiKvp"));
        return false;
    }
    aKvp = new(ptr) PvmiKvp;
    ptr += sizeof(PvmiKvp);
    aKvp->key = (PvmiKeyType)ptr;
    oscl_strncpy(aKvp->key, aFormatValType, strLen);
    aKvp->length = aKvp->capacity = strLen;
#if SNODE_ENABLE_UDP_MULTI_PACKET
    if (iTag == PVMF_SOCKET_NODE_PORT_TYPE_SOURCE)
        aKvp->value.bool_value = false; // for the multiple UDP recv feature
    else
#endif
        aKvp->value.bool_value = true;
    return true;
}
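/*
 * Illustrative sketch only -- not part of the PV sources. The function above packs the PvmiKvp
 * struct and its key string into a single allocation (aKvp->key points just past the struct),
 * so the whole capability record can be released with one deallocate of the struct pointer.
 * The helper name below is hypothetical; the node's actual release path may differ.
 */
static void ReleaseInPlaceInfoKvp(PvmiKvp* aKvp)
{
    if (aKvp)
    {
        OsclMemAllocator alloc;
        // the key string lives in the same block, so a single deallocate frees struct and key
        alloc.deallocate((OsclAny*)aKvp);
    }
}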
/*
 / Packet Video Audio MIO component
 /
 / This implementation routes audio to a stream interface
*/

OSCL_EXPORT_REF AndroidAudioStream::AndroidAudioStream() :
    AndroidAudioMIO("AndroidAudioStream"),
    iActiveTiming(NULL),
    mClockUpdated(false)
{
    // create active timing object
    LOGV("constructor");
    OsclMemAllocator alloc;
    OsclAny* ptr = alloc.allocate(sizeof(AndroidAudioMIOActiveTimingSupport));
    if (ptr) {
        iActiveTiming = new(ptr) AndroidAudioMIOActiveTimingSupport(0, 0);
    }
}
//////  INetURI implementation ////////////////////////////////////////////////////////////////////////////////////
bool INetURI::setURI(OSCL_wString &aUri, const bool aRedirectURI)
{
    if (aUri.get_size() == 0)
        return false;

    OsclMemAllocator alloc;
    char *buf = (char*)alloc.allocate(aUri.get_size() + 1);
    if (!buf)
        return false;

    uint32 size = oscl_UnicodeToUTF8(aUri.get_cstr(), aUri.get_size(), buf, aUri.get_size() + 1);
    if (size == 0)
    {
        alloc.deallocate(buf);
        return false;
    }

    iURI = OSCL_HeapString<OsclMemAllocator>(buf, size);
    alloc.deallocate(buf);

    // clear iHost
    iHostName.set(NULL, 0);
    iRedirectURI = aRedirectURI;
    return true;
}
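/*
 * Illustrative sketch only -- not part of the PV sources. The LATM parser that follows reads
 * the StreamMuxConfig through a bit reader invoked as BufferReadBits(buffer, &bitPos, numBits).
 * The helper below is a minimal MSB-first reader with that call shape; the name SketchReadBits
 * and its body are assumptions for illustration, and the real BufferReadBits may differ in
 * signature details and error handling.
 */
static uint32 SketchReadBits(const uint8* buffer, uint32* bitPos, int32 numBits)
{
    uint32 value = 0;
    for (int32 i = 0; i < numBits; i++)
    {
        uint32 bytePos = *bitPos >> 3;           // byte that holds the next bit
        uint32 bitInByte = 7 - (*bitPos & 0x7);  // bits are consumed MSB first
        value = (value << 1) | ((buffer[bytePos] >> bitInByte) & 0x1);
        (*bitPos)++;
    }
    return value;
}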
///////////////////////////////////Create a function to decode the StreamMuxConfig
// note: this function should ideally also get a reference to an object that holds the values
// for the StreamMuxConfig... these are also needed in the mediaInfo class (to pass to
// the parser constructor) and could be obtained here. For now just extract the AudioSpecificConfig.
OSCL_EXPORT_REF uint8* PV_LATM_Parser::ParseStreamMuxConfig(uint8* decoderSpecificConfig, int32* size)
{
    uint32 SMC_SUCCESS = 0;
    uint32 SMC_INVALID_MUX_VERSION = 1;
    uint32 SMC_INVALID_NUM_PROGRAM = 2;
    uint32 SMC_INVALID_NUM_LAYER = 4;
    uint32 SMC_INVALID_OBJECT_TYPE = 8;
    uint32 SMC_USED_RESERVED_SAMPLING_FREQ = 16;

    uint32 samplingFreqTable[] =
    {
        96000, 88200, 64000, 48000, 44100, 32000,
        24000, 22050, 16000, 12000, 11025, 8000, 7350
    };

    if (*size == 0)
    {
        // nothing to parse
        return NULL;
    }

    // *size is the length of the decoderSpecificConfig. The AudioSpecificConfig can't be
    // larger than that, so just allocate that number of bytes -- we won't know how big it
    // actually is until it has been parsed.
    OsclMemAllocator alloc;
    uint8* ASCPtr = (uint8*)(alloc.allocate(sizeof(uint8) * (*size)));
    if (ASCPtr == NULL)
    {
        // memory allocation failure
        *size = 0;
        return NULL;
    }
    oscl_memset(ASCPtr, 0, *size);

    OsclExclusivePtrA<uint8, OsclMemAllocator> ascAutoPtr;
    ascAutoPtr.set(ASCPtr);

    //streamMuxConfig * sMC;   // sMC is declared outside this function
    sMC = (streamMuxConfig*) oscl_calloc(1, sizeof(streamMuxConfig));
    if (sMC == NULL)
    {
        // unlikely: calloc failure
        return NULL;
    }
    sMC->parseResult = SMC_SUCCESS; // set default result

    uint32 bitPos = 0;
    uint32 ASCPos = 0;
    int32 temp;
    int32 numProgram = 0;
    int32 prog, lay;
    int32 numLayer;
    int32 count;
    int32 dependsOnCoreCoder;

    // audioMuxVersion
    sMC->audioMuxVersion = BufferReadBits(decoderSpecificConfig, &bitPos, 1);
    if (sMC->audioMuxVersion == 0)
    {   // should not be anything other than 0!!
        // all streams same time framing
        sMC->allStreamsSameTimeFraming = BufferReadBits(decoderSpecificConfig, &bitPos, 1);

        /*
         * numSubFrames -- how many PayloadMux() are multiplexed
         */
        sMC->numSubFrames = BufferReadBits(decoderSpecificConfig, &bitPos, 6);

        /*
         * numPrograms -- how many programs are multiplexed
         */
        numProgram = BufferReadBits(decoderSpecificConfig, &bitPos, 4);
        if (numProgram != 0)
        {
            sMC->parseResult |= SMC_INVALID_NUM_PROGRAM;
            //numProgram = 0;  // really should exit
            *size = 0;
            return NULL;
        }

        // loop through programs -- happens only once now
        for (prog = 0; prog <= numProgram; prog++)
        {   // can only be one program (RFC 3016)
            numLayer = BufferReadBits(decoderSpecificConfig, &bitPos, 3);
            /*
             * Number of scalable layers, only one is indicated in RFC 3016
             */
            if (numLayer != 0)
            {
                sMC->parseResult |= SMC_INVALID_NUM_LAYER;
                //numLayer = 0;  // really should exit
                *size = 0;
                return NULL;
            }

            for (lay = 0; lay <= numLayer; lay++)
            {   // can only be one layer (RFC 3016)
                if (prog == 0 && lay == 0)
                {
                    /*
                     * audioSpecificConfig
                     *
                     * it starts at byte 1's last (lsb) bit;
                     * basically copy all the rest of the bytes into ASCPtr,
                     * then shift these over to be byte aligned
                     */
                    ASCPos = bitPos;

                    sMC->audioObjectType = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_OBJ_TYPE);
                    if (sMC->audioObjectType != MP4AUDIO_AAC_LC &&
                            sMC->audioObjectType != MP4AUDIO_LTP &&
                            sMC->audioObjectType != MP4AUDIO_PS &&
                            sMC->audioObjectType != MP4AUDIO_SBR)
                    {
                        sMC->parseResult |= SMC_INVALID_OBJECT_TYPE;
                        *size = 0;
                        return NULL;
                    }

                    // samplingFrequencyIndex -- see the audio spec for meanings
                    temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_SAMP_RATE_IDX);
                    if (temp == 13 || temp == 14)
                    {
                        sMC->parseResult |= SMC_USED_RESERVED_SAMPLING_FREQ;
                    }
                    if (temp <= 12)
                    {
                        sMC->samplingFrequency = samplingFreqTable[temp];
                    }
                    if (temp == 0xf)
                    {
                        // the sampling frequency is specified directly in the next 24 bits
                        temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_SAMP_RATE);
                    }

                    // channelConfiguration
                    sMC->channelConfiguration = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_CHAN_CONFIG);

                    sMC->sbrPresentFlag = -1;
                    if (sMC->audioObjectType == MP4AUDIO_SBR ||
                            sMC->audioObjectType == MP4AUDIO_PS)
                    {
                        /* to disable explicit backward compatibility check */
                        sMC->extensionAudioObjectType = sMC->audioObjectType;
                        sMC->sbrPresentFlag = 1;

                        /* extensionSamplingFrequencyIndex */
                        sMC->extensionSamplingFrequencyIndex =
                            BufferReadBits(decoderSpecificConfig, &bitPos, LEN_SAMP_RATE_IDX);
                        if (sMC->extensionSamplingFrequencyIndex == 0x0f)
                        {
                            /*
                             * sampling rate not listed in Table 1.6.2,
                             * this release does not support this
                             */
                            /* extensionSamplingFrequency */
                            sMC->extensionSamplingFrequency =
                                BufferReadBits(decoderSpecificConfig, &bitPos, LEN_SAMP_RATE);
                        }
                        sMC->audioObjectType = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_OBJ_TYPE);
                    }

                    if (sMC->audioObjectType == MP4AUDIO_AAC_LC ||
                            sMC->audioObjectType == MP4AUDIO_LTP)
                    {
                        // GASpecificConfig

                        // frameLengthFlag
                        temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_FRAME_LEN_FLAG);

                        // dependsOnCoreCoder
                        dependsOnCoreCoder = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_DEPEND_ON_CORE);
                        if (dependsOnCoreCoder == 1)
                        {
                            // 14 more bits of coreCoderDelay follow
                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_CORE_DELAY);
                        }

                        // extensionFlag
                        int extensionFlag = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_EXT_FLAG);

                        if (sMC->channelConfiguration == 0)
                        {
                            // there should be a program_config_element
                            // defined in 4.4.1.1 of 3995 sp4
                            // note: since we are only parsing this to get
                            // the size of the audioSpecificConfig, we don't care about the
                            // values except to know how many loops to do in the parsing
                            // process... save these loop variables in an array
                            uint32 loopVars[6] = {0, 0, 0, 0, 0, 0};

                            // don't actually need these values, just advance the bit pointer
                            bitPos += LEN_TAG;       //temp = BufferReadBits(ASCPtr, &bitPos, 4); // element_instance_tag
                            bitPos += LEN_PROFILE;   //temp = BufferReadBits(ASCPtr, &bitPos, 2); // object_type
                            bitPos += LEN_SAMP_IDX;  //temp = BufferReadBits(ASCPtr, &bitPos, 4); // sampling frequency index

                            loopVars[0] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_ELE); // num front channel elems
                            loopVars[1] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_ELE); // num side channel elems
                            loopVars[2] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_ELE); // num back channel elems
                            loopVars[3] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_LFE); // num lfe channel elems
                            loopVars[4] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_DAT); // num assoc data elems
                            loopVars[5] = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_NUM_CCE); // num valid cc elems

                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_MIX_PRES); // mono mixdown present
                            if (temp)
                            {
                                bitPos += LEN_NUM_ELE;
                            }
                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_MIX_PRES); // stereo mixdown present
                            if (temp)
                            {
                                bitPos += LEN_NUM_ELE;
                            }
                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_MIX_PRES); // matrix mixdown present
                            if (temp)
                            {
                                bitPos += LEN_NUM_DAT;
                            }

                            bitPos += (loopVars[0] * 5); // front channel info
                            bitPos += (loopVars[1] * 5); // side channel info
                            bitPos += (loopVars[2] * 5); // back channel info
                            bitPos += (loopVars[3] * 4); // lfe channel info
                            bitPos += (loopVars[4] * 4); // assoc data info
                            bitPos += (loopVars[5] * 5); // valid cc info

                            // the spec then calls for byte_alignment(): round bitPos up to the
                            // next byte boundary, but only if it is not already byte aligned
                            if (bitPos % 8 != 0)
                            {
                                bitPos = ((bitPos >> 3) + 1) << 3;
                            }

                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, LEN_COMMENT_BYTES); // comment field bytes
                            bitPos += (temp << 3);
                        }

                        // this obviously can't happen at this point, but show it for clarity's sake
                        if (sMC->audioObjectType == MP4AUDIO_AAC_SCALABLE ||
                                sMC->audioObjectType == MP4AUDIO_ER_AAC_SCALABLE)
                        {
                        }

                        if (extensionFlag)
                        {
                            if (sMC->audioObjectType == MP4AUDIO_ER_BSAC)
                            {
                                // can't ever happen here
                            }
                            if (sMC->audioObjectType == MP4AUDIO_ER_AAC_LC ||
                                    sMC->audioObjectType == 18 ||
                                    sMC->audioObjectType == MP4AUDIO_ER_AAC_LTP ||
                                    sMC->audioObjectType == MP4AUDIO_ER_AAC_SCALABLE ||
                                    sMC->audioObjectType == MP4AUDIO_ER_TWINVQ ||
                                    sMC->audioObjectType == MP4AUDIO_ER_AAC_LD)
                            {
                                // can't ever happen here
                            }

                            // extensionFlag3 -- theoretically possible, but should only appear in the future, if ever
                            temp = BufferReadBits(decoderSpecificConfig, &bitPos, 1);
                            if (temp)
                            {
                                // tbd in version 3
                            }
                        }
                    }