void get_dse(
    Char    *DataStreamBytes,
    BITS    *pInputStream)
{
    Int   i;
    Int   data_byte_align_flag;
    UInt  count;
    Int   esc_count;
    Char *pDataStreamBytes;

    pDataStreamBytes = DataStreamBytes;

    /*
     * Get element instance tag (4 bits)
     * (max of 16 per raw data block)
     */
    get9_n_lessbits(LEN_TAG, pInputStream);

    /*
     * Get data_byte_align_flag (1 bit) to see if byte alignment is
     * performed within the DSE
     */
    data_byte_align_flag = get1bits(pInputStream);

    /*
     * Get count (8 bits)
     */
    count = get9_n_lessbits(LEN_D_CNT, pInputStream);

    /*
     * If count == 255, its value is incremented by a second 8-bit value,
     * esc_count. This final value represents the number of bytes in the DSE.
     */
    if (count == (1 << LEN_D_CNT) - 1)
    {
        esc_count = (Int)get9_n_lessbits(LEN_D_ESC, pInputStream); /* 8 bits */
        count += esc_count;
    }

    /*
     * Align if flag is set
     */
    if (data_byte_align_flag)
    {
        byte_align(pInputStream);
    }

    for (i = count; i != 0; i--)
    {
        *(pDataStreamBytes++) = (Char) get9_n_lessbits(LEN_BYTE,
                                pInputStream);
    }

    return;

} /* end get_dse */
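/*
 * The count/esc_count pattern used above appears in several AAC syntax
 * elements: an 8-bit count of 255 signals that a second 8-bit escape value
 * follows and is added to it. Below is a minimal standalone sketch of that
 * rule, assuming a plain byte buffer and a hypothetical read_u8() helper;
 * it is not the library's BITS reader, just an illustration.
 */
#include <stdint.h>

static uint8_t read_u8(const uint8_t **pp)   /* hypothetical helper */
{
    return *(*pp)++;
}

/* Returns the DSE byte count: 0..254 directly, or 255 plus an escape value. */
static unsigned read_dse_count(const uint8_t **pp)
{
    unsigned count = read_u8(pp);        /* LEN_D_CNT = 8 bits */
    if (count == 255)                    /* (1 << 8) - 1 */
    {
        count += read_u8(pp);            /* LEN_D_ESC = 8 bits, max total 510 */
    }
    return count;
}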
Int get_prog_config(
    tDec_Int_File *pVars,
    ProgConfig    *pScratchPCE)
{
    Int    i;
    UInt   tag;
    Int    numChars;
    UInt   temp;
    Bool   flag;
    Int    status = SUCCESS;
    BITS  *pInputStream = &(pVars->inputStream);

    /*
     * The tag is used at the very end to see if this PCE is
     * the one to be used. Otherwise it does not need to be saved for
     * the simple configurations to be used in this version of an AAC
     * decoder.
     *
     * All of the bits of this PCE must be read even if this PCE will not
     * be used. They are read into a temporary PCE, then later it is decided
     * whether to keep this PCE.
     *
     * To allow quick removal of the fields from the ProgConfig structure
     * that will probably not be used at a later date,
     * while still advancing the bitstream pointer, the return value of
     * getbits is saved into a temporary variable, then transferred to
     * the structure item.
     */
    tag = get9_n_lessbits(LEN_TAG, pInputStream);

    pScratchPCE->profile = get9_n_lessbits(LEN_PROFILE, pInputStream);

    pScratchPCE->sampling_rate_idx = get9_n_lessbits(LEN_SAMP_IDX, pInputStream);

    if (!pVars->adif_test &&
        (pScratchPCE->sampling_rate_idx != pVars->prog_config.sampling_rate_idx))
    {
#ifdef AAC_PLUS
        /*
         * The PCE carries the baseline frequency. If SBR or PS are used, the
         * frequencies will not match, so check for this unique case and let
         * decoding continue if this is a redundant PCE.
         */
        if ((pScratchPCE->sampling_rate_idx !=
             (pVars->prog_config.sampling_rate_idx + 3)) ||
            (pVars->mc_info.upsamplingFactor != 2))
#endif
        {
            /* Rewind the pointer, as implicit channel configuration may be the case */
            pInputStream->usedBits -= (LEN_TAG + LEN_PROFILE + LEN_SAMP_IDX);

            return (1); /* mismatch cannot happen */
        }
    }

    /*
     * Retrieve the number of element lists for each of
     * front, side, back, lfe, data, and coupling.
     *
     * For two-channel stereo or mono, only the data in the front needs
     * to be saved. However, ALL fields need to be skipped over in some
     * fashion. Also, the number of elements needs to be temporarily saved
     * to call get_ele_list(). If that function was changed to pass in
     * the number of points to be read, the memory set aside inside the
     * ProgConfig structure could be removed.
     */

    /*
     * The next six function calls could be combined into one, then use
     * shifts and masks to retrieve the individual fields.
     */
    temp = get9_n_lessbits(LEN_NUM_ELE, pInputStream);
    pScratchPCE->front.num_ele = temp;

    /* Needed only to read in the element list. */
    temp = get9_n_lessbits(LEN_NUM_ELE, pInputStream);
    pScratchPCE->side.num_ele = temp;

    /* Needed only to read in the element list. */
    temp = get9_n_lessbits(LEN_NUM_ELE, pInputStream);
    pScratchPCE->back.num_ele = temp;

    /* Needed only to read in the element list. */
    temp = get9_n_lessbits(LEN_NUM_LFE, pInputStream);
    pScratchPCE->lfe.num_ele = temp;

    /* Needed only to read in the element list. */
    temp = get9_n_lessbits(LEN_NUM_DAT, pInputStream);
    pScratchPCE->data.num_ele = temp;

    /* Needed only to read in the element list. */
    temp = get9_n_lessbits(LEN_NUM_CCE, pInputStream);
    pScratchPCE->coupling.num_ele = temp;

    /*
     * Read in mix down data.
     *
     * Whether these fields can be removed and have proper operation
     * will be determined at a later date.
     */

    /* Read presence of mono_mix */
    flag = get1bits(pInputStream); /* LEN_MIX_PRES */
    pScratchPCE->mono_mix.present = flag;

    if (flag != FALSE)
    {
        temp = get9_n_lessbits(LEN_TAG, pInputStream);
        pScratchPCE->mono_mix.ele_tag = temp;
    } /* end if (flag != FALSE) */

    /* Read presence of stereo mix */
    flag = get1bits(pInputStream); /* LEN_MIX_PRES */
    pScratchPCE->stereo_mix.present = flag;

    if (flag != FALSE)
    {
        temp = get9_n_lessbits(LEN_TAG, pInputStream);
        pScratchPCE->stereo_mix.ele_tag = temp;
    } /* end if (flag != FALSE) */

    /* Read presence of matrix mix */
    flag = get1bits(pInputStream); /* LEN_MIX_PRES */
    pScratchPCE->matrix_mix.present = flag;

    if (flag != FALSE)
    {
        temp = get9_n_lessbits(LEN_MMIX_IDX, pInputStream);
        pScratchPCE->matrix_mix.ele_tag = temp;

        temp = get1bits(pInputStream); /* LEN_PSUR_ENAB */
        pScratchPCE->matrix_mix.pseudo_enab = temp;
    } /* end if (flag != FALSE) */

    /*
     * Get each of the element lists. Only the front information will be
     * used for the PV decoder, but the usedBits field of pInputStream must
     * be advanced appropriately.
     *
     * This could be optimized by advancing the bit stream for the
     * elements that do not need to be read.
     */
    get_ele_list(&pScratchPCE->front, pInputStream, TRUE);

    get_ele_list(&pScratchPCE->side, pInputStream, TRUE);

    get_ele_list(&pScratchPCE->back, pInputStream, TRUE);

    get_ele_list(&pScratchPCE->lfe, pInputStream, FALSE);

    get_ele_list(&pScratchPCE->data, pInputStream, FALSE);

    get_ele_list(&pScratchPCE->coupling, pInputStream, TRUE);

    /*
     * The standard requests a byte alignment before reading in the
     * comment. This can be done because LEN_COMMENT_BYTES == 8.
     */
    byte_align(pInputStream);

    numChars = get9_n_lessbits(LEN_COMMENT_BYTES, pInputStream);

    /*
     * Ignore the comment - it requires 65 bytes to store (or worse on DSP).
     * If this field is restored, make sure to append a trailing '\0'
     */
    for (i = numChars; i > 0; i--)
    {
        pScratchPCE->comments[i] = (Char) get9_n_lessbits(LEN_BYTE,
                                   pInputStream);
    } /* end for */

    if (pVars->current_program < 0)
    {
        /*
         * If this is the first PCE, it becomes the current, regardless of
         * its tag number.
         */
        pVars->current_program = tag;

        pVars->mc_info.ch_info[0].tag = 0;

    } /* end if (pVars->current_program < 0) */

    if (tag == (UInt)pVars->current_program)
    {
        /*
         * This branch is reached under two conditions:
         * 1) This is the first PCE found; it was selected in the above if
         *    block. In all encoders found thus far, the tag value has been
         *    zero.
         * 2) A PCE has been sent by the encoder with a tag that matches
         *    the first one sent. It will then be re-read. No encoder found
         *    thus far has done this.
         *
         * Regardless, the temporary PCE will now be copied into
         * the one official program configuration.
         */

        /*
         * Keep the adts setting in case of a redundant PCE (only applicable
         * when using the aac-lib's own adts parser)
         */
        pScratchPCE->file_is_adts = pVars->prog_config.file_is_adts;
        pScratchPCE->headerless_frames = pVars->prog_config.headerless_frames;

        pv_memcpy(&pVars->prog_config,
                  pScratchPCE,
                  sizeof(ProgConfig));

        tag = 0;

        /*
         * Check that dual-mono does not carry more than 2 tracks,
         * otherwise flag a non-supported error
         */
        if ((pVars->prog_config.front.num_ele > 2) &&
            !(pVars->prog_config.front.ele_is_cpe[tag]))
        {
            status = 1;
        }

        /*
         * Check that stereo does not carry more than 1 track,
         * otherwise flag a non-supported error
         */
        if ((pVars->prog_config.front.num_ele > 1) &&
            (pVars->prog_config.front.ele_is_cpe[tag]))
        {
            status = 1;
        }

        if (!status)
        {
            /* Enter configuration into the MC_Info structure */
            status = set_mc_info(&pVars->mc_info,
                                 (tMP4AudioObjectType)(pVars->prog_config.profile + 1),
                                 pVars->prog_config.sampling_rate_idx,
                                 pVars->prog_config.front.ele_tag[tag],
                                 pVars->prog_config.front.ele_is_cpe[tag],
                                 pVars->winmap,
                                 pVars->SFBWidth128);

            if (pVars->mc_info.upsamplingFactor == 2)
            {
                /*
                 * prog_config.sampling_rate_idx corresponds to the AAC base
                 * layer; if the upsampling factor is active, then the output
                 * frequency needs to be adjusted accordingly
                 */
                pVars->prog_config.sampling_rate_idx -= 3;
            }
        }

    } /* end if (tag == pVars->current_program) */

    return (status);
}
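/*
 * Why "sampling_rate_idx -= 3" doubles the output rate under SBR: in the
 * MPEG-4 sampling-frequency table (ISO/IEC 14496-3), for indices 3..11 the
 * entry three positions lower holds exactly double the frequency. The sketch
 * below illustrates this relationship; the table values come from the
 * standard, but the helper name is illustrative and not part of this library.
 */
static const long sampling_rate_table[13] =
{
    96000, 88200, 64000, 48000, 44100, 32000, 24000,
    22050, 16000, 12000, 11025,  8000,  7350
};

/* Returns the output rate for a base index when the SBR upsampling factor is 2. */
static long sbr_output_rate(int base_idx)
{
    if (base_idx >= 3 && base_idx <= 11)
    {
        return sampling_rate_table[base_idx - 3];   /* == 2 * base rate */
    }
    return sampling_rate_table[base_idx];           /* doubling not representable */
}

/* e.g. base index 6 (24000 Hz) -> output index 3 -> 48000 Hz */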
bool VC2Decoder::parseSeqHeader(char *_idata) {
#define EXPECT_VAL(N) { uint32_t d = read_uint(idata, bits);            \
    if (d != (N)) {                                                     \
      writelog(LOG_WARN, "%s:%d: Expected %d, got %d when parsing sequence header\n", __FILE__, __LINE__, (N), d); \
    }                                                                   \
  }

  uint8_t *idata = (uint8_t *)_idata;

  if ((!mSeqHeaderEncoded) || mSeqHeaderEncodedLength == 0 ||
      (memcmp(mSeqHeaderEncoded, idata, mSeqHeaderEncodedLength) != 0)) {
    writelog(LOG_INFO, "Processing Sequence Header");
    int bits = 7;

    EXPECT_VAL(2);
    EXPECT_VAL(0);
    EXPECT_VAL(3);

    int level = read_uint(idata, bits);
    if (level != 3 && level != 6) {
      writelog(LOG_WARN, "%s:%d: Expected 3 or 6, got %d when reading level\n", __FILE__, __LINE__, level);
    }

    VC2DecoderParamsInternal params;
    params = mParams;

    params.video_format.base_video_format = read_uint(idata, bits);

    params.video_format.custom_dimensions_flag = read_bool(idata, bits);
    if (params.video_format.custom_dimensions_flag) {
      params.video_format.frame_width  = read_uint(idata, bits);
      params.video_format.frame_height = read_uint(idata, bits);
    }

    params.video_format.custom_color_diff_format_flag = read_bool(idata, bits);
    if (params.video_format.custom_color_diff_format_flag) {
      params.video_format.color_diff_format_index = read_uint(idata, bits);
    }

    params.video_format.custom_scan_format_flag = read_bool(idata, bits);
    if (params.video_format.custom_scan_format_flag) {
      params.video_format.source_sampling = read_uint(idata, bits);
    }

    params.video_format.custom_frame_rate_flag = read_bool(idata, bits);
    if (params.video_format.custom_frame_rate_flag) {
      params.video_format.frame_rate_index = read_uint(idata, bits);
      if (params.video_format.frame_rate_index == 0) {
        params.video_format.frame_rate_numer = read_uint(idata, bits);
        params.video_format.frame_rate_denom = read_uint(idata, bits);
      }
    }

    params.video_format.custom_pixel_aspect_ratio_flag = read_bool(idata, bits);
    if (params.video_format.custom_pixel_aspect_ratio_flag) {
      params.video_format.pixel_aspect_ratio_index = read_uint(idata, bits);
      if (params.video_format.pixel_aspect_ratio_index == 0) {
        params.video_format.pixel_aspect_ratio_numer = read_uint(idata, bits);
        params.video_format.pixel_aspect_ratio_denom = read_uint(idata, bits);
      }
    }

    params.video_format.custom_clean_area_flag = read_bool(idata, bits);
    if (params.video_format.custom_clean_area_flag) {
      params.video_format.clean_width  = read_uint(idata, bits);
      params.video_format.clean_height = read_uint(idata, bits);
      params.video_format.left_offset  = read_uint(idata, bits);
      params.video_format.top_offset   = read_uint(idata, bits);
    }

    params.video_format.custom_signal_range_flag = read_bool(idata, bits);
    if (params.video_format.custom_signal_range_flag) {
      params.video_format.signal_range_index = read_uint(idata, bits);
      if (params.video_format.signal_range_index == 0) {
        params.video_format.luma_offset          = read_uint(idata, bits);
        params.video_format.luma_excursion       = read_uint(idata, bits);
        params.video_format.color_diff_offset    = read_uint(idata, bits);
        params.video_format.color_diff_excursion = read_uint(idata, bits);
      }
    }

    params.video_format.custom_color_spec_flag = read_bool(idata, bits);
    if (params.video_format.custom_color_spec_flag) {
      params.video_format.color_spec_index = read_uint(idata, bits);
      if (params.video_format.color_spec_index == 0) {
        params.video_format.custom_color_primaries_flag = read_bool(idata, bits);
        if (params.video_format.custom_color_primaries_flag) {
          params.video_format.color_primaries_index = read_uint(idata, bits);
        }

        params.video_format.custom_color_matrix_flag = read_bool(idata, bits);
        if (params.video_format.custom_color_matrix_flag) {
          params.video_format.color_matrix_index = read_uint(idata, bits);
        }

        params.video_format.custom_transfer_function_flag = read_bool(idata, bits);
        if (params.video_format.custom_transfer_function_flag) {
          params.video_format.transfer_function_index = read_uint(idata, bits);
        }
      }
    }

    uint32_t picture_coding_mode = read_uint(idata, bits);
    mInterlaced = (picture_coding_mode != 0);

    mParams = params;
    mConfigured = false;

    setVideoFormat(mParams);

    byte_align(idata, bits);

    if (mSeqHeaderEncoded)
      delete[] mSeqHeaderEncoded;
    mSeqHeaderEncodedLength = idata - (uint8_t *)_idata;
    mSeqHeaderEncoded = new uint8_t[mSeqHeaderEncodedLength];
    memcpy(mSeqHeaderEncoded, _idata, mSeqHeaderEncodedLength);

    mSequenceInfo.video_format = params.video_format;
    mSequenceInfo.picture_coding_mode = picture_coding_mode;
    mSequenceInfo.sequence_headers_seen++;

    return true;
  }

  return false;
}
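/*
 * parseSeqHeader() relies on three bitstream primitives: read_bool() pulls a
 * single bit, read_uint() pulls a variable-length unsigned value, and
 * byte_align() discards bits up to the next byte boundary. Below is a minimal
 * sketch of how such primitives can work, assuming MSB-first order with
 * `bits` counting down from 7 inside the current byte, and the interleaved
 * exp-Golomb coding that the VC-2 specification (SMPTE ST 2042-1) defines for
 * read_uint(); the library's own implementations may differ in detail.
 */
#include <cstdint>

static inline bool sketch_read_bool(uint8_t *&idata, int &bits) {
  bool b = ((*idata) >> bits) & 1;   // take bit position `bits` of the current byte
  if (--bits < 0) {                  // move to the next byte when exhausted
    bits = 7;
    idata++;
  }
  return b;
}

static inline uint32_t sketch_read_uint(uint8_t *&idata, int &bits) {
  // Interleaved exp-Golomb: a 0 "follow" bit doubles the value and appends
  // one data bit; a 1 "follow" bit terminates. The single bit '1' decodes to 0.
  uint32_t value = 1;
  while (!sketch_read_bool(idata, bits)) {
    value <<= 1;
    value |= (uint32_t)sketch_read_bool(idata, bits);
  }
  return value - 1;
}

static inline void sketch_byte_align(uint8_t *&idata, int &bits) {
  if (bits != 7) {                   // partially consumed byte: skip the rest
    bits = 7;
    idata++;
  }
}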
int VC2Decoder::processTransformParams(uint8_t *_idata, int ilength) {
  (void)ilength;
  uint8_t *idata = (uint8_t *)_idata;

  if (!mConfigured || !mTransformParamsEncoded || mTransformParamsEncodedLength == 0 ||
      memcmp(mTransformParamsEncoded, idata, mTransformParamsEncodedLength)) {
    writelog(LOG_INFO, "Processing Transform Params");
    int bits = 7;

    VC2DecoderTransformParams transform_params;

    transform_params.wavelet_index = read_uint(idata, bits);
    transform_params.wavelet_depth = read_uint(idata, bits);
    transform_params.slices_x      = read_uint(idata, bits);
    transform_params.slices_y      = read_uint(idata, bits);
    int prefix_bytes      = read_uint(idata, bits);
    int slice_size_scalar = read_uint(idata, bits);

    transform_params.custom_quant_matrix_flag = read_bool(idata, bits);
    if (transform_params.custom_quant_matrix_flag) {
      transform_params.quant_matrix_LL = read_uint(idata, bits);
      for (int l = 0; l < (int)transform_params.wavelet_depth - 1; l++) {
        transform_params.quant_matrix_HL[l] = read_uint(idata, bits);
        transform_params.quant_matrix_LH[l] = read_uint(idata, bits);
        transform_params.quant_matrix_HH[l] = read_uint(idata, bits);
      }
    }

    byte_align(idata, bits);

    mTransformParamsEncodedLength = (idata - (uint8_t *)_idata);
    if (mTransformParamsEncoded)
      delete[] mTransformParamsEncoded;
    mTransformParamsEncoded = new uint8_t[mTransformParamsEncodedLength];
    memcpy(mTransformParamsEncoded, _idata, mTransformParamsEncodedLength);

    VC2DecoderParamsInternal params = mParams;
    params.transform_params   = transform_params;
    params.slice_size_scalar  = slice_size_scalar;
    params.slice_prefix_bytes = prefix_bytes;
    setParams(params);

    mSequenceInfo.transform_params = transform_params;

    mConfigured = true;

#ifdef DEBUG
    {
      printf("--------------------------------------------------------------------\n");
      printf("  Picture Header Stream Data\n");
      printf("--------------------------------------------------------------------\n");
      /* _idata is already a pointer to the header bytes; taking its address
         here would dump stack memory instead of the stream data. */
      uint8_t *data = (uint8_t *)_idata;
      for (int y = 0; y * 16 < mTransformParamsEncodedLength; y++) {
        printf(" ");
        for (int x = 0; x < 16 && y * 16 + x < mTransformParamsEncodedLength; x++) {
          printf(" %02x", data[y * 16 + x]);
        }
        printf("\n");
      }
      printf("--------------------------------------------------------------------\n");
    }
#endif
  } else {
    idata += mTransformParamsEncodedLength;
  }

  return idata - _idata;
}
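/*
 * Both parseSeqHeader() and processTransformParams() use the same caching
 * trick: keep a copy of the raw header bytes that were last parsed, and on
 * the next frame memcmp() the incoming bytes against that copy. If they are
 * identical, the expensive reconfiguration is skipped and the read pointer is
 * simply advanced by the cached length. A minimal standalone sketch of the
 * pattern follows; the class and member names are illustrative, not part of
 * VC2Decoder.
 */
#include <cstdint>
#include <cstring>

class CachedHeader {
public:
  ~CachedHeader() { delete[] mEncoded; }

  // Returns true when `data` differs from the cached copy and a full
  // re-parse is required; the caller then calls store() with the parsed length.
  bool needsReparse(const uint8_t *data) const {
    return mEncoded == nullptr || mEncodedLength == 0 ||
           std::memcmp(mEncoded, data, mEncodedLength) != 0;
  }

  // Cache `length` bytes of the just-parsed header for the next comparison.
  void store(const uint8_t *data, size_t length) {
    delete[] mEncoded;
    mEncoded = new uint8_t[length];
    std::memcpy(mEncoded, data, length);
    mEncodedLength = length;
  }

  size_t length() const { return mEncodedLength; }

private:
  uint8_t *mEncoded = nullptr;
  size_t mEncodedLength = 0;
};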
OSCL_EXPORT_REF int32 GetActualAacConfig(uint8* aConfigHeader,
                                         uint8* aAudioObjectType,
                                         int32* aConfigHeaderSize,
                                         uint8* SamplingRateIndex,
                                         uint32* NumChannels)
{
    tPVMP4AudioDecoderExternal *iAACDecExt = NULL;
    UInt           initialUsedBits;  /* Unsigned for C55x */
    tDec_Int_File *pVars;            /* Helper pointer */
    MC_Info       *pMC_Info;         /* Helper pointer */

    Int status = ERROR_BUFFER_OVERRUN;

    /*
     * Allocate memory to decode one AAC frame
     */
    if (!iAACDecExt)
    {
        iAACDecExt = new tPVMP4AudioDecoderExternal;
        if (!iAACDecExt)
        {
            return 1;
        }
        iAACDecExt->inputBufferCurrentLength = 0;
    }

    iAACDecExt->pInputBuffer          = aConfigHeader;
    iAACDecExt->inputBufferMaxLength  = PVMP4AUDIODECODER_INBUFSIZE;
    iAACDecExt->inputBufferUsedLength = 0;
    iAACDecExt->remainderBits         = 0;

    int32 memreq = PVMP4AudioDecoderGetMemRequirements();
    uint8 *pMem = OSCL_ARRAY_NEW(uint8, memreq);

    if (pMem == 0)
    {
        return KCODEC_INIT_FAILURE;
    }

    if (PVMP4AudioDecoderInitLibrary(iAACDecExt, pMem) != 0)
    {
        return KCODEC_INIT_FAILURE;
    }

    iAACDecExt->inputBufferCurrentLength = *aConfigHeaderSize;

    /*
     * Initialize "helper" pointers to existing memory.
     */
    pVars = (tDec_Int_File *)pMem;
    pMC_Info = &pVars->mc_info;

    /*
     * Translate input buffer variables.
     */
    pVars->inputStream.pBuffer = iAACDecExt->pInputBuffer;

    pVars->inputStream.availableBits =
        (UInt)(iAACDecExt->inputBufferCurrentLength << INBUF_ARRAY_INDEX_SHIFT);

    initialUsedBits =
        (UInt)((iAACDecExt->inputBufferUsedLength << INBUF_ARRAY_INDEX_SHIFT) +
               iAACDecExt->remainderBits);

    pVars->inputStream.inputBufferCurrentLength =
        (UInt)iAACDecExt->inputBufferCurrentLength;

    pVars->inputStream.usedBits = initialUsedBits;

    pVars->aacPlusEnabled = true;  /* Always enable aacplus decoding */

    if (initialUsedBits <= pVars->inputStream.availableBits)
    {
        /*
         * Buffer is not overrun, then
         * decode the AudioSpecificConfig() structure
         */
        status = get_audio_specific_config(pVars);
    }

    byte_align(&pVars->inputStream);

    *aConfigHeaderSize = (Int32)((pVars->inputStream.usedBits) >> 3);

    *SamplingRateIndex = pVars->prog_config.sampling_rate_idx;

    *NumChannels = pVars->mc_info.nch;

    *aAudioObjectType = pVars->mc_info.audioObjectType;

    /*
     * Set parameters based on the explicit information from the
     * audio specific config
     */
    if (pVars->mc_info.sbrPresentFlag)
    {
        if (pVars->mc_info.psPresentFlag)
        {
            *NumChannels += 1;
        }
    }

    pVars->status = status;

    /*
     * Clear allocated memory
     */
    if (pMem != NULL)
    {
        OSCL_ARRAY_DELETE(pMem);
        pMem = NULL;
    }

    if (iAACDecExt)
    {
        OSCL_DELETE(iAACDecExt);
        iAACDecExt = NULL;
    }

    return status;
}
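/*
 * A hypothetical caller of GetActualAacConfig(), assuming the library's
 * public headers and OSCL types are on the include path. The two-byte
 * AudioSpecificConfig used here (0x12 0x10) encodes audioObjectType 2
 * (AAC LC), sampling frequency index 4 (44100 Hz) and channelConfiguration 2
 * (stereo); on success the function reports how many bytes of the config it
 * consumed and fills in the object type, rate index and channel count.
 */
void example_parse_asc()   /* illustrative only */
{
    uint8  asc[2] = { 0x12, 0x10 };            /* AAC-LC, 44.1 kHz, stereo */
    uint8  audioObjectType = 0;
    uint8  sampleRateIndex = 0;
    uint32 numChannels = 0;
    int32  configSize = sizeof(asc);           /* in: buffer length, out: bytes consumed */

    int32 status = GetActualAacConfig(asc,
                                      &audioObjectType,
                                      &configSize,
                                      &sampleRateIndex,
                                      &numChannels);
    if (status == SUCCESS)
    {
        /* expect audioObjectType == 2, sampleRateIndex == 4, numChannels == 2 */
    }
}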