////////////////////////////////////////////////////////////////////////////
///
/// Examine the PES private data header.
///
/// The data we have been passed may or may not be a PES private data area.
/// DVD streams will have the PES private data area but Blu-ray and broadcast
/// streams will not.
///
/// The private data only has about 10 non-variable bits so it is unsafe to
/// perform a simple signature check. Instead we use the values to predict the
/// location within the stream of the next sync word. If the sync word is found
/// at the predicted location then Collator_PesAudioDtshd_c::FindNextSyncWord()
/// will clear Collator_PesAudio_c::PassPesPrivateDataToElementaryStreamHandler
/// and we will progress as though we had DVD style PES encapsulation.
///
/// Should we ever lose sync then we will automatically switch back to
/// broadcast mode.
///
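/// As an illustrative sketch (field widths taken from the parsing below, byte
/// offsets relative to the start of the private data area), the DVD style
/// header this code expects looks like:
///
/// <pre>
/// byte 0       sub_stream_id               (0x88..0x8f, i.e. (id & 0xf8) == 0x88)
/// byte 1       number_of_frame_headers
/// bytes 2..3   first_access_unit_pointer   (relative to byte 3)
///
/// predicted sync word offset = first_access_unit_pointer + 3
/// e.g. a pointer of 1 puts the DTS sync word at byte 4, immediately
///      after the private data area.
/// </pre>
///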
/// \return Collator status code, CollatorNoError indicates success.
///
CollatorStatus_t Collator_PesAudioDtshd_c::HandlePesPrivateData(unsigned char *PesPrivateData)
{
	BitStreamClass_c Bits;
	if (CollatorState == SeekingSyncWord)
	{
		// ensure the PES private data is passed to the sync detection code
		PassPesPrivateDataToElementaryStreamHandler = true;
		// parse the private data area (assuming that is what we have)
		Bits.SetPointer(PesPrivateData);
		unsigned int SubStreamId = Bits.Get(8);
		unsigned int NumberOfFrameHeaders = Bits.Get(8);
		unsigned int FirstAccessUnitPointer = Bits.Get(16);
		COLLATOR_DEBUG("FirstAccessUnitPointer: %d\n", FirstAccessUnitPointer);
		if (((SubStreamId & 0xf8) != 0x88) ||
				(NumberOfFrameHeaders > 127) ||
				(FirstAccessUnitPointer > 2034))
		{
			MakeDvdSyncWordPrediction(INVALID_PREDICTION);
		}
		else
		{
			// FirstAccessUnitPointer is relative to the final byte of the private data area. Since
			// the private data area will itself be scanned for start codes this means that we must
			// add three to our predicted offset.
			//
			// We also have a special case for FirstAccessUnitPointer equal to zero which means no access units
			// start within the frame. Some streams have been observed where the FirstAccessUnitPointer is zero
			// but the DTS frame synchronization follows the PES header (i.e. FirstAccessUnitPointer should be 1).
			// Such streams are badly authored but working around the problem here is very unlikely to cause
			// harm to broadcast-style streams.
			MakeDvdSyncWordPrediction(((FirstAccessUnitPointer == 0) ? 1 : FirstAccessUnitPointer) + 3);
		}
	}
	return CollatorNoError;
}
////////////////////////////////////////////////////////////////////////////
///
/// Examine the PES private data header.
///
/// The data we have been passed may or may not be a PES private data area.
/// DVD streams will have the PES private data area but Blu-ray and broadcast
/// streams will not.
///
/// The private data only has about 10 non-variable bits so it is unsafe to
/// perform a simple signature check. Instead we use the values to predict the
/// location within the stream of the next sync word. If the sync word is found
/// at the predicted location then Collator_PesAudioEAc3_c::FindNextSyncWord()
/// will clear Collator_PesAudio_c::PassPesPrivateDataToElementaryStreamHandler
/// and we will progress as though we had DVD style PES encapsulation.
///
/// Should we ever lose sync then we will automatically switch back to
/// broadcast mode.
///
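/// A worked note on the sub-stream id test below: it is a mask comparison
/// rather than a range check, and as arithmetic it accepts two id ranges:
///
/// <pre>
/// (SubStreamId & 0xb8) == 0x80   =>   id in 0x80..0x87 or 0xc0..0xc7
///
/// e.g. 0x83 & 0xb8 = 0x80  (accepted)
///      0x88 & 0xb8 = 0x88  (rejected; 0x88 upwards is the DTS range)
/// </pre>
///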
/// \return Collator status code, CollatorNoError indicates success.
///
CollatorStatus_t Collator_PesAudioEAc3_c::HandlePesPrivateData(unsigned char *PesPrivateData)
{
  BitStreamClass_c Bits;

  if( CollatorState == SeekingSyncWord)
  {
    // ensure the PES private data is passed to the sync detection code
    PassPesPrivateDataToElementaryStreamHandler = true;

    // parse the private data area (assuming that is what we have)
    Bits.SetPointer(PesPrivateData);
    unsigned int SubStreamId = Bits.Get(8);
    unsigned int NumberOfFrameHeaders = Bits.Get(8);
    unsigned int FirstAccessUnitPointer = Bits.Get(16);

    if( ((SubStreamId & 0xb8) != 0x80) ||
	(NumberOfFrameHeaders > 127) ||
	(FirstAccessUnitPointer > 2034) ||
	(FirstAccessUnitPointer == 0) )
    {
      MakeDvdSyncWordPrediction( INVALID_PREDICTION );
    }
    else
    {
      // FirstAccessUnitPointer is relative to the final byte of the private data area. Since
      // the private data area will itself be scanned for start codes this means that we must
      // add three to our predicted offset.
      MakeDvdSyncWordPrediction( FirstAccessUnitPointer + 3 );
    }
  }

  return CollatorNoError;
}
////////////////////////////////////////////////////////////////////////////
///
/// Examine the PES private data header.
///
/// The data we have been passed may or may not be a PES private data area.
/// DVD streams will have the PES private data area but Blu-ray and broadcast
/// streams will not.
///
/// The private data only has about 10 non-variable bits so it is unsafe to
/// perform a simple signature check. Instead we use the values to predict the
/// location within the stream of the next sync word. If the sync word is found
/// at the predicted location then Collator_PesAudioMlp_c::FindNextSyncWord()
/// will clear Collator_PesAudio_c::PassPesPrivateDataToElementaryStreamHandler
/// and we will progress as though we had DVD style PES encapsulation.
///
/// Should we ever lose sync then we will automatically switch back to
/// broadcast mode.
///
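/// As an illustrative sketch (field widths taken from the parsing below; the
/// meaning of the skipped bits is not asserted here), the DVD-Audio style
/// header checked first looks like:
///
/// <pre>
/// byte 0       sub_stream_id               (0xa1 for Packed PCM)
/// byte 1       3 reserved bits (checked as zero), remainder skipped
/// byte 2       (skipped by this parser)
/// byte 3       private_header_length
/// bytes 4..5   first_access_unit_pointer
///
/// predicted sync word offset = (first_access_unit_pointer - 1) + 6
/// e.g. a pointer of 1 puts the sync word at byte 6, immediately after
///      this six byte header.
/// </pre>
///
/// If those checks fail the code falls back to probing for an HD-DVD style
/// header whose sub_stream_id lies in the 0xb0..0xb7 range.
///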
/// \return Collator status code, CollatorNoError indicates success.
///
CollatorStatus_t Collator_PesAudioMlp_c::HandlePesPrivateData(unsigned char *PesPrivateData)
{
    BitStreamClass_c Bits;

    // ensure the PES private data is passed to the sync detection code
    if( CollatorState == SeekingSyncWord)
    {
        PassPesPrivateDataToElementaryStreamHandler = true;
    }

    // parse the private data area (assuming that is what we have)
    Bits.SetPointer(PesPrivateData);
    unsigned int SubStreamId = Bits.Get(8);
    unsigned int Reserved = Bits.Get(3);
    Bits.FlushUnseen(5 + 8);
    unsigned int PrivateHeaderLength = Bits.Get(8);
    unsigned int FirstAccessUnitPointer = Bits.Get(16);

    COLLATOR_DEBUG("FirstAccessUnitPointer: %d \n", FirstAccessUnitPointer);

    if( ((SubStreamId & 0xff) != 0xA1) ||
            (Reserved != 0) ||
            (FirstAccessUnitPointer > 2034) ||
            (FirstAccessUnitPointer < 5) )
    {
        // this is not a Packed PCM (MLP in DVD-Audio) private data area;
        // check whether we have an HD-DVD style private data header instead
        Bits.SetPointer(PesPrivateData + 1);
        FirstAccessUnitPointer = Bits.Get(16);
        if( ((SubStreamId & 0xf8) != 0xB0) ||
                (FirstAccessUnitPointer > 2025) ||
                (FirstAccessUnitPointer < 2) )
        {
            MakeDvdSyncWordPrediction( INVALID_PREDICTION );
        }
    }
    else
    {
        // FirstAccessUnitPointer is relative to the final byte of the private data area. Since
        // the private data area will itself be scanned for start codes this means that we must
        // add the private header length up to the first_access_unit_pointer field
        // to our predicted offset
        // see DVD Specifications for Read-Only Disc / Part 4: Audio Specifications, Table 7.2.3.1.2-2
        MakeDvdSyncWordPrediction( FirstAccessUnitPointer - 1 + 6 );
        StuffingBytesLength = PrivateHeaderLength - 6;
    }

    return CollatorNoError;
}
////////////////////////////////////////////////////////////////////////////
///
/// Examine the supplied extension header and determine the length of the extension.
///
/// Extension headers are described in ISO/IEC 13818-3.
///
/// This is a utility function shared by the frame parser and the equivalent
/// collator. Due to its shared nature this is a static method and does not access
/// any information not provided via the function arguments.
///
/// <b>Header format</b>
///
/// <pre>
/// AAAAAAAA AAAABBBB BBBBBBBB BBBBCCCC CCCCCCCD (40 bits)
///
/// Sign            Length          Description
/// 
/// A                12             Frame sync (0x7ff)
/// B                16             CRC
/// C                11             Length in bytes
/// D                1              ID (reserved - set to zero)
/// </pre>
///
/// \return Frame parser status code, FrameParserNoError indicates success.
///
FrameParserStatus_t FrameParser_AudioAvs_c::ParseExtensionHeader(unsigned char *ExtensionHeader, unsigned int *ExtensionLength)
{
    BitStreamClass_c       Bits;
    
    Bits.SetPointer( ExtensionHeader );

    unsigned int frameSync = Bits.Get( 12 );
    
    if( 0x7ff != frameSync )
    {
	// not a printk error because this method is called speculatively
	FRAME_DEBUG( "Invalid start code %x\n", frameSync );
	return FrameParserError;
    }
    
    unsigned int crc = Bits.Get(16);
    
    unsigned int lengthInBytes = Bits.Get(11);

    unsigned int id = Bits.Get(1);
    if( 0 != id )
    {
	FRAME_ERROR( "Invalid (reserved) ID %x\n", id );
	return FrameParserError;
    }
    
    FRAME_DEBUG( "FrameSync %03x  CRC %04x  FrameSize %d  ID %d\n", frameSync, crc, lengthInBytes, id );
    
    *ExtensionLength = lengthInBytes;
    return FrameParserNoError;
}
////////////////////////////////////////////////////////////////////////////
///
/// Examine the supplied frame header and extract the information contained
/// within.
///
/// This is a utility function shared by the frame parser and the equivalent
/// collator. Due to its shared nature this is a static method and does not
/// access any information not provided via the function arguments.
///
/// <b>AC3 Bit stream</b>
///
/// From RFC-4184 (http://www.rfc-editor.org/rfc/rfc4184.txt).
///
/// <pre>
/// AC-3 bit streams are organized into synchronization frames. Each AC-3 frame
/// contains a Sync Information (SI) field, a Bit Stream Information (BSI) field,
/// and 6 audio blocks (AB), each representing 256 PCM samples for each channel.
/// The entire frame represents a time duration of 1536 PCM samples across all
/// coded channels (e.g., 32 msec @ 48kHz sample rate). Figure 1 shows the AC-3
/// frame format.
///
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// |SI |BSI| AB0 | AB1 | AB2 | AB3 | AB4 | AB5 |AUX|CRC|
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///
/// Figure 1. AC-3 Frame Format
///
/// The Synchronization Information field contains information needed to acquire
/// and maintain codec synchronization. The Bit Stream Information field contains
/// parameters that describe the coded audio service. Each audio block also
/// contains fields that indicate the use of various coding tools: block switching,
/// dither, coupling, and exponent strategy. They also contain metadata,
/// optionally used to enhance the playback, such as dynamic range control. Figure
/// 2 shows the structure of an AC-3 audio block. Note that field sizes vary
/// depending on the coded data.
///
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// | Block  | Dither | Dynamic    | Coupling | Coupling    | Exponent |
/// | switch | Flags  | Range Ctrl | Strategy | Coordinates | Strategy |
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// | Exponents        | Bit Allocation       | Mantissas              |
/// |                  | Parameters           |                        |
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///
/// Figure 2. AC-3 Audio Block Format
///
/// </pre>
///
/// E-AC3 bit stream decoding is done with the following BD assumptions:
/// independent and dependent E-AC3 substreams are always tagged as belonging to
/// programme 0 (so there is no need to filter programmes in the E-AC3 bitstream).
///
/// \return Frame parser status code, FrameParserNoError indicates success.
///
FrameParserStatus_t FrameParser_AudioEAc3_c::ParseSingleFrameHeader(unsigned char *FrameHeaderBytes,
								    EAc3AudioParsedFrameHeader_t *ParsedFrameHeader,
								    bool SearchForConvSync)
{
	unsigned int SamplingFrequency;
	unsigned int FrameSize = 0;
	unsigned int NbSamples;
	unsigned char Bsid;
	Ac3StreamType_t Type;
	bool convsync = false;
	BitStreamClass_c Bits;
	Bits.SetPointer(FrameHeaderBytes);
	unsigned short SyncWord = Bits.Get(16);
	if (SyncWord != EAC3_START_CODE)
	{
		if (SearchForConvSync == false)
		{
			FRAME_ERROR("Invalid start code 0x%04x\n", SyncWord);
		}
		return FrameParserError;
	}
	Bits.SetPointer(&FrameHeaderBytes[5]);
	Bsid = Bits.Get(5);
	if (Bsid <= 8) // bsid <= 8 and bsid >= 0
	{
		unsigned int RateCode;
		unsigned int FsCode;
		unsigned int BitRate;
		Type = TypeAc3;
		Bits.SetPointer(&FrameHeaderBytes[4]);
		FsCode = Bits.Get(2);
		if (FsCode >= 3)
		{
			FRAME_ERROR("Invalid frequency code\n");
			return FrameParserError;
		}
		RateCode = Bits.Get(6);
		if (RateCode >= 38)
		{
			FRAME_ERROR("Invalid rate code\n");
			return FrameParserError;
		}
		BitRate = AC3Rate[RateCode >> 1];
		SamplingFrequency = EAC3SamplingFreq[FsCode];
		FrameSize = 2 * ((FsCode == 1) ? ((320 * BitRate / 147 + (RateCode & 1))) : (EAC3FsCodeToFrameSize[FsCode] * BitRate));
		NbSamples = 1536;
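		// Illustrative cross-check, assuming the usual AC-3 fscod mapping (FsCode 0
		// being 48kHz) and that AC3Rate[] holds the rate in kbit/s: at 48kHz a sync
		// frame spans 1536 samples = 32ms, so 192kbit/s gives 192000 * 0.032 / 8
		// = 768 bytes, which the FrameSize expression above should reproduce.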
	}
////////////////////////////////////////////////////////////////////////////
///
/// Examine the supplied frame header and extract the information contained within.
///
/// This is a utility function shared by the frame parser and the equivalent
/// collator. Due to its shared nature this is a static method and does not access
/// any information not provided via the function arguments.
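///
/// As a sketch, the fixed part of the access unit header parsed below is:
///
/// <pre>
///  4 bits   check_nibble
/// 12 bits   access_unit_length   (in 16-bit words, hence the x2 below)
/// 16 bits   input_timing
/// 32 bits   format_sync          (MLP_FORMAT_SYNC_A: major sync, FBA type;
///                                 MLP_FORMAT_SYNC_B: major sync, DVD-Audio
///                                 style format_info; anything else: minor
///                                 sync, no format information present)
/// </pre>
///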
/// \return Frame parser status code, FrameParserNoError indicates success.
///
FrameParserStatus_t FrameParser_AudioMlp_c::ParseSingleFrameHeader( unsigned char *FrameHeaderBytes, 
								    MlpAudioParsedFrameHeader_t *LocalParsedFrameHeader )
{
  BitStreamClass_c Bits;
  int              AccessUnitLength;
  unsigned int     FormatSync;
  
  boolean          IsMajorSync = false;
  boolean          IsFBAType = false;

//

  Bits.SetPointer(FrameHeaderBytes);

  // check_nibble
  Bits.Flush(4);
  // access_unit_length
  AccessUnitLength = 2 * (Bits.Get(12));
  // input_timing
  Bits.FlushUnseen(16);
  
  // format_sync
  FormatSync = Bits.Get(32);

#if 0 //true for mlp only...
  // sanity check
  if (AccessUnitLength > MLP_MAX_ACCESS_UNIT_SIZE)
  {
    FRAME_ERROR("Uncorrect value for Acces Unit Length: %d\n", AccessUnitLength);
    FRAME_DEBUG("Frame header bytes: %2x, %2x, %2x, %2x\n", 
		FrameHeaderBytes[0], FrameHeaderBytes[1], FrameHeaderBytes[2], FrameHeaderBytes[3]);
    return FrameParserError;
  }
#endif

  if (FormatSync == MLP_FORMAT_SYNC_A)
  {
    IsMajorSync = true;
    IsFBAType = true;
  }
  else if (FormatSync == MLP_FORMAT_SYNC_B)
  {
    IsMajorSync = true;
  }

  if (IsMajorSync)
  {
    // the following variables are the exploded Format Info field of
    // an MLP major sync
    MlpSamplingFreq_t FreqId;
    
    // parse format_info field
    if (IsFBAType)
    {
      FreqId = (MlpSamplingFreq_t) Bits.Get(4);
      Bits.FlushUnseen(28);

      // sanity check on sampling frequency
      if (((FreqId > MlpSamplingFreq192) &&
	   (FreqId < MlpSamplingFreq44p1)) ||
	  (FreqId >= MlpSamplingFreqNone))
      {
	FRAME_ERROR( "Invalid Sampling Frequency: %d\n",
		     FreqId);
	return FrameParserError;
      }
    }
    else
    {
      // the format of format_info is the one described in the DVD-Audio specs.
      MlpWordSize_t WordSizeId1 = (MlpWordSize_t) Bits.Get(4);
      MlpWordSize_t WordSizeId2 = (MlpWordSize_t) Bits.Get(4);

      FreqId = (MlpSamplingFreq_t) Bits.Get(4);
      MlpSamplingFreq_t FreqId2 = (MlpSamplingFreq_t) Bits.Get(4);

      // sanity checks on word sizes
      if ((WordSizeId1 >= MlpWordSizeNone) || 
	  ((WordSizeId2 >= MlpWordSizeNone) && (WordSizeId2 != MLP_DVD_AUDIO_NO_CH_GR2)))
	{
	  FRAME_ERROR( "Invalid Word Size\n");
	  return FrameParserError;
	}
      
      // sanity check on sampling frequencies
      if ( (((FreqId > MlpSamplingFreq192) &&
	     (FreqId < MlpSamplingFreq44p1)) ||
	    (FreqId >= MlpSamplingFreqNone)) ||
	   (((FreqId2 > MlpSamplingFreq96) &&
	     (FreqId2 < MlpSamplingFreq44p1)) ||
	    ((FreqId2 >= MlpSamplingFreq176p4) && (FreqId2 != MLP_DVD_AUDIO_NO_CH_GR2))) )
	{
	  FRAME_ERROR( "Invalid Sampling Frequency\n");
	  return FrameParserError;
	}
      Bits.FlushUnseen(16);
    }

    LocalParsedFrameHeader->NumberOfSamples = MlpSampleCount[FreqId];
    LocalParsedFrameHeader->SamplingFrequency = FreqId;

    {    
      // sanity checks on signature
      unsigned int Signature = Bits.Get(16);
      
      if (Signature != MLP_SIGNATURE)
	{
	  FRAME_ERROR("Wrong signature: %dx\n", Signature);
	  return FrameParserError;
	}
    }
  }

  LocalParsedFrameHeader->IsMajorSync = IsMajorSync;
  LocalParsedFrameHeader->Length = AccessUnitLength;

  //

  if (IsMajorSync)
  {
    FRAME_DEBUG( "IsMajorSync, IsFBAType: %d, Length: %d, NumberOfSamples: %d, Frequency %d\n",
		 IsFBAType, AccessUnitLength, LocalParsedFrameHeader->NumberOfSamples, MlpSamplingFreq[LocalParsedFrameHeader->SamplingFrequency]);
  }
  else
  {
    FRAME_DEBUG( "Length: %d\n",
		 AccessUnitLength);
  }
    
  return FrameParserNoError;
}
FrameParserStatus_t FrameParser_AudioLpcm_c::ParseFrameHeader(unsigned char *FrameHeaderBytes,
		LpcmAudioParsedFrameHeader_t *NextParsedFrameHeader,
		int GivenFrameSize)
{
	BitStreamClass_c       Bits;
	LpcmSamplingFreq_t     AudioSamplingFrequency1;
	LpcmSamplingFreq_t     AudioSamplingFrequency2  = LpcmSamplingFreqNone;
	unsigned int           NbSamples                = 0;
	char                   NumberOfAudioChannels    = 1;
	LpcmWordSize_t         WordSize1;
	LpcmWordSize_t         WordSize2                = LpcmWordSizeNone;
	int                    DynamicRangeControl      = 0;
	int                    ChannelAssignment        = 0xff; // this is the default assignment for the firmware (not exported by the firmware headers)
	bool                   MuteFlag                 = false;
	bool                   EmphasisFlag             = false;
	int                    FirstAccessUnitPointer   = 1;
	int                    AudioFrameNumber         = 0;
	int                    NbAccessUnits            = 1;
	int                    BitShiftChannel2         = 0;
	int                    FrameSize                = 0;
	unsigned int           ExtraPrivateHeaderLength = 0;
	unsigned char          SubStreamId              = 0;
	unsigned int           AudioFrameSize           = 0;
	LpcmStreamType_t StreamType = NextParsedFrameHeader->Type;
	memset(NextParsedFrameHeader, 0, sizeof(LpcmAudioParsedFrameHeader_t));
	Bits.SetPointer(FrameHeaderBytes);
	switch (StreamType)
	{
		case   TypeLpcmDVDVideo:
		{
			///< frame is a DVD video lpcm
			SubStreamId                  = Bits.Get(8);
			NbAccessUnits                = Bits.Get(8);
			FirstAccessUnitPointer       = Bits.Get(16);
			EmphasisFlag                 = Bits.Get(1);
			MuteFlag                     = Bits.Get(1);
			Bits.FlushUnseen(1);
			AudioFrameNumber             = Bits.Get(5);
			WordSize1                    = (LpcmWordSize_t) Bits.Get(2);
			AudioSamplingFrequency1      = (LpcmSamplingFreq_t) Bits.Get(2);
			Bits.FlushUnseen(1);
			NumberOfAudioChannels        = Bits.Get(3) + 1;
			DynamicRangeControl          = Bits.Get(8);
			// sanity checks...
			if ((SubStreamId & LPCM_DVD_VIDEO_SUBSTREAM_ID_MASK) != LPCM_DVD_VIDEO_SUBSTREAM_ID)
			{
				FRAME_ERROR("Invalid sub stream identifier (%x)\n", SubStreamId);
				return FrameParserError;
			}
			if ((AudioFrameNumber >= 20) && (AudioFrameNumber != 31))
			{
				FRAME_ERROR("Invalid audio frame number (%d)\n", AudioFrameNumber);
				return FrameParserError;
			}
			if (WordSize1 > LpcmWordSize24)
			{
				FRAME_ERROR("Invalid quantization word length (%d)\n", WordSize1);
				return FrameParserError;
			}
			if (NumberOfAudioChannels > 8)
			{
				FRAME_ERROR("Invalid number of audio channels (%d)\n", NumberOfAudioChannels);
				return FrameParserError;
			}
			if (AudioSamplingFrequency1 > LpcmSamplingFreq192)
			{
				FRAME_ERROR("Invalid Sampling Frequency (%d)\n", AudioSamplingFrequency1);
				return FrameParserError;
			}
			NbSamples          = LpcmDVDVideoSampleCount[AudioSamplingFrequency1];
			if (WordSize1 == LPCM_DVD_WS_20)
			{
				// 20-bit special case: the extra 4 bits of each sample are packed after
				// the first 16-bit parts of a pair of samples ...
				// DVD Specifications for Read Only Disc / Part 3: Video Specifications
				// 5. Video Object / 5.4 Presentation Data / Figure 5.4.2.1-2
				AudioFrameSize = (NbSamples / 2) * NumberOfAudioChannels * 5;
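				// Worked example (illustrative, assuming the sample count table yields
				// 80 samples per audio frame at 48kHz): a stereo 20-bit stream packs
				// (80 / 2) * 2 * 5 = 400 bytes per frame, versus 80 * 2 * 2 = 320
				// bytes for the plain 16-bit case handled below.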
			}
			else
			{
				AudioFrameSize = NbSamples * NumberOfAudioChannels * BytesPerSample[WordSize1];
			}
			break;
		}
		case TypeLpcmDVDAudio:
		{
			///< frame is a DVD audio lpcm
			SubStreamId                  = Bits.Get(8);
			Bits.FlushUnseen(3); // reserved
			// char upc_ean_isrc_number     = Bits.Get(5);
			// char upc_ean_isrc_data       = Bits.Get(8);
			Bits.FlushUnseen(13); // replaces the commented-out code above...
			ExtraPrivateHeaderLength     = Bits.Get(8) + LPCM_DVD_AUDIO_PRIVATE_HEADER_LENGTH;
			FirstAccessUnitPointer       = Bits.Get(16);
			EmphasisFlag                 = Bits.Get(1);
			Bits.FlushUnseen(1); // reserved
			//char StereoPlayBackMode      = Bits.Get(1);
			//char DownMixCodeValidity     = Bits.Get(1);
			//char DownMixCode             = Bits.Get(4);
			Bits.FlushUnseen(6); // replaces the commented-out code above...
			WordSize1                    = (LpcmWordSize_t)Bits.Get(4);
			WordSize2                    = (LpcmWordSize_t)Bits.Get(4);
			AudioSamplingFrequency1      = (LpcmSamplingFreq_t)Bits.Get(4);
			AudioSamplingFrequency2      = (LpcmSamplingFreq_t)Bits.Get(4);
			Bits.FlushUnseen(4); // reserved
			Bits.FlushUnseen(4); // char MultiChannelType        = Bits.Get(4);
			BitShiftChannel2             = Bits.Get(3);
			ChannelAssignment            = Bits.Get(5);
			DynamicRangeControl          = Bits.Get(8);
			// sanity checks...
			if ((SubStreamId & LPCM_DVD_AUDIO_SUBSTREAM_ID_MASK) != LPCM_DVD_AUDIO_SUBSTREAM_ID)
			{
				FRAME_ERROR("Invalid sub stream identifier (%x)\n", SubStreamId);
				return FrameParserError;
			}
			if ((WordSize1 > LpcmWordSize24) || ((WordSize2 > LpcmWordSize24) && (WordSize2 != LPCM_DVD_AUDIO_NO_CH_GR2)))
			{
				FRAME_ERROR("Invalid quantization word length\n");
				return FrameParserError;
			}
			if (AudioSamplingFrequency1 >= LpcmSamplingFreqLast)
			{
				FRAME_ERROR("Invalid Sampling Frequency (%d)\n", AudioSamplingFrequency1);
				return FrameParserError;
			}
			// the 'not specified' lies outside the range of LpcmSamplingFreqLast - bring it inside
			if (AudioSamplingFrequency2 == LpcmSamplingFreqNotSpecififed)
			{
				AudioSamplingFrequency2 = LpcmSamplingFreqNone;
			}
			if (AudioSamplingFrequency2 >= LpcmSamplingFreqLast)
			{
				FRAME_ERROR("Invalid Sampling Frequency (%d)\n", AudioSamplingFrequency2);
				return FrameParserError;
			}
			if (ChannelAssignment > 20)
			{
				FRAME_ERROR("Invalid channel assignment (%d)\n", ChannelAssignment);
				return FrameParserError;
			}
			//
			int NbSamples1 = LpcmDVDAudioSampleCount[AudioSamplingFrequency1];
			int NbSamples2 = LpcmDVDAudioSampleCount[AudioSamplingFrequency2];
			char NumberOfAudioChannels1 = DVDAudioChannelAssignment2ChannelCount1[ChannelAssignment];
			char NumberOfAudioChannels2 = DVDAudioChannelAssignment2ChannelCount2[ChannelAssignment];
			NumberOfAudioChannels = NumberOfAudioChannels2 + NumberOfAudioChannels1;
			int AudioFrameSize1, AudioFrameSize2;
			NbSamples = NbSamples1;
			if (WordSize1 == LPCM_DVD_WS_20)
			{
				// 20-bit special case: the extra 4 bits of each sample are packed after
				// the first 16-bit parts of a pair of samples ...
				// DVD Specifications for Read Only Disc / Part 3: Video Specifications
				// 5. Video Object / 5.4 Presentation Data / Figure 5.4.2.1-2
				AudioFrameSize1 = (NbSamples1 / 2) * NumberOfAudioChannels1 * 5;
			}
			else
			{
				AudioFrameSize1 = NbSamples1 * NumberOfAudioChannels1 * BytesPerSample[WordSize1];
			}
			if (WordSize2 == LPCM_DVD_WS_20)
			{
				// 20-bit special case: the extra 4 bits of each sample are packed after
				// the first 16-bit parts of a pair of samples ...
				// DVD Specifications for Read Only Disc / Part 3: Video Specifications
				// 5. Video Object / 5.4 Presentation Data / Figure 5.4.2.1-2
				AudioFrameSize2 = (NbSamples2 / 2) * NumberOfAudioChannels2 * 5;
			}
			else
			{
				AudioFrameSize2 = NbSamples2 * NumberOfAudioChannels2 * BytesPerSample[WordSize2];
			}
			AudioFrameSize = AudioFrameSize1 + AudioFrameSize2;
			break;
		}
		case TypeLpcmDVDHD:
		{
			///< frame is a DVD HD lpcm
			SubStreamId                  = Bits.Get(8);
			NbAccessUnits                = Bits.Get(8);
			FirstAccessUnitPointer       = Bits.Get(16);
			EmphasisFlag                 = Bits.Get(1);
			MuteFlag                     = Bits.Get(1);
			AudioFrameNumber             = Bits.Get(5);
			WordSize1                    = (LpcmWordSize_t)Bits.Get(2);
			AudioSamplingFrequency1      = (LpcmSamplingFreq_t)Bits.Get(3);
			NumberOfAudioChannels        = Bits.Get(4) + 1;
			DynamicRangeControl          = Bits.Get(8);
			Bits.FlushUnseen(3); // reserved
			//char DownMixCodeValidity     = Bits.Get(1);
			//char DownMixCode             = Bits.Get(4);
			Bits.FlushUnseen(5); // replaces the commented-out code above...
			Bits.FlushUnseen(3); // reserved
			ChannelAssignment            = Bits.Get(5);
			// sanity checks...
			if ((SubStreamId & LPCM_DVD_VIDEO_SUBSTREAM_ID_MASK) != LPCM_DVD_VIDEO_SUBSTREAM_ID)
			{
				FRAME_ERROR("Invalid sub stream identifier (%x)\n", SubStreamId);
				return FrameParserError;
			}
			if ((AudioFrameNumber >= 20) && (AudioFrameNumber != 31))
			{
				FRAME_ERROR("Invalid audio frame number (%d)\n", AudioFrameNumber);
				return FrameParserError;
			}
			if (WordSize1 > LpcmWordSize24)
			{
				FRAME_ERROR("Invalid quantization word length (%d)\n", WordSize1);
				return FrameParserError;
			}
			if (NumberOfAudioChannels > 8)
			{
				FRAME_ERROR("Invalid number of audio channels (%d)\n", NumberOfAudioChannels);
				return FrameParserError;
			}
			if (AudioSamplingFrequency1 > LpcmSamplingFreq192)
			{
				FRAME_ERROR("Invalid Sampling Frequency (%d)\n", AudioSamplingFrequency1);
				return FrameParserError;
			}
			NbSamples = LpcmDVDAudioSampleCount[AudioSamplingFrequency1];
			if (WordSize1 == LPCM_DVD_WS_20)
			{
				// 20-bit special case: the extra 4 bits of each sample are packed after
				// the first 16-bit parts of a pair of samples ...
				// DVD Specifications for Read Only Disc / Part 3: Video Specifications
				// 5. Video Object / 5.4 Presentation Data / Figure 5.4.2.1-2
				AudioFrameSize = (NbSamples / 2) * NumberOfAudioChannels * 5;
			}
			else
			{
				AudioFrameSize = NbSamples * NumberOfAudioChannels * BytesPerSample[WordSize1];
			}
			break;
		}
		case TypeLpcmDVDBD:
		case TypeLpcmSPDIFIN:
		{
			unsigned int  Sfreq;
			unsigned char BitsPerSample;
			unsigned int  BytesPerSample;
			///< frame is a BD lpcm or SPDIFIN
			FrameSize          = Bits.Get(16);
			ChannelAssignment  = Bits.Get(4);
			Sfreq              = Bits.Get(4);
			BitsPerSample      = Bits.Get(2);
			if ((ChannelAssignment == 0) || (ChannelAssignment > 11))
			{
				FRAME_ERROR("Invalid channel assignment (%d)\n", ChannelAssignment);
				return FrameParserError;
			}
			if (StreamType == TypeLpcmSPDIFIN)
			{
				EmphasisFlag            = Bits.Get(1); // Reuse StartFlag from BD spec.
				WordSize1               = (BitsPerSample) ? /* BD Definition */ (LpcmWordSize_t)(BitsPerSample - 1) : /* SPDIFIN extension */ LpcmWordSize32;
				NumberOfAudioChannels   = 2;
				// Sfreq is a 4-bit value and cannot possibly overflow the tables - invalid comes back None
				AudioSamplingFrequency1 = LpcmSpdifin2DVDSamplingFreq[Sfreq];
				BytesPerSample          = (WordSize1 < LpcmWordSize24) ? (WordSize1 + 2) : (WordSize1 + 1); /* As per BD : if WordSize == WS24 or WS20 then 3 bytes per sample */
				if (AudioSamplingFrequency1 == LpcmSamplingFreqNone)
				{
					FRAME_ERROR("Invalid sampling frequency (Sfreq %d)\n", Sfreq);
					return FrameParserError;
				}
			}
			else
			{
				// Sfreq is a 4-bit value and cannot possibly overflow the tables - invalid comes back None
				AudioSamplingFrequency1     = LpcmBD2DVDSamplingFreq[Sfreq];
				if (AudioSamplingFrequency1 == LpcmSamplingFreqNone)
				{
					FRAME_ERROR("Invalid sampling frequency (Sfreq %d)\n", Sfreq);
					return FrameParserError;
				}
				if (BitsPerSample == 0)
				{
					FRAME_ERROR("Invalid bits per sample value\n");
					return FrameParserError;
				}
				WordSize1         = (LpcmWordSize_t)(BitsPerSample - 1);
				BytesPerSample    = ((BitsPerSample >= 2) ? 3 : 2);
			}
			NumberOfAudioChannels = BDChannelAssignment2ChannelCount[ChannelAssignment];
			NbSamples             = FrameSize / (BytesPerSample * NumberOfAudioChannels);
			AudioFrameSize        = FrameSize;
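			// Worked example (illustrative): a 1536 byte BD frame of stereo 16-bit
			// audio gives 1536 / (2 bytes * 2 channels) = 384 samples per frame.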
			break;
		}
		default:
			// should not occur
			FRAME_ERROR("Internal Error: Unknown LPCM Frame type\n");
			return FrameParserError;
	}
	// sanity check on the first access unit pointer: is it outside the packet?
	if ((FirstAccessUnitPointer + FirstAccessUnitOffset[StreamType]) > GivenFrameSize)
	{
		FRAME_ERROR("Invalid First Acces Unit Pointer value (%d)\n", FirstAccessUnitPointer);
		return FrameParserError;
	}
	else if ((FirstAccessUnitPointer + FirstAccessUnitOffset[StreamType]) < AudioPesPrivateDataLength[StreamType])
	{
		FRAME_ERROR("Invalid FirstAccessUnitPointer (%d) + FirstAccessUnitOffset (%d) must be >= AudioPesPrivateDataLength (%d)\n",
					FirstAccessUnitPointer, FirstAccessUnitOffset[StreamType], AudioPesPrivateDataLength[StreamType]);
		return FrameParserError;
		//FirstAccessUnitPointer = AudioPesPrivateDataLength[StreamType] - FirstAccessUnitOffset[StreamType];
	}
	// compute the real number of access units, according to the frame size
	{
		unsigned int Payload          = (GivenFrameSize - (FirstAccessUnitPointer + FirstAccessUnitOffset[StreamType]));
		FRAME_DEBUG("Payload %d , GivenFrameSize %d\n",
					Payload, GivenFrameSize);
		// round the number of access units up so that the whole payload is covered
		NbAccessUnits     = Payload / AudioFrameSize;
		NbAccessUnits     = ((NbAccessUnits * AudioFrameSize) < Payload) ? NbAccessUnits + 1 : NbAccessUnits;
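		// e.g. a 1000 byte payload with 400 byte access units gives 1000/400 = 2,
		// and since 2 * 400 < 1000 this rounds up to 3 units (1200 bytes below)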
		FrameSize =  NbAccessUnits * AudioFrameSize;
		NbSamples *= NbAccessUnits;
	}
	FRAME_DEBUG("SamplingFreq %d Hz, FrameSize %d, Type %d, WordSize %d, Aud Frame Id %d\n",
				LpcmDVDSamplingFreq[AudioSamplingFrequency1], FrameSize, StreamType, WordSize1, AudioFrameNumber);
	FRAME_DEBUG("FirstAccessUnitPointer %d, NbAccessUnits %d, Nb Channels %d, Nb Samples %d\n",
				FirstAccessUnitPointer, NbAccessUnits, NumberOfAudioChannels, NbSamples);
	if (StreamType == TypeLpcmDVDAudio)
	{
		FRAME_DEBUG("GR2 properties: SamplingFreq %d Hz, WordSize2 %d\n",
					LpcmDVDSamplingFreq[AudioSamplingFrequency2], WordSize2);
	}
	// we will send a whole audio frame
	NextParsedFrameHeader->Type                = StreamType;
	NextParsedFrameHeader->SamplingFrequency1  = AudioSamplingFrequency1;
	NextParsedFrameHeader->SamplingFrequency2  = AudioSamplingFrequency2;
	NextParsedFrameHeader->NumberOfSamples     = NbSamples;
	NextParsedFrameHeader->NumberOfChannels    = NumberOfAudioChannels;
	NextParsedFrameHeader->Length              = FrameSize;
	NextParsedFrameHeader->WordSize1           = WordSize1;
	NextParsedFrameHeader->WordSize2           = WordSize2;
	NextParsedFrameHeader->NbAccessUnits       = NbAccessUnits;
	NextParsedFrameHeader->FirstAccessUnitPointer = FirstAccessUnitPointer;
	NextParsedFrameHeader->DrcCode             = DynamicRangeControl;
	NextParsedFrameHeader->BitShiftChannel2    = BitShiftChannel2;
	NextParsedFrameHeader->EmphasisFlag        = EmphasisFlag;
	NextParsedFrameHeader->MuteFlag            = MuteFlag;
	NextParsedFrameHeader->PrivateHeaderLength = ((StreamType == TypeLpcmDVDAudio) ? ExtraPrivateHeaderLength : AudioPesPrivateDataLength[StreamType]);
	NextParsedFrameHeader->AudioFrameNumber    = AudioFrameNumber;
	NextParsedFrameHeader->SubStreamId         = SubStreamId;
	NextParsedFrameHeader->ChannelAssignment   = ChannelAssignment;
	NextParsedFrameHeader->AudioFrameSize      = AudioFrameSize;
	return FrameParserNoError;
}
////////////////////////////////////////////////////////////////////////////
///
/// Examine the supplied frame header and extract the information contained within.
///
/// This is a utility function shared by the frame parser and the equivalent
/// collator. Due to its shared nature this is a static method and does not access
/// any information not provided via the function arguments.
///
/// ** AAC format **
///
/// AAAAAAAA AAAABCCD EEFFFFGH HHIJKLMM
/// MMMMMMMM MMMNNNNN NNNNNNOO ........
///
/// Sign            Length          Position         Description
///
/// A                12             (31-20)          Sync code
/// B                 1              (19)            ID
/// C                 2             (18-17)          layer
/// D                 1              (16)            protect absent
/// E                 2             (15-14)          profile
/// F                 4             (13-10)          sample freq index
/// G                 1              (9)             private
/// H                 3             (8-6)            channel config
/// I                 1              (5)             original/copy
/// J                 1              (4)             home
/// K                 1              (3)             copyright id
/// L                 1              (2)             copyright start
/// M                 13         (1-0,31-21)         frame length
/// N                 11           (20-10)           adts buffer fullness
/// O                 2             (9-8)            num of raw data blocks in frame
///
/////////////////////////////////////////////////////////////////////////////////////////
//
//      Private - parse a frame header (NOTE do we already know it has a valid sync word?).
//
///
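/// As a sketch of the detection cascade implemented below, the sync word is
/// widened until one of the supported formats matches:
///
/// <pre>
/// 11 bits   AAC_AUDIO_LOAS_ASS_SYNC_WORD     LOAS AudioSyncStream
/// 12 bits   AAC_AUDIO_ADTS_SYNC_WORD         ADTS
/// 16 bits   AAC_AUDIO_LOAS_EPASS_SYNC_WORD   LOAS EPAudioSyncStream
/// 32 bits   AAC_AUDIO_ADIF_SYNC_WORD         ADIF (reported as unplayable)
/// </pre>
///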
/// \return Frame parser status code, FrameParserNoError indicates success.
///
FrameParserStatus_t FrameParser_AudioAac_c::ParseFrameHeader(unsigned char *FrameHeaderBytes,
		AacAudioParsedFrameHeader_t *ParsedFrameHeader,
		int AvailableBytes,
		AacFrameParsingPurpose_t Action,
		bool EnableHeaderUnplayableErrors)
{
	unsigned int    SamplingFrequency = 0;
	unsigned int    SampleCount = 0;
	unsigned int    FrameSize = 0;
	BitStreamClass_c    Bits;
	AacFormatType_t Type;
	Bits.SetPointer(FrameHeaderBytes);
	unsigned int Sync = Bits.Get(11);
	if (Sync == AAC_AUDIO_LOAS_ASS_SYNC_WORD)
	{
		Type = AAC_AUDIO_LOAS_FORMAT;
		FrameSize = Bits.Get(13) + AAC_LOAS_ASS_SYNC_LENGTH_HEADER_SIZE;
		if (FrameSize > AAC_LOAS_ASS_MAX_FRAME_SIZE)
		{
			FRAME_COND_ERROR("Invalid frame size (%d bytes)\n", FrameSize);
			return FrameParserError;
		}
		if (FrameParserNoError != FrameParser_AudioAac_c::ParseAudioMuxElementConfig(&Bits,
				&SamplingFrequency,
				&SampleCount,
				AvailableBytes - AAC_LOAS_ASS_SYNC_LENGTH_HEADER_SIZE,
				Action))
		{
			return FrameParserError;
		}
	}
	else
	{
		// get more bits
		Sync |= Bits.Get(1) << 11;
		if (Sync == AAC_AUDIO_ADTS_SYNC_WORD)
		{
			Type = AAC_AUDIO_ADTS_FORMAT;
			Bits.FlushUnseen(1); //bool ID = Bits.Get(1);
			unsigned char Layer = Bits.Get(2);
			if (Layer != 0)
			{
				FRAME_COND_ERROR("Invalid AAC layer %d\n", Layer);
				return FrameParserError;
			}
			Bits.FlushUnseen(1); //protection_absent;
			unsigned int profile_ObjectType = Bits.Get(2);
			if ((profile_ObjectType + 1) != AAC_AUDIO_PROFILE_LC)
			{
				if (EnableHeaderUnplayableErrors)
				{
					FRAME_COND_ERROR("Unsupported AAC profile in ADTS: %d\n", profile_ObjectType);
					return FrameParserHeaderUnplayable;
				}
			}
			unsigned char sampling_frequency_index = Bits.Get(4);
			if (sampling_frequency_index > AAC_MAX_SAMPLE_RATE_IDX)
			{
				FRAME_COND_ERROR("Invalid sampling frequency index %d\n", sampling_frequency_index);
				return FrameParserError;
			}
			// multiply the sampling freq by two in case an SBR object is present
			SamplingFrequency = aac_sample_rates[sampling_frequency_index] * 2;
			Bits.FlushUnseen(1); //private_bit
			unsigned char channel_configuration = Bits.Get(3);
			if (channel_configuration > AAC_MAX_CHANNELS_IDX)
			{
				FRAME_COND_ERROR("Invalid channel configuration %d\n", channel_configuration);
				return FrameParserError;
			}
			Bits.FlushUnseen(1 + 1 + 1 + 1); //original/copy, home, copyright_identification_bit, copyright_identification_start
			FrameSize = Bits.Get(13); // aac_frame_length
			if (FrameSize < AAC_ADTS_MIN_FRAME_SIZE)
			{
				FRAME_COND_ERROR("Invalid frame size (%d bytes)\n", FrameSize);
				return FrameParserError;
			}
			Bits.FlushUnseen(11); //adts_buffer_fullness
			unsigned int no_raw_data_blocks_in_frame = Bits.Get(2);
			// multiply the sample count by two in case an SBR object is present
			SampleCount         = (no_raw_data_blocks_in_frame + 1) * 1024 * 2 ;
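			// e.g. a typical ADTS frame carries a single raw data block (field value
			// 0), so SampleCount here is (0 + 1) * 1024 * 2 = 2048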
		}
		else
		{
			Sync |= Bits.Get(4) << 12;
			if (Sync == AAC_AUDIO_LOAS_EPASS_SYNC_WORD)
			{
				Type = AAC_AUDIO_LOAS_FORMAT;
				Bits.FlushUnseen(4); //futureUse
				FrameSize = Bits.Get(13) + AAC_LOAS_EP_ASS_HEADER_SIZE;
				// continue the parsing to get more info about the frame
				Bits.FlushUnseen(5 + 18); //frameCounter, headerParity
				AvailableBytes -= AAC_LOAS_EP_ASS_HEADER_SIZE;
				// now parse the EPMuxElement...
				bool epUsePreviousMuxConfig = Bits.Get(1);
				Bits.FlushUnseen(2); //epUsePreviousMuxConfigParity
				if (!epUsePreviousMuxConfig)
				{
					unsigned int epSpecificConfigLength = Bits.Get(10);
					unsigned int epSpecificConfigLengthParity = Bits.Get(11);
					AvailableBytes -= 3;
					if (AvailableBytes > 0)
					{
						Bits.FlushUnseen(epSpecificConfigLength * 8); //ErrorProtectionSpecificConfig
						Bits.FlushUnseen(epSpecificConfigLengthParity * 8); //ErrorProtectionSpecificConfigParity
						AvailableBytes -= epSpecificConfigLength + epSpecificConfigLengthParity;
					}
				}
				else
				{
					Bits.FlushUnseen(5); //ByteAlign()
					AvailableBytes -= 1;
				}
				if (FrameParserNoError != FrameParser_AudioAac_c::ParseAudioMuxElementConfig(&Bits,
						&SamplingFrequency,
						&SampleCount,
						AvailableBytes,
						Action))
				{
					return FrameParserError;
				}
			}
			else
			{
				Sync |= Bits.Get(16) << 16;
				if (Sync == AAC_AUDIO_ADIF_SYNC_WORD)
				{
					Type = AAC_AUDIO_ADIF_FORMAT;
					FRAME_COND_ERROR("The AAC ADIF format is not supported yet!\n");
					return FrameParserHeaderUnplayable;
				}
				else
				{
					FRAME_COND_ERROR("Unknown Synchronization (0x%x)\n", Sync);
					return FrameParserError;
				}
			}
		}
	}
	ParsedFrameHeader->SamplingFrequency = SamplingFrequency;
	ParsedFrameHeader->NumberOfSamples   = SampleCount;
	ParsedFrameHeader->Length            = FrameSize;
	ParsedFrameHeader->Type              = Type;
	return FrameParserNoError;
}