Example #1
0
/**
 * Convert planar PCM (all samples of channel 0, then all of channel 1, ...)
 * into interleaved PCM audio blocks and queue the result.
 *
 * @param pPacket  Input packet holding planar PCM. Ownership is taken: the
 *                 packet is either queued directly (mono / pass-through) or
 *                 deleted after its payload has been re-interleaved.
 * @return         Result of Queue() on the delivered packet.
 */
HRESULT CStreamParser::ParsePlanarPCM(Packet *pPacket)
{
  CMediaType mt = m_pPin->GetActiveMediaType();

  WORD nChannels = 0, nBPS = 0, nBlockAlign = 0;
  audioFormatTypeHandler(mt.Format(), mt.FormatType(), nullptr, &nChannels, &nBPS, &nBlockAlign, nullptr);

  // Mono needs no special handling. Degenerate formats (no channels, or
  // samples smaller than one byte) cannot be interleaved: nChannels == 0
  // would divide by zero below and nBPS < 8 would make the loop increment
  // zero (infinite loop) - pass those through untouched as well.
  if (nChannels <= 1 || nBPS < 8)
    return Queue(pPacket);

  Packet *out = new Packet();
  out->CopyProperties(pPacket);
  out->SetDataSize(pPacket->GetDataSize());

  const int nBytesPerChannel = nBPS / 8;
  // Size of one channel plane; also the number of input bytes to walk per channel.
  const int nAudioBlocks = pPacket->GetDataSize() / nChannels;
  BYTE *out_data = out->GetData();
  const BYTE *in_data = pPacket->GetData();

  for (int i = 0; i < nAudioBlocks; i += nBytesPerChannel) {
    // interleave the channels into audio blocks
    for (int c = 0; c < nChannels; c++) {
      memcpy(out_data + (c * nBytesPerChannel), in_data + (nAudioBlocks * c), nBytesPerChannel);
    }
    // Skip to the next output block
    out_data += nChannels * nBytesPerChannel;

    // skip to the next input sample
    in_data += nBytesPerChannel;
  }

  // BUGFIX: the input packet was leaked here - its payload has been copied
  // into 'out', so it must be freed before queueing the replacement.
  delete pPacket;

  return Queue(out);
}
Example #2
0
/**
 * Produce the next video packet for a sparse-video clip. When a real packet
 * is due (its start time is within ONE_SECOND of the current sparse reference
 * packet) it becomes the new reference; otherwise the reference packet is
 * nudged forward by HALF_SECOND/5 and re-delivered as fake data so downstream
 * keeps receiving video.
 *
 * @param rtStart  Requested start time (currently unused by the body).
 * @return         A newly allocated copy of the sparse reference packet,
 *                 a packet taken from the queue, or NULL when nothing is
 *                 available. Caller owns the returned packet.
 */
Packet* CClip::GenerateSparseVideo(REFERENCE_TIME rtStart)
{
  Packet * ret = NULL;
  if (!SparseVideoAvailable() && m_vecClipVideoPackets.size()==0) return ret;

  if (m_pSparseVideoPacket != NULL)
  {
    // Assume we must repeat the reference packet unless a real one is promoted.
    bool repeatReference = true;

    if (m_vecClipVideoPackets.size()>0)
    {
      Packet * pBDPacket = m_vecClipVideoPackets[0];
      if (m_pSparseVideoPacket->rtStart + ONE_SECOND > pBDPacket->rtStart)
      {
        ivecVideoBuffers it = m_vecClipVideoPackets.begin();
        if ((*it)->rtStart != Packet::INVALID_TIME)
        {
          // A timestamped packet is due: it replaces the reference packet.
          delete m_pSparseVideoPacket;
          m_pSparseVideoPacket = *it;
          m_vecClipVideoPackets.erase(it);
          repeatReference = false;
        }
        else
        {
          // Drop the timeless packet. It was owned by the vector, so free it
          // (BUGFIX: previously erased without delete - leaked).
          Packet * pInvalid = *it;
          it = m_vecClipVideoPackets.erase(it);
          delete pInvalid;
          if (m_vecClipVideoPackets.empty())
          {
            // BUGFIX: 'it' is end() here - the old code dereferenced it.
            sparseVideo = false;
            return NULL;
          }
          // NOTE(review): returned packet is NOT removed from the vector;
          // preserved from the original code - verify caller ownership.
          return *it;
        }
      }
    }

    if (repeatReference)
    {
      // No real packet due yet: advance the reference and flag it as fake.
      // (Consolidates the two identical blocks of the original.)
      m_pSparseVideoPacket->rtStart += HALF_SECOND/5;
      m_pSparseVideoPacket->rtStop  += HALF_SECOND/5;
      m_pSparseVideoPacket->bFakeData = true;
    }

    // Hand the caller its own copy of the reference packet.
    ret = new Packet();
    ret->SetData(m_pSparseVideoPacket->GetData(), m_pSparseVideoPacket->GetDataSize());
    ret->CopyProperties(*m_pSparseVideoPacket);
  }
  else
  {
    // No sparse reference yet - try to establish one from the queue.
    if (m_vecClipVideoPackets.size()>0)
    {
      ivecVideoBuffers it = m_vecClipVideoPackets.begin();
      if ((*it)->rtStart != Packet::INVALID_TIME)
      {
        m_pSparseVideoPacket = *it;
        m_vecClipVideoPackets.erase(it);
        ret = new Packet();
        ret->SetData(m_pSparseVideoPacket->GetData(), m_pSparseVideoPacket->GetDataSize());
        ret->CopyProperties(*m_pSparseVideoPacket);
      }
      else
      {
        // Same timeless-packet handling as above (leak + end() fixes applied).
        Packet * pInvalid = *it;
        it = m_vecClipVideoPackets.erase(it);
        delete pInvalid;
        if (m_vecClipVideoPackets.empty())
        {
          sparseVideo = false;
          return NULL;
        }
        return *it;
      }
    }
  }
  return ret;
}
}