Пример #1
0
bool CCoreAudioAE::OpenCoreAudio(unsigned int sampleRate, bool forceRaw,
  enum AEDataFormat rawDataFormat)
{
  // (Re)opens the CoreAudio output: reaps destroyed streams, decides between
  // raw passthrough and PCM output, builds the desired AEAudioFormat and
  // initializes the HAL with it, then readjusts the format if the hardware
  // rejected the requested channel count.
  // NOTE(review): the function body continues beyond this excerpt.

  // remove any deleted streams
  CSingleLock streamLock(m_streamLock);
  for (StreamList::iterator itt = m_streams.begin(); itt != m_streams.end();)
  {
    CCoreAudioAEStream *stream = *itt;
    if (stream->IsDestroyed())
    {
      // erase() yields the next valid iterator; skip the ++itt below
      itt = m_streams.erase(itt);
      delete stream;
      continue;
    }
    else
    {
      // close all converter
      stream->CloseConverter();
    }
    ++itt;
  }

  /* override the sample rate based on the oldest stream if there is one */
  if (!m_streams.empty())
    sampleRate = m_streams.front()->GetSampleRate();

  // Passthrough is either forced by the caller or inherited from the
  // oldest live stream.
  if (forceRaw)
    m_rawPassthrough = true;
  else
    m_rawPassthrough = !m_streams.empty() && m_streams.front()->IsRaw();
  streamLock.Leave();

  if (m_rawPassthrough)
    CLog::Log(LOGINFO, "CCoreAudioAE::OpenCoreAudio - RAW passthrough enabled");

  // NOTE(review): local variable carries the m_ member prefix — consider
  // renaming to avoid confusion with a class member.
  std::string m_outputDevice =  g_guiSettings.GetString("audiooutput.audiodevice");

  // on iOS devices we set fixed to two channels.
  m_stdChLayout = AE_CH_LAYOUT_2_0;
#if defined(TARGET_DARWIN_OSX)
  // Map the GUI channel-layout setting index to an AE channel layout.
  switch (g_guiSettings.GetInt("audiooutput.channellayout"))
  {
    default:
    case  0: m_stdChLayout = AE_CH_LAYOUT_2_0; break; /* do not allow 1_0 output */
    case  1: m_stdChLayout = AE_CH_LAYOUT_2_0; break;
    case  2: m_stdChLayout = AE_CH_LAYOUT_2_1; break;
    case  3: m_stdChLayout = AE_CH_LAYOUT_3_0; break;
    case  4: m_stdChLayout = AE_CH_LAYOUT_3_1; break;
    case  5: m_stdChLayout = AE_CH_LAYOUT_4_0; break;
    case  6: m_stdChLayout = AE_CH_LAYOUT_4_1; break;
    case  7: m_stdChLayout = AE_CH_LAYOUT_5_0; break;
    case  8: m_stdChLayout = AE_CH_LAYOUT_5_1; break;
    case  9: m_stdChLayout = AE_CH_LAYOUT_7_0; break;
    case 10: m_stdChLayout = AE_CH_LAYOUT_7_1; break;
  }
#endif
  // force optical/coax to 2.0 output channels
  if (!m_rawPassthrough && g_guiSettings.GetInt("audiooutput.mode") == AUDIO_IEC958)
    m_stdChLayout = AE_CH_LAYOUT_2_0;

  // setup the desired format
  m_format.m_channelLayout = CAEChannelInfo(m_stdChLayout);

  // if there is an audio resample rate set, use it.
  if (g_advancedSettings.m_audioResample && !m_rawPassthrough)
  {
    sampleRate = g_advancedSettings.m_audioResample;
    CLog::Log(LOGINFO, "CCoreAudioAE::passthrough - Forcing samplerate to %d", sampleRate);
  }

  if (m_rawPassthrough)
  {
    // Passthrough: each codec dictates a fixed layout/rate/format for its
    // encoded bitstream framing.
    switch (rawDataFormat)
    {
      case AE_FMT_AC3:
      case AE_FMT_DTS:
        m_format.m_channelLayout = CAEChannelInfo(AE_CH_LAYOUT_2_0);
        m_format.m_sampleRate   = 48000;
        m_format.m_dataFormat   = AE_FMT_S16NE;
        break;
      case AE_FMT_EAC3:
        m_format.m_channelLayout = CAEChannelInfo(AE_CH_LAYOUT_2_0);
        m_format.m_sampleRate   = 192000;
        m_format.m_dataFormat   = AE_FMT_S16NE;
        break;
      case AE_FMT_DTSHD:
      case AE_FMT_TRUEHD:
        m_format.m_channelLayout = CAEChannelInfo(AE_CH_LAYOUT_7_1);
        m_format.m_sampleRate   = 192000;
        m_format.m_dataFormat   = AE_FMT_S16NE;
        break;
      case AE_FMT_LPCM:
        m_format.m_channelLayout = CAEChannelInfo(AE_CH_LAYOUT_7_1);
        m_format.m_sampleRate   = sampleRate;
        m_format.m_dataFormat   = AE_FMT_FLOAT;
        break;
      default:
        break;
    }
  }
  else
  {
    // PCM path: float samples at the (possibly overridden) sample rate.
    m_format.m_sampleRate       = sampleRate;
    m_format.m_channelLayout    = CAEChannelInfo(m_stdChLayout);
    m_format.m_dataFormat       = AE_FMT_FLOAT;
  }

  m_format.m_encodedRate = 0;

  if (m_outputDevice.empty())
    m_outputDevice = "default";

  // Keep a working copy: the HAL may adjust the format during Initialize.
  AEAudioFormat initformat = m_format;

  // initialize audio hardware
  m_Initialized = HAL->Initialize(this, m_rawPassthrough, initformat, rawDataFormat, m_outputDevice, m_volume);

  unsigned int bps         = CAEUtil::DataFormatToBits(m_format.m_dataFormat);
  m_chLayoutCount          = m_format.m_channelLayout.Count();
  // frame size = bytes per sample (bps/8) * channel count
  m_format.m_frameSize     = (bps>>3) * m_chLayoutCount; //initformat.m_frameSize;
  //m_format.m_frames        = (unsigned int)(((float)m_format.m_sampleRate / 1000.0f) * (float)DELAY_FRAME_TIME);
  //m_format.m_frameSamples  = m_format.m_frames * m_format.m_channelLayout.Count();

  if ((initformat.m_channelLayout.Count() != m_chLayoutCount) && !m_rawPassthrough)
  {
    /* readjust parameters. hardware didn't accept channel count*/
    CLog::Log(LOGINFO, "CCoreAudioAE::Initialize: Setup channels (%d) greater than possible hardware channels (%d).",
              m_chLayoutCount, initformat.m_channelLayout.Count());

    m_format.m_channelLayout = CAEChannelInfo(initformat.m_channelLayout);
    m_chLayoutCount          = m_format.m_channelLayout.Count();
    m_format.m_frameSize     = (bps>>3) * m_chLayoutCount; //initformat.m_frameSize;
    //m_format.m_frameSamples  = m_format.m_frames * m_format.m_channelLayout.Count();
  }
Пример #2
0
// would be called by parser thread
bool
FLVParser::parseNextTag(bool index_only)
{
	// Parse (or merely index) the next FLV tag from the input stream.
	//
	// @param index_only  when true only the seek index is updated; no
	//                    frames are pushed to the decode queues.
	// @return true when a tag was consumed (including empty tags),
	//         false on seek/read failure, parse failure, completion,
	//         or an unknown tag type.

	// lock the stream while reading from it, so actionscript
	// won't mess with the parser on seek  or on getBytesLoaded
	boost::mutex::scoped_lock streamLock(_streamMutex);

	if ( index_only && _indexingCompleted ) return false; 
	if ( _parsingComplete ) return false;

	if ( _seekRequest )
	{
		clearBuffers();
		_seekRequest = false;
	}

	// Indexing and parsing each track their own cursor and completion
	// flag; bind to the pair matching this invocation's mode.
	boost::uint64_t& position = index_only ? _nextPosToIndex : _lastParsedPosition;
	bool& completed = index_only ? _indexingCompleted : _parsingComplete;

	unsigned long thisTagPos = position;

	// Seek to the next tag, skipping the 4-byte "previous tag size" record
	if (!_stream->seek(thisTagPos+4))
	{
            log_error(_("FLVParser::parseNextTag: can't seek to %d"),
                      thisTagPos+4);

		completed = true;
		return false;
	}

	// Read the 11-byte tag header plus the first byte of the body
	boost::uint8_t chunk[12];
	int actuallyRead = _stream->read(chunk, 12);
	if ( actuallyRead < 12 )
	{
		if ( actuallyRead )
                    log_error(_("FLVParser::parseNextTag: can't read tag info "
                                "(needed 12 bytes, only got %d)"), actuallyRead);
		// else { assert(_stream->eof(); } ?

		completed = true;

        // update bytes loaded
        boost::mutex::scoped_lock lock(_bytesLoadedMutex);
		_bytesLoaded = _stream->tell(); 
		return false;
	}

	FLVTag flvtag(chunk);

	// Advance the cursor past this tag: 11-byte header + body +
	// 4-byte trailing "previous tag size" record.
    // May be _lastParsedPosition OR _nextPosToIndex
    position += 15 + flvtag.body_size; 

	bool doIndex = (_lastParsedPosition+4 > _nextPosToIndex) || index_only;
	if ( _lastParsedPosition > _nextPosToIndex )
	{
		// NOTE(review): an old debug line suggested _lastParsedPosition+4
		// here while the code assigns without the +4 — confirm intent.
		_nextPosToIndex = _lastParsedPosition;
	}

	if ( position > _bytesLoaded ) {
		boost::mutex::scoped_lock lock(_bytesLoadedMutex);
		_bytesLoaded = position;
	}

	// check for empty tag
	if (flvtag.body_size == 0) return true;

	if (flvtag.type == FLV_AUDIO_TAG)
	{
		// chunk[11] is the first body byte: the audio tag flags
		FLVAudioTag audiotag(chunk[11]);

		if (doIndex) {
			indexAudioTag(flvtag, thisTagPos);
			if (index_only) {
				return true;
			}
		}


		std::auto_ptr<EncodedAudioFrame> frame = 
            parseAudioTag(flvtag, audiotag, thisTagPos);
		if (!frame.get()) {
			return false;
		}
		// Release the stream lock 
		// *before* pushing the frame as that 
		// might block us waiting for buffers flush
		// the _qMutex...
		// We've done using the stream for this tag parsing anyway
		streamLock.unlock();
		pushEncodedAudioFrame(frame);
	}
	else if (flvtag.type == FLV_VIDEO_TAG)
	{
		// chunk[11] is the first body byte: the video tag flags
		FLVVideoTag videotag(chunk[11]);

		if (doIndex) {
			indexVideoTag(flvtag, videotag, thisTagPos);
			if (index_only) {
				return true;
			}
		}

		std::auto_ptr<EncodedVideoFrame> frame = 
            parseVideoTag(flvtag, videotag, thisTagPos);
		if (!frame.get()) {
			return false;
		}

		// Release the stream lock 
		// *before* pushing the frame as that 
		// might block us waiting for buffers flush
		// the _qMutex...
		streamLock.unlock();
		pushEncodedVideoFrame(frame);

	}
	else if (flvtag.type == FLV_META_TAG)
	{
		if ( chunk[11] != 2 )
		{
			// ::processTags relies on the first AMF0 value being a string...
			log_unimpl(_("First byte of FLV_META_TAG is %d, expected "
                        "0x02 (STRING AMF0 type)"),
                    static_cast<int>(chunk[11]));
		}
		// Extract information from the meta tag.
		// The first body byte was already consumed with the header read,
		// so only body_size-1 bytes remain to fetch.
		std::auto_ptr<SimpleBuffer> metaTag(new SimpleBuffer(
                    flvtag.body_size-1));
		size_t actuallyRead = _stream->read(metaTag->data(),
                flvtag.body_size - 1);

        if ( actuallyRead < flvtag.body_size-1 )
		{
                    // Report the byte count actually requested (body_size-1),
                    // not the full body size, so the log matches the read above.
                    log_error(_("FLVParser::parseNextTag: can't read metaTag (%d) "
                                "body (needed %d bytes, only got %d)"),
				FLV_META_TAG, flvtag.body_size - 1, actuallyRead);
			return false;
		}
		metaTag->resize(actuallyRead);

		// AMF0 object end marker (0x09) should terminate the meta payload
		boost::uint32_t terminus = getUInt24(metaTag->data() +
                actuallyRead - 3);

        if (terminus != 9) {
			log_error(_("Corrupt FLV: Meta tag unterminated!"));
		}

		boost::mutex::scoped_lock lock(_metaTagsMutex);
		_metaTags.insert(std::make_pair(flvtag.timestamp, MetaTags::mapped_type(metaTag)));
	}
	else
	{
		log_error(_("FLVParser::parseNextTag: unknown FLV tag type %d"),
                static_cast<int>(chunk[0]));
		return false;
	}

	// Cross-check the trailing big-endian "previous tag size" record
	// against this tag's size (header 11 bytes + body).
	_stream->read(chunk, 4);
	boost::uint32_t prevtagsize = chunk[0] << 24 | chunk[1] << 16 |
        chunk[2] << 8 | chunk[3];
	if (prevtagsize != flvtag.body_size + 11) {
		log_error(_("Corrupt FLV: previous tag size record (%1%) unexpected "
                    "(actual size: %2%)"), prevtagsize, flvtag.body_size + 11);
	}

	return true;
}