Example #1
static uint32_t GetChannelMap(const CAEChannelInfo &channelLayout, bool passthrough)
{
  unsigned int channels = channelLayout.Count();
  uint32_t channel_map = 0;
  if (passthrough)
    return 0;

  static const unsigned char map_normal[] =
  {
    0, //AE_CH_RAW ,
    1, //AE_CH_FL
    2, //AE_CH_FR
    4, //AE_CH_FC
    3, //AE_CH_LFE
    7, //AE_CH_BL
    8, //AE_CH_BR
    1, //AE_CH_FLOC,
    2, //AE_CH_FROC,
    4, //AE_CH_BC,
    5, //AE_CH_SL
    6, //AE_CH_SR
  };
  static const unsigned char map_back[] =
  {
    0, //AE_CH_RAW ,
    1, //AE_CH_FL
    2, //AE_CH_FR
    4, //AE_CH_FC
    3, //AE_CH_LFE
    5, //AE_CH_BL
    6, //AE_CH_BR
    1, //AE_CH_FLOC,
    2, //AE_CH_FROC,
    4, //AE_CH_BC,
    5, //AE_CH_SL
    6, //AE_CH_SR
  };
  const unsigned char *map = map_normal;
  // According to CEA-861-D only RL and RR are defined. If the format has BL and BR channels
  // but no SL/SR channels, we use the back map so that only the number of channels really
  // needed is opened.
  if (channelLayout.HasChannel(AE_CH_BL) && !channelLayout.HasChannel(AE_CH_SL))
    map = map_back;

  for (unsigned int i = 0; i < channels; ++i)
  {
    AEChannel c = channelLayout[i];
    unsigned int chan = 0;
    if ((unsigned int)c < sizeof map_normal / sizeof *map_normal)
      chan = map[(unsigned int)c];
    if (chan > 0)
      channel_map |= (chan-1) << (3*i);
  }
  // These numbers are from Table 28 Audio InfoFrame Data byte 4 of CEA 861
  // and describe the speaker layout
  static const uint8_t cea_map[] = {
    0xff, // 0
    0xff, // 1
    0x00, // 2.0
    0x02, // 3.0
    0x08, // 4.0
    0x0a, // 5.0
    0xff, // 6
    0x12, // 7.0
    0xff, // 8
  };
  static const uint8_t cea_map_lfe[] = {
    0xff, // 0
    0xff, // 1
    0xff, // 2
    0x01, // 2.1
    0x03, // 3.1
    0x09, // 4.1
    0x0b, // 5.1
    0xff, // 7
    0x13, // 7.1
  };
  uint8_t cea = channelLayout.HasChannel(AE_CH_LFE) ? cea_map_lfe[channels] : cea_map[channels];
  if (cea == 0xff)
    CLog::Log(LOGERROR, "%s::%s - Unexpected CEA mapping %d,%d", CLASSNAME, __func__, channelLayout.HasChannel(AE_CH_LFE), channels);

  channel_map |= cea << 24;

  return channel_map;
}
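The packed value carries one 3-bit slot per input channel in the low 24 bits (each slot stores the target hardware index minus one) and the CEA-861 speaker-allocation byte in the top 8 bits. A minimal sketch of how a caller might decode it; the helper below is hypothetical and only illustrates the bit layout:

#include <cstdint>
#include <cstdio>

// Hypothetical helper: unpack the value produced by GetChannelMap() above.
static void DumpChannelMap(uint32_t channel_map, unsigned int channels)
{
  for (unsigned int i = 0; i < channels; ++i)
  {
    unsigned int slot = (channel_map >> (3 * i)) & 0x7; // stored as index - 1
    printf("input channel %u -> hardware channel %u\n", i, slot + 1);
  }
  printf("CEA-861 speaker allocation byte: 0x%02x\n", (unsigned int)(channel_map >> 24));
}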
Example #2
void CAEChannelInfo::AddMissingChannels(const CAEChannelInfo& rhs)
{
  for (unsigned int i = 0; i < rhs.Count(); i++)
    if (!HasChannel(rhs[i]))
      *this += rhs[i];
}
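A short usage sketch (the layouts below are chosen only for illustration):

CAEChannelInfo base;
base += AE_CH_FL;
base += AE_CH_FR;

CAEChannelInfo surround;
surround += AE_CH_FL;  surround += AE_CH_FR;  surround += AE_CH_FC;
surround += AE_CH_LFE; surround += AE_CH_BL;  surround += AE_CH_BR;

base.AddMissingChannels(surround);
// base now holds FL, FR, FC, LFE, BL, BR; channels already present are skipped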
Example #3
void CAEChannelInfo::ResolveChannels(const CAEChannelInfo& rhs)
{
  /* mono gets upmixed to dual mono */
  if (m_channelCount == 1 && m_channels[0] == AE_CH_FC)
  {
    Reset();
    *this += AE_CH_FL;
    *this += AE_CH_FR;
    return;
  }

  bool srcHasSL = false;
  bool srcHasSR = false;
  bool srcHasRL = false;
  bool srcHasRR = false;
  bool srcHasBC = false;

  bool dstHasSL = false;
  bool dstHasSR = false;
  bool dstHasRL = false;
  bool dstHasRR = false;
  bool dstHasBC = false;

  for (unsigned int c = 0; c < rhs.m_channelCount; ++c)
    switch(rhs.m_channels[c])
    {
      case AE_CH_SL: dstHasSL = true; break;
      case AE_CH_SR: dstHasSR = true; break;
      case AE_CH_BL: dstHasRL = true; break;
      case AE_CH_BR: dstHasRR = true; break;
      case AE_CH_BC: dstHasBC = true; break;
      default:
        break;
    }

  CAEChannelInfo newInfo;
  for (unsigned int i = 0; i < m_channelCount; ++i)
  {
    switch (m_channels[i])
    {
      case AE_CH_SL: srcHasSL = true; break;
      case AE_CH_SR: srcHasSR = true; break;
      case AE_CH_BL: srcHasRL = true; break;
      case AE_CH_BR: srcHasRR = true; break;
      case AE_CH_BC: srcHasBC = true; break;
      default:
        break;
    }

    bool found = false;
    for (unsigned int c = 0; c < rhs.m_channelCount; ++c)
      if (m_channels[i] == rhs.m_channels[c])
      {
        found = true;
        break;
      }

    if (found)
      newInfo += m_channels[i];
  }

  /* we need to ensure we end up with rear or side channels for downmix to work */
  if (srcHasSL && !dstHasSL && dstHasRL && !newInfo.HasChannel(AE_CH_BL))
    newInfo += AE_CH_BL;
  if (srcHasSR && !dstHasSR && dstHasRR && !newInfo.HasChannel(AE_CH_BR))
    newInfo += AE_CH_BR;
  if (srcHasRL && !dstHasRL && dstHasSL && !newInfo.HasChannel(AE_CH_SL))
    newInfo += AE_CH_SL;
  if (srcHasRR && !dstHasRR && dstHasSR && !newInfo.HasChannel(AE_CH_SR))
    newInfo += AE_CH_SR;

  // mix back center if not available in destination layout
  // prefer mixing into backs if available
  if (srcHasBC && !dstHasBC)
  {
    if (dstHasRL && !newInfo.HasChannel(AE_CH_BL))
      newInfo += AE_CH_BL;
    else if (dstHasSL && !newInfo.HasChannel(AE_CH_SL))
      newInfo += AE_CH_SL;

    if (dstHasRR && !newInfo.HasChannel(AE_CH_BR))
      newInfo += AE_CH_BR;
    else if (dstHasSR && !newInfo.HasChannel(AE_CH_SR))
      newInfo += AE_CH_SR;
  }

  *this = newInfo;
}
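For instance, resolving a 5.1 source that uses side surrounds against a sink layout that only exposes back surrounds substitutes the missing pair. A minimal sketch (layouts chosen only for illustration):

// Source decoded as 5.1 with side surrounds, sink only exposes back surrounds.
CAEChannelInfo src;
src += AE_CH_FL;  src += AE_CH_FR;  src += AE_CH_FC;
src += AE_CH_LFE; src += AE_CH_SL;  src += AE_CH_SR;

CAEChannelInfo sink;
sink += AE_CH_FL;  sink += AE_CH_FR;  sink += AE_CH_FC;
sink += AE_CH_LFE; sink += AE_CH_BL;  sink += AE_CH_BR;

src.ResolveChannels(sink);
// src keeps FL, FR, FC, LFE and gains BL/BR in place of the missing SL/SR,
// so a later downmix still has a rear pair to target.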
Example #4
CPulseAEStream::CPulseAEStream(pa_context *context, pa_threaded_mainloop *mainLoop, enum AEDataFormat format, unsigned int sampleRate, CAEChannelInfo channelLayout, unsigned int options) : m_fader(this)
{
  ASSERT(channelLayout.Count());
  m_Destroyed = false;
  m_Initialized = false;
  m_Paused = false;

  m_Stream = NULL;
  m_Context = context;
  m_MainLoop = mainLoop;

  m_format = format;
  m_sampleRate = sampleRate;
  m_channelLayout = channelLayout;
  m_options = options;

  m_DrainOperation = NULL;
  m_slave = NULL;

  pa_threaded_mainloop_lock(m_MainLoop);

  m_SampleSpec.channels = channelLayout.Count();
  m_SampleSpec.rate = m_sampleRate;

  switch (m_format)
  {
    case AE_FMT_U8    : m_SampleSpec.format = PA_SAMPLE_U8; break;
    case AE_FMT_S16NE : m_SampleSpec.format = PA_SAMPLE_S16NE; break;
    case AE_FMT_S16LE : m_SampleSpec.format = PA_SAMPLE_S16LE; break;
    case AE_FMT_S16BE : m_SampleSpec.format = PA_SAMPLE_S16BE; break;
    case AE_FMT_S24NE3: m_SampleSpec.format = PA_SAMPLE_S24NE; break;
    case AE_FMT_S24NE4: m_SampleSpec.format = PA_SAMPLE_S24_32NE; break;
    case AE_FMT_S32NE : m_SampleSpec.format = PA_SAMPLE_S32NE; break;
    case AE_FMT_S32LE : m_SampleSpec.format = PA_SAMPLE_S32LE; break;
    case AE_FMT_S32BE : m_SampleSpec.format = PA_SAMPLE_S32BE; break;
    case AE_FMT_FLOAT : m_SampleSpec.format = PA_SAMPLE_FLOAT32NE; break;
#if PA_CHECK_VERSION(1,0,0)
    case AE_FMT_DTS   :
    case AE_FMT_EAC3  :
    case AE_FMT_AC3   : m_SampleSpec.format = PA_SAMPLE_S16NE; break;
#endif

    default:
      CLog::Log(LOGERROR, "PulseAudio: Invalid format %i", format);
      pa_threaded_mainloop_unlock(m_MainLoop);
      m_format = AE_FMT_INVALID;
      return;
  }

  if (!pa_sample_spec_valid(&m_SampleSpec))
  {
    CLog::Log(LOGERROR, "PulseAudio: Invalid sample spec");
    pa_threaded_mainloop_unlock(m_MainLoop);
    Destroy();
    return /*false*/;
  }

  m_frameSize = pa_frame_size(&m_SampleSpec);

  struct pa_channel_map map;
  map.channels = m_channelLayout.Count();

  for (unsigned int ch = 0; ch < m_channelLayout.Count(); ++ch)
    switch(m_channelLayout[ch])
    {
      case AE_CH_NULL: break;
      case AE_CH_MAX : break;
      case AE_CH_RAW : break;
      case AE_CH_FL  : map.map[ch] = PA_CHANNEL_POSITION_FRONT_LEFT           ; break;
      case AE_CH_FR  : map.map[ch] = PA_CHANNEL_POSITION_FRONT_RIGHT          ; break;
      case AE_CH_FC  : map.map[ch] = PA_CHANNEL_POSITION_FRONT_CENTER         ; break;
      case AE_CH_BC  : map.map[ch] = PA_CHANNEL_POSITION_REAR_CENTER          ; break;
      case AE_CH_BL  : map.map[ch] = PA_CHANNEL_POSITION_REAR_LEFT            ; break;
      case AE_CH_BR  : map.map[ch] = PA_CHANNEL_POSITION_REAR_RIGHT           ; break;
      case AE_CH_LFE : map.map[ch] = PA_CHANNEL_POSITION_LFE                  ; break;
      case AE_CH_FLOC: map.map[ch] = PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER ; break;
      case AE_CH_FROC: map.map[ch] = PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER; break;
      case AE_CH_SL  : map.map[ch] = PA_CHANNEL_POSITION_SIDE_LEFT            ; break;
      case AE_CH_SR  : map.map[ch] = PA_CHANNEL_POSITION_SIDE_RIGHT           ; break;
      case AE_CH_TC  : map.map[ch] = PA_CHANNEL_POSITION_TOP_CENTER           ; break;
      case AE_CH_TFL : map.map[ch] = PA_CHANNEL_POSITION_TOP_FRONT_LEFT       ; break;
      case AE_CH_TFR : map.map[ch] = PA_CHANNEL_POSITION_TOP_FRONT_RIGHT      ; break;
      case AE_CH_TFC : map.map[ch] = PA_CHANNEL_POSITION_TOP_FRONT_CENTER     ; break;
      case AE_CH_TBL : map.map[ch] = PA_CHANNEL_POSITION_TOP_REAR_LEFT        ; break;
      case AE_CH_TBR : map.map[ch] = PA_CHANNEL_POSITION_TOP_REAR_RIGHT       ; break;
      case AE_CH_TBC : map.map[ch] = PA_CHANNEL_POSITION_TOP_REAR_CENTER      ; break;
      default: break;
    }

  m_MaxVolume     = CAEFactory::GetEngine()->GetVolume();
  m_Volume        = 1.0f;
  pa_volume_t paVolume = pa_sw_volume_from_linear((double)(m_Volume * m_MaxVolume));
  pa_cvolume_set(&m_ChVolume, m_SampleSpec.channels, paVolume);

#if PA_CHECK_VERSION(1,0,0)
  pa_format_info *info[1];
  info[0] = pa_format_info_new();
  switch(m_format)
  {
    case AE_FMT_DTS : info[0]->encoding = PA_ENCODING_DTS_IEC61937 ; break;
    case AE_FMT_EAC3: info[0]->encoding = PA_ENCODING_EAC3_IEC61937; break;
    case AE_FMT_AC3 : info[0]->encoding = PA_ENCODING_AC3_IEC61937 ; break;
    default:          info[0]->encoding = PA_ENCODING_PCM          ; break;
  }
  pa_format_info_set_rate         (info[0], m_SampleSpec.rate);
  pa_format_info_set_channels     (info[0], m_SampleSpec.channels);
  pa_format_info_set_channel_map  (info[0], &map);
  pa_format_info_set_sample_format(info[0], m_SampleSpec.format);
  m_Stream = pa_stream_new_extended(m_Context, "audio stream", info, 1, NULL);
  pa_format_info_free(info[0]);
#else
  m_Stream = pa_stream_new(m_Context, "audio stream", &m_SampleSpec, &map);
#endif

  if (m_Stream == NULL)
  {
    CLog::Log(LOGERROR, "PulseAudio: Could not create a stream");
    pa_threaded_mainloop_unlock(m_MainLoop);
    Destroy();
    return /*false*/;
  }

  pa_stream_set_state_callback(m_Stream, CPulseAEStream::StreamStateCallback, this);
  pa_stream_set_write_callback(m_Stream, CPulseAEStream::StreamRequestCallback, this);
  pa_stream_set_latency_update_callback(m_Stream, CPulseAEStream::StreamLatencyUpdateCallback, this);
  pa_stream_set_underflow_callback(m_Stream, CPulseAEStream::StreamUnderflowCallback, this);

  int flags = PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE;
  if (options & AESTREAM_FORCE_RESAMPLE)
    flags |= PA_STREAM_VARIABLE_RATE;

  if (pa_stream_connect_playback(m_Stream, NULL, NULL, (pa_stream_flags)flags, &m_ChVolume, NULL) < 0)
  {
    CLog::Log(LOGERROR, "PulseAudio: Failed to connect stream to output");
    pa_threaded_mainloop_unlock(m_MainLoop);
    Destroy();
    return /*false*/;
  }

  /* Wait until the stream is ready */
  do
  {
    pa_threaded_mainloop_wait(m_MainLoop);
    CLog::Log(LOGDEBUG, "PulseAudio: Stream %s", StreamStateToString(pa_stream_get_state(m_Stream)));
  }
  while (pa_stream_get_state(m_Stream) != PA_STREAM_READY && pa_stream_get_state(m_Stream) != PA_STREAM_FAILED);

  if (pa_stream_get_state(m_Stream) == PA_STREAM_FAILED)
  {
    CLog::Log(LOGERROR, "PulseAudio: Waited for the stream but it failed");
    pa_threaded_mainloop_unlock(m_MainLoop);
    Destroy();
    return /*false*/;
  }

  m_cacheSize = pa_stream_writable_size(m_Stream);

  pa_threaded_mainloop_unlock(m_MainLoop);

  m_Initialized = true;

  CLog::Log(LOGINFO, "PulseAEStream::Initialized");
  CLog::Log(LOGINFO, "  Sample Rate   : %d", m_sampleRate);
  CLog::Log(LOGINFO, "  Sample Format : %s", CAEUtil::DataFormatToStr(m_format));
  CLog::Log(LOGINFO, "  Channel Count : %d", m_channelLayout.Count());
  CLog::Log(LOGINFO, "  Channel Layout: %s", ((std::string)m_channelLayout).c_str());
  CLog::Log(LOGINFO, "  Frame Size    : %d", m_frameSize);
  CLog::Log(LOGINFO, "  Cache Size    : %d", m_cacheSize);

  Resume();

  return /*true*/;
}
Example #5
void CActiveAEStream::InitRemapper()
{
  // check if input format follows ffmpeg channel mask
  bool needRemap = false;
  unsigned int avLast, avCur = 0;
  for(unsigned int i=0; i<m_format.m_channelLayout.Count(); i++)
  {
    avLast = avCur;
    avCur = CAEUtil::GetAVChannel(m_format.m_channelLayout[i]);
    if(avCur < avLast)
    {
      needRemap = true;
      break;
    }
  }

  if(needRemap)
  {
    CLog::Log(LOGDEBUG, "CActiveAEStream::%s - initialize remapper", __FUNCTION__);

    m_remapper = CAEResampleFactory::Create();
    uint64_t avLayout = CAEUtil::GetAVChannelLayout(m_format.m_channelLayout);

    // build layout according to ffmpeg channel order
    // we need this for reference
    CAEChannelInfo ffmpegLayout;
    ffmpegLayout.Reset();
    int idx = 0;
    for(unsigned int i=0; i<m_format.m_channelLayout.Count(); i++)
    {
      for(unsigned int j=0; j<m_format.m_channelLayout.Count(); j++)
      {
        idx = CAEUtil::GetAVChannelIndex(m_format.m_channelLayout[j], avLayout);
        if (idx == (int)i)
        {
          ffmpegLayout += m_format.m_channelLayout[j];
          break;
        }
      }
    }

    // build remap layout we can pass to resampler as destination layout
    CAEChannelInfo remapLayout;
    remapLayout.Reset();
    for(unsigned int i=0; i<m_format.m_channelLayout.Count(); i++)
    {
      for(unsigned int j=0; j<m_format.m_channelLayout.Count(); j++)
      {
        idx = CAEUtil::GetAVChannelIndex(m_format.m_channelLayout[j], avLayout);
        if (idx == (int)i)
        {
          remapLayout += ffmpegLayout[j];
          break;
        }
      }
    }

    // initialize resampler for only doing remapping
    m_remapper->Init(avLayout,
                     m_format.m_channelLayout.Count(),
                     m_format.m_sampleRate,
                     CAEUtil::GetAVSampleFormat(m_format.m_dataFormat),
                     CAEUtil::DataFormatToUsedBits(m_format.m_dataFormat),
                     CAEUtil::DataFormatToDitherBits(m_format.m_dataFormat),
                     avLayout,
                     m_format.m_channelLayout.Count(),
                     m_format.m_sampleRate,
                     CAEUtil::GetAVSampleFormat(m_format.m_dataFormat),
                     CAEUtil::DataFormatToUsedBits(m_format.m_dataFormat),
                     CAEUtil::DataFormatToDitherBits(m_format.m_dataFormat),
                     false,
                     false,
                     &remapLayout,
                     AE_QUALITY_LOW, // not used for remapping
                     false);

    // extra sound packet, we can't resample to the same buffer
    m_remapBuffer = new CSoundPacket(m_inputBuffers->m_allSamples[0]->pkt->config, m_inputBuffers->m_allSamples[0]->pkt->max_nb_samples);
  }
}
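The first loop above only checks that the mapped AV channel values are monotonically increasing. A standalone sketch of that test, with plain uint64_t masks standing in for CAEUtil::GetAVChannel():

#include <cstdint>

// Sketch of the ordering test: ffmpeg expects channels sorted by their
// AV_CH_* bit position, so any decrease means the input needs remapping.
static bool NeedsRemap(const uint64_t *avChannels, unsigned int count)
{
  for (unsigned int i = 1; i < count; ++i)
    if (avChannels[i] < avChannels[i - 1])
      return true;
  return false;
}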
Example #6
bool CAERemap::Initialize(CAEChannelInfo input, CAEChannelInfo output, bool finalStage, bool forceNormalize/* = false */, enum AEStdChLayout stdChLayout/* = AE_CH_LAYOUT_INVALID */)
{
  if (!input.Count() || !output.Count())
    return false;

  /* build the downmix matrix */
  memset(m_mixInfo, 0, sizeof(m_mixInfo));
  m_output = output;

  /* figure which channels we have */
  for (unsigned int o = 0; o < output.Count(); ++o)
    m_mixInfo[output[o]].in_dst = true;

  /* flag invalid channels for forced downmix */
  if (stdChLayout != AE_CH_LAYOUT_INVALID)
  {
    CAEChannelInfo layout = stdChLayout;
    for (unsigned int o = 0; o < output.Count(); ++o)
      if (!layout.HasChannel(output[o]))
        m_mixInfo[output[o]].in_dst = false;
  }

  m_outChannels = output.Count();

  /* lookup the channels that exist in the output */
  for (unsigned int i = 0; i < input.Count(); ++i)
  {
    AEMixInfo  *info = &m_mixInfo[input[i]];
    AEMixLevel *lvl  = &info->srcIndex[info->srcCount++];
    info->in_src = true;
    lvl->index   = i;
    lvl->level   = 1.0f;
    if (!info->in_dst)
    {
      for (unsigned int o = 0; o < output.Count(); ++o)
      {
        if (input[i] == output[o])
        {
          info->outIndex = o;
          break;
        }
      }
    }

    m_inChannels = i;
  }
  ++m_inChannels;

  /* the final stage does not need any down/upmix */
  if (finalStage)
    return true;

  /* downmix from the specified channel to the specified list of channels */
  #define RM(from, ...) \
    static AEChannel downmix_##from[] = {__VA_ARGS__, AE_CH_NULL}; \
    ResolveMix(from, CAEChannelInfo(downmix_##from));

  /*
    the order of this is important as we can not mix channels
    into ones that have already been resolved... eg

    TBR -> BR
    TBC -> TBL & TBR

    TBR will get resolved to BR, and then TBC will get
    resolved to TBL, but since TBR has been resolved it will
    never make it to the output. The order has to be reversed
    as the TBC center depends on the TBR downmix... eg

    TBC -> TBL & TBR
    TBR -> BR

    if for any reason out of order mapping becomes required
    looping this list should resolve the channels.
  */
  RM(AE_CH_TBC , AE_CH_TBL, AE_CH_TBR);
  RM(AE_CH_TBR , AE_CH_BR);
  RM(AE_CH_TBL , AE_CH_BL);
  RM(AE_CH_TC  , AE_CH_TFL, AE_CH_TFR);
  RM(AE_CH_TFC , AE_CH_TFL, AE_CH_TFR);
  RM(AE_CH_TFR , AE_CH_FR);
  RM(AE_CH_TFL , AE_CH_FL);
  RM(AE_CH_SR  , AE_CH_BR, AE_CH_FR);
  RM(AE_CH_SL  , AE_CH_BL, AE_CH_FL);
  RM(AE_CH_BC  , AE_CH_BL, AE_CH_BR);
  RM(AE_CH_FROC, AE_CH_FR, AE_CH_FC);
  RM(AE_CH_FLOC, AE_CH_FL, AE_CH_FC);
  RM(AE_CH_BL  , AE_CH_FL);
  RM(AE_CH_BR  , AE_CH_FR);
  RM(AE_CH_LFE , AE_CH_FL, AE_CH_FR);
  RM(AE_CH_FL  , AE_CH_FC);
  RM(AE_CH_FR  , AE_CH_FC);
  RM(AE_CH_BROC, AE_CH_BR, AE_CH_BC);
  RM(AE_CH_BLOC, AE_CH_BL, AE_CH_BC);

  /* since everything eventually mixes down to FC we need special handling for it */
  if (m_mixInfo[AE_CH_FC].in_src)
  {
    /* if there is no output FC channel, try to mix it the best possible way */
    if (!m_mixInfo[AE_CH_FC].in_dst)
    {
      /* if we have TFC & FL & FR */
      if (m_mixInfo[AE_CH_TFC].in_dst && m_mixInfo[AE_CH_FL].in_dst && m_mixInfo[AE_CH_FR].in_dst)
      {
        RM(AE_CH_FC, AE_CH_TFC, AE_CH_FL, AE_CH_FR);
      }
      /* if we have TFC */
      else if (m_mixInfo[AE_CH_TFC].in_dst)
      {
        RM(AE_CH_FC, AE_CH_TFC);
      }
      /* if we have FLOC & FROC */
      else if (m_mixInfo[AE_CH_FLOC].in_dst && m_mixInfo[AE_CH_FROC].in_dst)
      {
        RM(AE_CH_FC, AE_CH_FLOC, AE_CH_FROC);
      }
      /* if we have TC & FL & FR */
      else if (m_mixInfo[AE_CH_TC].in_dst && m_mixInfo[AE_CH_FL].in_dst && m_mixInfo[AE_CH_FR].in_dst)
      {
        RM(AE_CH_FC, AE_CH_TC, AE_CH_FL, AE_CH_FR);
      }
      /* if we have FL & FR */
      else if (m_mixInfo[AE_CH_FL].in_dst && m_mixInfo[AE_CH_FR].in_dst)
      {
        RM(AE_CH_FC, AE_CH_FL, AE_CH_FR);
      }
      /* if we have TFL & TFR */
      else if (m_mixInfo[AE_CH_TFL].in_dst && m_mixInfo[AE_CH_TFR].in_dst)
      {
        RM(AE_CH_FC, AE_CH_TFL, AE_CH_TFR);
      }
      /* we dont have enough speakers to emulate FC */
      else
        return false;
    }
    else
    {
      /* if there is only one channel in the source and it is the FC and we have FL & FR, upmix to dual mono */
      if (m_inChannels == 1 && m_mixInfo[AE_CH_FL].in_dst && m_mixInfo[AE_CH_FR].in_dst)
      {
        RM(AE_CH_FC, AE_CH_FL, AE_CH_FR);
      }
    }
  }

  #undef RM

  if (g_guiSettings.GetBool("audiooutput.stereoupmix"))
    BuildUpmixMatrix(input, output);

  /* normalize the values */
  bool normalize;
  if (forceNormalize)
    normalize = true;
  else
  {
    normalize = g_guiSettings.GetBool("audiooutput.normalizelevels");
    CLog::Log(LOGDEBUG, "AERemap: Downmix normalization is %s", (normalize ? "enabled" : "disabled"));
  }

  if (normalize)
  {
    float max = 0;
    for (unsigned int o = 0; o < output.Count(); ++o)
    {
      AEMixInfo *info = &m_mixInfo[output[o]];
      float sum = 0;
      for (int i = 0; i < info->srcCount; ++i)
        sum += info->srcIndex[i].level;

      if (sum > max)
        max = sum;
    }

    float scale = 1.0f / max;
    for (unsigned int o = 0; o < output.Count(); ++o)
    {
      AEMixInfo *info = &m_mixInfo[output[o]];
      for (int i = 0; i < info->srcCount; ++i)
        info->srcIndex[i].level *= scale;
    }
  }

#if 1
  /* dump the matrix */
  CLog::Log(LOGINFO, "==[Downmix Matrix]==");
  for (unsigned int o = 0; o < output.Count(); ++o)
  {
    AEMixInfo *info = &m_mixInfo[output[o]];
    if (info->srcCount == 0)
      continue;

    std::stringstream s;
    s << CAEChannelInfo::GetChName(output[o]) << " =";
    for (int i = 0; i < info->srcCount; ++i)
      s << " " << CAEChannelInfo::GetChName(input[info->srcIndex[i].index]) << "(" << info->srcIndex[i].level << ")";

    CLog::Log(LOGINFO, "%s", s.str().c_str());
  }
  CLog::Log(LOGINFO, "====================\n");
#endif

  return true;
}
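The normalization pass near the end of the example above scales every contribution by 1/max so that the loudest output channel sums to unity. A simplified standalone sketch of that scaling (container types chosen only for illustration):

#include <algorithm>
#include <vector>

// Simplified sketch: each output channel owns a list of source levels; find
// the channel whose levels sum highest and scale everything by 1/that sum.
static void NormalizeLevels(std::vector<std::vector<float>> &levels)
{
  float max = 0.0f;
  for (const std::vector<float> &chan : levels)
  {
    float sum = 0.0f;
    for (float l : chan)
      sum += l;
    max = std::max(max, sum);
  }
  if (max <= 0.0f)
    return; // nothing to scale

  const float scale = 1.0f / max;
  for (std::vector<float> &chan : levels)
    for (float &l : chan)
      l *= scale;
}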
Example #7
bool CAESinkALSA::Initialize(AEAudioFormat &format, std::string &device)
{
  CAEChannelInfo channelLayout = GetChannelLayout(format, 2, 8);
  m_initDevice = device;
  m_initFormat = format;
  ALSAConfig inconfig, outconfig;
  inconfig.format = format.m_dataFormat;
  inconfig.sampleRate = format.m_sampleRate;
  inconfig.channels = channelLayout.Count();

  /* if we are raw, correct the data format */
  if (AE_IS_RAW(format.m_dataFormat))
  {
    inconfig.format   = AE_FMT_S16NE;
    m_passthrough     = true;
  }
  else
  {
    m_passthrough   = false;
  }
#if defined(HAS_LIBAMCODEC)
  if (aml_present())
  {
    aml_set_audio_passthrough(m_passthrough);
    device = "default";
  }
#endif

  if (inconfig.channels == 0)
  {
    CLog::Log(LOGERROR, "CAESinkALSA::Initialize - Unable to open the requested channel layout");
    return false;
  }

  AEDeviceType devType = AEDeviceTypeFromName(device);

  std::string AESParams;
  /* digital interfaces should have AESx set, though in practice most
   * receivers don't care */
  if (m_passthrough || devType == AE_DEVTYPE_HDMI || devType == AE_DEVTYPE_IEC958)
    GetAESParams(format, AESParams);

  CLog::Log(LOGINFO, "CAESinkALSA::Initialize - Attempting to open device \"%s\"", device.c_str());

  /* get the sound config */
  snd_config_t *config;
  snd_config_copy(&config, snd_config);

  if (!OpenPCMDevice(device, AESParams, inconfig.channels, &m_pcm, config))
  {
    CLog::Log(LOGERROR, "CAESinkALSA::Initialize - failed to initialize device \"%s\"", device.c_str());
    snd_config_delete(config);
    return false;
  }

  /* get the actual device name that was used */
  device = snd_pcm_name(m_pcm);
  m_device = device;

  CLog::Log(LOGINFO, "CAESinkALSA::Initialize - Opened device \"%s\"", device.c_str());

  /* free the sound config */
  snd_config_delete(config);

  if (!InitializeHW(inconfig, outconfig) || !InitializeSW(outconfig))
    return false;

  // we want it blocking
  snd_pcm_nonblock(m_pcm, 0);
  snd_pcm_prepare (m_pcm);

  if (m_passthrough && inconfig.channels != outconfig.channels)
  {
    CLog::Log(LOGINFO, "CAESinkALSA::Initialize - could not open required number of channels");
    return false;
  }
  // adjust format to the configuration we got
  format.m_channelLayout = GetChannelLayout(format, outconfig.channels, outconfig.channels);
  format.m_sampleRate = outconfig.sampleRate;
  format.m_frames = outconfig.periodSize;
  format.m_frameSize = outconfig.frameSize;
  format.m_frameSamples = outconfig.periodSize * outconfig.channels;
  format.m_dataFormat = outconfig.format;

  m_format              = format;
  m_formatSampleRateMul = 1.0 / (double)m_format.m_sampleRate;

  return true;
}
Example #8
//Note: in multichannel mode CA will either pull 2 channels of data (stereo) or 6/8 channels of data
//(every speaker setup with more than 2 speakers). The difference between the number of real speakers
//and the 6/8 channels needs to be padded with unknown channels so that the sample size fits 6/8 channels
//
//device [in] - the device whose channel layout should be used
//channelMap [in/out] - if filled on entry we were called from Initialize and the requested map is logged; on return it holds the channel map for the device
//channelsPerFrame [in] - the number of channels this device is configured to (e.g. 2 or 6/8)
void AEDeviceEnumerationOSX::GetAEChannelMap(CAEChannelInfo &channelMap, unsigned int channelsPerFrame) const
{
  CCoreAudioChannelLayout calayout;
  bool logMapping = channelMap.Count() > 0; // only log if the engine requests a layout during init
  bool mapAvailable = false;
  unsigned int numberChannelsInDeviceLayout = CA_MAX_CHANNELS; // default number of channels from CAChannelMap
  AudioChannelLayout *layout = NULL;
  
  // try to fetch either the multichannel or the stereo channel layout from the device
  if (channelsPerFrame == 2 || channelMap.Count() == 2)
    mapAvailable = m_caDevice.GetPreferredChannelLayoutForStereo(calayout);
  else
    mapAvailable = m_caDevice.GetPreferredChannelLayout(calayout);
  
  // if a map was fetched - check if it is usable
  if (mapAvailable)
  {
    layout = calayout;
    if (layout == NULL || layout->mChannelLayoutTag != kAudioChannelLayoutTag_UseChannelDescriptions)
      mapAvailable = false;// wrong map format
    else
      numberChannelsInDeviceLayout = layout->mNumberChannelDescriptions;
  }
  
  // start the mapping action
  // the number of channels to be added to the outgoing channelmap
  // this is CA_MAX_CHANNELS at max and might be lower for some output devices (channelsPerFrame)
  unsigned int numChannelsToMap = std::min((unsigned int)CA_MAX_CHANNELS, (unsigned int)channelsPerFrame);
  
  // if there was a map fetched we force the number of
  // channels to map to channelsPerFrame (this allows mapping
  // of more then CA_MAX_CHANNELS if needed)
  if (mapAvailable)
    numChannelsToMap = channelsPerFrame;
  
  std::string layoutStr;
  
  if (logMapping)
  {
    CLog::Log(LOGDEBUG, "%s Engine requests layout %s", __FUNCTION__, ((std::string)channelMap).c_str());
    
    if (mapAvailable)
      CLog::Log(LOGDEBUG, "%s trying to map to %s layout: %s", __FUNCTION__, channelsPerFrame == 2 ? "stereo" : "multichannel", calayout.ChannelLayoutToString(*layout, layoutStr));
    else
      CLog::Log(LOGDEBUG, "%s no map available - using static multichannel map layout", __FUNCTION__);
  }
  
  channelMap.Reset();// start with an empty map
  
  for (unsigned int channel = 0; channel < numChannelsToMap; channel++)
  {
    // we only try to map channels which are defined in the device layout
    enum AEChannel currentChannel;
    if (channel < numberChannelsInDeviceLayout)
    {
      // get the channel from the fetched map
      if (mapAvailable)
        currentChannel = caChannelToAEChannel(layout->mChannelDescriptions[channel].mChannelLabel);
      else// get the channel from the default map
        currentChannel = CAChannelMap[channel];
      
    }
    else// fill with unknown channels
      currentChannel = caChannelToAEChannel(kAudioChannelLabel_Unknown);
    
    if(!channelMap.HasChannel(currentChannel))// only add if not already added
      channelMap += currentChannel;
  }
  
  if (logMapping)
    CLog::Log(LOGDEBUG, "%s mapped channels to layout %s", __FUNCTION__, ((std::string)channelMap).c_str());
}
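As the note above explains, when CoreAudio pulls more channels per frame than the device layout defines, the remaining slots are filled with unknown channels. A toy sketch of that padding rule (the enum and helper are illustrative, not part of the sink):

// Illustrative only: pad a device layout up to the number of channels
// CoreAudio is pulling, marking slots beyond the device layout as unknown.
enum Speaker { SPK_FL, SPK_FR, SPK_FC, SPK_BL, SPK_BR, SPK_UNKNOWN };

static void PadLayout(Speaker out[], unsigned int channelsPerFrame,
                      const Speaker deviceLayout[], unsigned int deviceChannels)
{
  for (unsigned int ch = 0; ch < channelsPerFrame; ++ch)
    out[ch] = (ch < deviceChannels) ? deviceLayout[ch] : SPK_UNKNOWN;
}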
Example #9
void CAESinkWASAPI::EnumerateDevicesEx(AEDeviceInfoList &deviceInfoList, bool force)
{
  IMMDeviceEnumerator* pEnumerator = NULL;
  IMMDeviceCollection* pEnumDevices = NULL;
  IMMDevice*           pDefaultDevice = NULL;
  CAEDeviceInfo        deviceInfo;
  CAEChannelInfo       deviceChannels;
  LPWSTR               pwszID = NULL;
  std::wstring         wstrDDID;

  WAVEFORMATEXTENSIBLE wfxex = {0};
  HRESULT              hr;

  hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void**)&pEnumerator);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Could not allocate WASAPI device enumerator. CoCreateInstance error code: %li", hr)

  UINT uiCount = 0;

  // get the default audio endpoint
  if(pEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &pDefaultDevice) == S_OK)
  {
    if(pDefaultDevice->GetId(&pwszID) == S_OK)
    {
      wstrDDID = pwszID;
      CoTaskMemFree(pwszID);
    }
    SAFE_RELEASE(pDefaultDevice);
  }

  // enumerate over all audio endpoints
  hr = pEnumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &pEnumDevices);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint enumeration failed.")

  hr = pEnumDevices->GetCount(&uiCount);
  EXIT_ON_FAILURE(hr, __FUNCTION__": Retrieval of audio endpoint count failed.")

  for (UINT i = 0; i < uiCount; i++)
  {
    IMMDevice *pDevice = NULL;
    IPropertyStore *pProperty = NULL;
    PROPVARIANT varName;
    PropVariantInit(&varName);

    deviceInfo.m_channels.Reset();
    deviceInfo.m_dataFormats.clear();
    deviceInfo.m_sampleRates.clear();

    hr = pEnumDevices->Item(i, &pDevice);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint failed.");
      goto failed;
    }

    hr = pDevice->OpenPropertyStore(STGM_READ, &pProperty);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint properties failed.");
      SAFE_RELEASE(pDevice);
      goto failed;
    }

    hr = pProperty->GetValue(PKEY_Device_FriendlyName, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint device name failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strFriendlyName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_GUID, &varName);
    if(FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint GUID failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }

    std::string strDevName = localWideToUtf(varName.pwszVal);
    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_FormFactor, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint form factor failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }
    std::string strWinDevType = winEndpoints[(EndpointFormFactor)varName.uiVal].winEndpointType;
    AEDeviceType aeDeviceType = winEndpoints[(EndpointFormFactor)varName.uiVal].aeDeviceType;

    PropVariantClear(&varName);

    hr = pProperty->GetValue(PKEY_AudioEndpoint_PhysicalSpeakers, &varName);
    if (FAILED(hr))
    {
      CLog::Log(LOGERROR, __FUNCTION__": Retrieval of WASAPI endpoint speaker layout failed.");
      SAFE_RELEASE(pDevice);
      SAFE_RELEASE(pProperty);
      goto failed;
    }
    unsigned int uiChannelMask = std::max(varName.uintVal, (unsigned int) KSAUDIO_SPEAKER_STEREO);

    deviceChannels.Reset();

    for (unsigned int c = 0; c < WASAPI_SPEAKER_COUNT; c++)
    {
      if (uiChannelMask & WASAPIChannelOrder[c])
        deviceChannels += AEChannelNames[c];
    }

    PropVariantClear(&varName);

    IAudioClient *pClient;
    hr = pDevice->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pClient);
    if (SUCCEEDED(hr))
    {
      /* Test format DTS-HD */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.Format.nSamplesPerSec       = 192000;
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_7POINT1_SURROUND;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DTS_HD;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 8;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_DTSHD));

      /* Test format Dolby TrueHD */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_MLP;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_TRUEHD));

      /* Test format Dolby EAC3 */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL_PLUS;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_EAC3));

      /* Test format DTS */
      wfxex.Format.nSamplesPerSec       = 48000;
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_5POINT1;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DTS;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_DTS));

      /* Test format Dolby AC3 */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AC3));

      /* Test format AAC */
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEC61937_AAC;
      hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
      if (SUCCEEDED(hr))
        deviceInfo.m_dataFormats.push_back(AEDataFormat(AE_FMT_AAC));

      /* Test format for PCM format iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;

      for (int p = AE_FMT_FLOAT; p > AE_FMT_INVALID; p--)
      {
        if (p < AE_FMT_FLOAT)
          wfxex.SubFormat               = KSDATAFORMAT_SUBTYPE_PCM;
        wfxex.Format.wBitsPerSample     = CAEUtil::DataFormatToBits((AEDataFormat) p);
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        if (p <= AE_FMT_S24NE4 && p >= AE_FMT_S24BE4)
        {
          wfxex.Samples.wValidBitsPerSample = 24;
        }
        else
        {
          wfxex.Samples.wValidBitsPerSample = wfxex.Format.wBitsPerSample;
        }

        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
          deviceInfo.m_dataFormats.push_back((AEDataFormat) p);
      }

      /* Test format for sample rate iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_PCM;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;

      for (int j = 0; j < WASAPISampleRateCount; j++)
      {
        wfxex.Format.nSamplesPerSec     = WASAPISampleRates[j];
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
          deviceInfo.m_sampleRates.push_back(WASAPISampleRates[j]);
      }

      /* Test format for channels iteration */
      wfxex.Format.cbSize               = sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX);
      wfxex.dwChannelMask               = KSAUDIO_SPEAKER_STEREO;
      wfxex.Format.wFormatTag           = WAVE_FORMAT_EXTENSIBLE;
      wfxex.SubFormat                   = KSDATAFORMAT_SUBTYPE_PCM;
      wfxex.Format.nSamplesPerSec       = 48000;
      wfxex.Format.wBitsPerSample       = 16;
      wfxex.Samples.wValidBitsPerSample = 16;
      wfxex.Format.nChannels            = 2;
      wfxex.Format.nBlockAlign          = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
      wfxex.Format.nAvgBytesPerSec      = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;

      bool hasLpcm = false;

      // Try with KSAUDIO_SPEAKER_DIRECTOUT
      for (unsigned int k = WASAPI_SPEAKER_COUNT; k > 0; k--)
      {
        wfxex.dwChannelMask             = KSAUDIO_SPEAKER_DIRECTOUT;
        wfxex.Format.nChannels          = k;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if (k > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
          break;
        }
      }

      /* Try with reported channel mask */
      for (unsigned int k = WASAPI_SPEAKER_COUNT; k > 0; k--)
      {
        wfxex.dwChannelMask             = uiChannelMask;
        wfxex.Format.nChannels          = k;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if ( !hasLpcm && k > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
          break;
        }
      }

      /* Try with specific speakers configurations */
      for (unsigned int i = 0; i < ARRAYSIZE(layoutsList); i++)
      {
        unsigned int nmbOfCh;
        wfxex.dwChannelMask             = ChLayoutToChMask(layoutsList[i], &nmbOfCh);
        wfxex.Format.nChannels          = nmbOfCh;
        wfxex.Format.nBlockAlign        = wfxex.Format.nChannels * (wfxex.Format.wBitsPerSample >> 3);
        wfxex.Format.nAvgBytesPerSec    = wfxex.Format.nSamplesPerSec * wfxex.Format.nBlockAlign;
        hr = pClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, &wfxex.Format, NULL);
        if (SUCCEEDED(hr))
        {
          if ( deviceChannels.Count() < nmbOfCh)
            deviceChannels = layoutsList[i];
          if ( !hasLpcm && nmbOfCh > 3) // Add only multichannel LPCM
          {
            deviceInfo.m_dataFormats.push_back(AE_FMT_LPCM);
            hasLpcm = true;
          }
        }
      }
      pClient->Release();
    }
    else
    {
      CLog::Log(LOGDEBUG, __FUNCTION__": Failed to activate device for passthrough capability testing.");
    }

    deviceInfo.m_deviceName       = strDevName;
    deviceInfo.m_displayName      = strWinDevType.append(strFriendlyName);
    deviceInfo.m_displayNameExtra = std::string("WASAPI: ").append(strFriendlyName);
    deviceInfo.m_deviceType       = aeDeviceType;
    deviceInfo.m_channels         = deviceChannels;

    /* Store the device info */
    deviceInfoList.push_back(deviceInfo);

    if(pDevice->GetId(&pwszID) == S_OK)
    {
      if(wstrDDID.compare(pwszID) == 0)
      {
        deviceInfo.m_deviceName = std::string("default");
        deviceInfo.m_displayName = std::string("default");
        deviceInfo.m_displayNameExtra = std::string("");
        deviceInfoList.push_back(deviceInfo);
      }
      CoTaskMemFree(pwszID);
    }

    SAFE_RELEASE(pDevice);
    SAFE_RELEASE(pProperty);
  }

  return;

failed:
  SAFE_RELEASE(pEnumDevices);
  SAFE_RELEASE(pEnumerator);
}
Example #10
bool CAESinkALSA::Initialize(AEAudioFormat &format, std::string &device)
{
  CAEChannelInfo channelLayout;
  m_initDevice = device;
  m_initFormat = format;

  /* if we are raw, correct the data format */
  if (AE_IS_RAW(format.m_dataFormat))
  {
    channelLayout     = GetChannelLayout(format);
    format.m_dataFormat = AE_FMT_S16NE;
    m_passthrough       = true;
  }
  else
  {
    channelLayout = GetChannelLayout(format);
    m_passthrough   = false;
  }
#if defined(HAS_AMLPLAYER) || defined(HAS_LIBAMCODEC)
  if (aml_present())
  {
    aml_set_audio_passthrough(m_passthrough);
    device = "default";
  }
#endif

  if (channelLayout.Count() == 0)
  {
    CLog::Log(LOGERROR, "CAESinkALSA::Initialize - Unable to open the requested channel layout");
    return false;
  }

  format.m_channelLayout = channelLayout;

  AEDeviceType devType = AEDeviceTypeFromName(device);

  std::string AESParams;
  /* digital interfaces should have AESx set, though in practice most
   * receivers don't care */
  if (m_passthrough || devType == AE_DEVTYPE_HDMI || devType == AE_DEVTYPE_IEC958)
    GetAESParams(format, AESParams);

  CLog::Log(LOGINFO, "CAESinkALSA::Initialize - Attempting to open device \"%s\"", device.c_str());

  /* get the sound config */
  snd_config_t *config;
  snd_config_copy(&config, snd_config);

  if (!OpenPCMDevice(device, AESParams, channelLayout.Count(), &m_pcm, config))
  {
    CLog::Log(LOGERROR, "CAESinkALSA::Initialize - failed to initialize device \"%s\"", device.c_str());
    snd_config_delete(config);
    return false;
  }

  /* get the actual device name that was used */
  device = snd_pcm_name(m_pcm);
  m_device = device;

  CLog::Log(LOGINFO, "CAESinkALSA::Initialize - Opened device \"%s\"", device.c_str());

  /* free the sound config */
  snd_config_delete(config);

  if (!InitializeHW(format) || !InitializeSW(format))
    return false;

  snd_pcm_nonblock(m_pcm, 1);
  snd_pcm_prepare (m_pcm);

  m_format              = format;
  m_formatSampleRateMul = 1.0 / (double)m_format.m_sampleRate;

  return true;
}