Code Example #1
File: atsctables.cpp Project: DocOnDev/mythtv
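// Render the cable VCT as readable text: section header fields, one line per channel, then any global descriptors.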
QString CableVirtualChannelTable::toString(void) const
{
    QString str;
    str.append(QString("VCT Cable: channels(%1) tsid(0x%2) ")
               .arg(ChannelCount()).arg(TransportStreamID(), 0, 16));
    str.append(QString("seclength(%3)\n").arg(Length()));
    for (uint i = 0; i < ChannelCount(); i++)
    {
        str.append(toString(i)).append("\n");
    }
    if (0 != GlobalDescriptorsLength())
    {
        str.append(QString("global descriptors length: %1\n")
                   .arg(GlobalDescriptorsLength()));
        vector<const unsigned char*> desc =
            MPEGDescriptor::Parse(GlobalDescriptors(),
                                  GlobalDescriptorsLength());
        str.append(QString("global descriptors count: %1\n").arg(desc.size()));
        for (uint i = 0; i < desc.size(); i++)
        {
            str.append(QString(" %1\n")
                       .arg(MPEGDescriptor(desc[i]).toString()));
        }
    }
    return str;
}
Code Example #2
File: Channel.cpp Project: Jedzia/Humbug
//get a channel
CChannel* CChannel::GetChannel(int channel)
{
	//check for -1
	if(channel==-1)
	{
		//look for a channel
		for(int index=0;index<ChannelCount();index++)
		{
			//check for valid channel
			if(GetChannel(index)->IsPlaying()) continue;
			//found one
			return(GetChannel(index));
		}
		//no channel found
		return(&s_NullChannel);
	}
	else
	{
		//check range
		if(channel<ChannelCount())
		{
			//return the channel
			return(s_ChannelList[channel]);
		}
		else
		{
			//not a valid channel
			return(&s_NullChannel);
		}
	}
}
Code Example #3
File: Channel.cpp Project: Jedzia/Humbug
//allocate channels
bool CChannel::Allocate(int channelcount)
{
	//allocate new number of channels
	int newchannelcount=Mix_AllocateChannels(channelcount);
	//delete any excess channels
	while(newchannelcount<ChannelCount())
	{
		//grab last channel
		CChannel* pChannel=GetChannel(ChannelCount()-1);
		//delete the channel
		delete pChannel;
		//remove last item in vector list
		s_ChannelList.pop_back();
	}
	//add new channels
	while(newchannelcount>ChannelCount())
	{
		//create a new channel
		CChannel* pChannel=new CChannel(ChannelCount());
		//add channel to list
		s_ChannelList.push_back(pChannel);
	}
	//return
	return(newchannelcount==channelcount);
}
Code Example #4
File: dvbdescriptors.cpp Project: DocOnDev/mythtv
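// Format the UK channel list as a comma-separated "service_id->channel_number" mapping.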
QString UKChannelListDescriptor::toString() const
{
    QString ret = "UKChannelListDescriptor sid->chan_num: ";
    for (uint i = 0; i < ChannelCount(); i++)
    {
        ret += QString("%1->%2").arg(ServiceID(i)).arg(ChannelNumber(i));
        ret += (i+1<ChannelCount()) ? ", " : "";
    }
    return ret;
}
Code Example #5
File: atsctables.cpp Project: DocOnDev/mythtv
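// Cache a pointer to each channel record; a VCT channel entry is 32 bytes of fixed fields followed by its descriptors.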
void VirtualChannelTable::Parse(void) const
{
    _ptrs.clear();
    _ptrs.push_back(const_cast<unsigned char*>(psipdata()) + 2);
    for (uint i = 0; i < ChannelCount(); i++)
        _ptrs.push_back(_ptrs[i] + 32 + DescriptorsLength(i));
}
Code Example #6
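// Ensure the block owns a writable float buffer with aChannelCount channels, reusing the current buffer only when it is exclusively ours.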
void
AudioBlock::AllocateChannels(uint32_t aChannelCount)
{
  MOZ_ASSERT(mDuration == WEBAUDIO_BLOCK_SIZE);

  if (mBufferIsDownstreamRef) {
    // This is not our buffer to re-use.
    ClearDownstreamMark();
  } else if (mBuffer && ChannelCount() == aChannelCount) {
    AudioBlockBuffer* buffer = mBuffer->AsAudioBlockBuffer();
    if (buffer && !buffer->HasLastingShares()) {
      MOZ_ASSERT(mBufferFormat == AUDIO_FORMAT_FLOAT32);
      // No need to allocate again.
      mVolume = 1.0f;
      return;
    }
  }

  // XXX for SIMD purposes we should do something here to make sure the
  // channel buffers are 16-byte aligned.
  nsRefPtr<AudioBlockBuffer> buffer = AudioBlockBuffer::Create(aChannelCount);
  mChannelData.SetLength(aChannelCount);
  for (uint32_t i = 0; i < aChannelCount; ++i) {
    mChannelData[i] = buffer->ChannelData(i);
  }
  mBuffer = buffer.forget();
  mVolume = 1.0f;
  mBufferFormat = AUDIO_FORMAT_FLOAT32;
}
Code Example #7
File: atsctables.cpp Project: DaveDaCoda/mythtv
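// Serialise the terrestrial or cable VCT section as XML: section attributes, global descriptors, then one element per channel.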
QString VirtualChannelTable::toStringXML(uint indent_level) const
{
    QString indent_0 = xml_indent(indent_level);
    QString indent_1 = xml_indent(indent_level + 1);
    QString indent_2 = xml_indent(indent_level + 2);

    QString section_name = QString("%1VirtualChannelSection")
        .arg((TableID::TVCT == TableID()) ? "Terrestrial" : "Cable");

    QString mapid;
    if (TableID::CVCT == TableID())
    {
        uint sctemapid = (pesdata()[3]<<8) | pesdata()[4];
        mapid = QString(" mapid=\"0x%1\"").arg(sctemapid,4,16,QChar('0'));
    }

    QString str =
        QString("%1<%2 tsid=\"0x%3\" channel_count=\"%4\""
                "\n%5global_descriptors_length=\"%6\"%7"
                "\n%8%9>\n")
        .arg(indent_0)
        .arg(section_name)
        .arg(TransportStreamID(),4,16,QChar('0'))
        .arg(ChannelCount())
        .arg(indent_1)
        .arg(GlobalDescriptorsLength())
        .arg(mapid)
        .arg(indent_1)
        .arg(PSIPTable::XMLValues(indent_level + 1));

    vector<const unsigned char*> gdesc =
        MPEGDescriptor::Parse(GlobalDescriptors(), GlobalDescriptorsLength());
    for (uint i = 0; i < gdesc.size(); i++)
    {
        str += MPEGDescriptor(gdesc[i], 300)
            .toStringXML(indent_level + 1) + "\n";
    }

    for (uint i = 0; i < ChannelCount(); i++)
        str += ChannelStringXML(indent_level + 1, i) + "\n";

    return str + indent_0 + QString("</%1>").arg(section_name);
}
Code Example #8
File: Remap.cpp Project: svn2github/framewave-trunk
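// General reference implementation of remapping: each destination pixel takes its source coordinates from pxMap/pyMap and is interpolated from the source ROI.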
static FwStatus My_FW_Remap(const TS* pSrc, FwiSize srcSize, int srcStep, FwiRect srcRoi, 
					  const Fw32f *pxMap, int xMapStep, const Fw32f *pyMap, int yMapStep,
					  TS* pDst, int dstStep, FwiSize dstRoiSize, int interpolation)
{
	//Parameter checking
    if (pxMap==0 || pyMap==0) 
		return fwStsNullPtrErr;

	if (interpolation != FWI_INTER_LINEAR && interpolation != FWI_INTER_NN &&
		interpolation != FWI_INTER_CUBIC)	
		return fwStsInterpolationErr;
	
	int channel=ChannelCount(chSrc);
	FwStatus status = My_FW_ParaCheck<TS>(pSrc, srcSize, srcStep, srcRoi, pDst, dstStep,
		dstRoiSize, channel);
	if (status !=fwStsNoErr) return status;

	int x, y, flag=0;
	double xmap, ymap;
	
	//dstStep and srcStep are byte size
	//we need to change it with data array size
	dstStep = dstStep / (sizeof(TS));
	srcStep = srcStep / (sizeof(TS));
	xMapStep = xMapStep / (sizeof(Fw32f));
	yMapStep = yMapStep / (sizeof(Fw32f));

	int channel1;
	// Will not change 4th channel element in AC4
	if (chSrc == AC4) channel1=3;
	else channel1=channel;
	Fw32f round;
	// 32f is supported, but not 32u and 32s
	// No rounding is needed for 32f type
	if (sizeof(TS) == 4) round=0;
	else round=0.5;

	for (y=0; y<dstRoiSize.height; y++) {
		for (x=0; x<dstRoiSize.width; x++) {
			// potential variation
			// xmap=*(pxMap+x+y*xMapStep)+0.5;
			// ymap=*(pyMap+x+y*yMapStep)+0.5;
			xmap=*(pxMap+x+y*xMapStep);
			ymap=*(pyMap+x+y*yMapStep);
			
			// Remap function can map source point to any destination point. 
			// only general reference code is provided.
			My_FW_PointHandle<TS, disp> (xmap, ymap, x, y, pSrc, srcStep, srcRoi, 
				pDst, dstStep, interpolation, &flag, channel, channel1, round);			
		}
	}

	return fwStsNoErr;
}
Code Example #9
File: atsctables.cpp Project: DocOnDev/mythtv
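// Return the index of the channel with the given major.minor number (or the first match on minor alone when major <= 0); -1 if not found.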
int VirtualChannelTable::Find(int major, int minor) const
{
    if (major>0)
    {
        for (uint i = 0; i < ChannelCount(); i++)
        {
            if ((MajorChannel(i) == (uint)major) &&
                (MinorChannel(i) == (uint)minor))
                return (int)i;
        }
    }
    else if (minor>0)
    {
        for (uint i = 0; i < ChannelCount(); i++)
        {
            if (MinorChannel(i) == (uint)minor)
                return (int)i;
        }
    }
    return -1;
}
Code Example #10
File: atsctables.cpp Project: DaveDaCoda/mythtv
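// Render the terrestrial or cable VCT as readable text: section header (plus the SCTE map id for cable), one line per channel, then any global descriptors.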
QString VirtualChannelTable::toString(void) const
{
    QString str;
    str.append(QString("%1 Virtual Channel Section\n%2"
                       "      channel_count(%3) tsid(0x%4)")
               .arg((TableID::TVCT == TableID()) ? "Terrestrial":"Cable")
               .arg(PSIPTable::toString())
               .arg(ChannelCount())
               .arg(TransportStreamID(),4,16,QChar('0')));

    if (TableID::CVCT == TableID())
    {
        uint sctemapid = (pesdata()[3]<<8) | pesdata()[4];
        str.append(QString(" mapid(0x%1)").arg(sctemapid,0,16));
    }

    str.append("\n");

    for (uint i = 0; i < ChannelCount(); i++)
        str.append(ChannelString(i));

    if (0 != GlobalDescriptorsLength())
    {
        str.append(QString("global descriptors length(%1) ")
                   .arg(GlobalDescriptorsLength()));
        vector<const unsigned char*> desc =
            MPEGDescriptor::Parse(GlobalDescriptors(),
                                  GlobalDescriptorsLength());
        str.append(QString("count: %1\n").arg(desc.size()));
        for (uint i = 0; i < desc.size(); i++)
        {
            str.append(QString(" %1\n")
                       .arg(MPEGDescriptor(desc[i]).toString()));
        }
    }

    return str;
}
Code Example #11
File: atsctables.cpp Project: DocOnDev/mythtv
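// Look up channel i's extended_channel_name descriptor and return its long name, or a null QString when the channel has none.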
QString VirtualChannelTable::GetExtendedChannelName(uint i) const
{
    if ((i >= ChannelCount()) || DescriptorsLength(i) == 0)
        return QString::null;

    vector<const unsigned char*> parsed =
        MPEGDescriptor::Parse(Descriptors(i), DescriptorsLength(i));

    const unsigned char* desc =
        MPEGDescriptor::Find(parsed, DescriptorID::extended_channel_name);

    if (!desc)
        return QString::null;

    return ExtendedChannelNameDescriptor(desc).LongChannelNameString();
}
Code Example #12
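    // Render one audio fragment: clear all channels and effect chains, let every
    // connected engine render, then run the master effect chains and mix their
    // output back into the device channels.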
    int AudioOutputDevice::RenderAudio(uint Samples) {
        if (Channels.empty()) return 0;

        // reset all channels with silence
        {
            std::vector<AudioChannel*>::iterator iterChannels = Channels.begin();
            std::vector<AudioChannel*>::iterator end          = Channels.end();
            for (; iterChannels != end; iterChannels++)
                (*iterChannels)->Clear(Samples); // zero out audio buffer
        }
        // do the same for master effects
        {
            std::vector<EffectChain*>::iterator iterChains = vEffectChains.begin();
            std::vector<EffectChain*>::iterator end        = vEffectChains.end();
            for (; iterChains != end; ++iterChains)
                (*iterChains)->ClearAllChannels(); // zero out audio buffers
        }

        int result = 0;

        // let all connected engines render audio for the current audio fragment cycle
        const std::set<Engine*>& engines = EnginesReader.Lock();
        #if CONFIG_RT_EXCEPTIONS
        try
        #endif // CONFIG_RT_EXCEPTIONS
        {
            std::set<Engine*>::iterator iterEngine = engines.begin();
            std::set<Engine*>::iterator end        = engines.end();
            for (; iterEngine != end; iterEngine++) {
                int res = (*iterEngine)->RenderAudio(Samples);
                if (res != 0) result = res;
            }
        }
        #if CONFIG_RT_EXCEPTIONS
        catch (const std::runtime_error& se) {
            std::cerr << "std::runtime_error: " << se.what() << std::endl << std::flush;
            exit(EXIT_FAILURE);
        }
        #endif // CONFIG_RT_EXCEPTIONS
        EnginesReader.Unlock();

        // now that the engines (might) have left fx send signals for master
        // effects, render all master effects
        {
            std::vector<EffectChain*>::iterator iterChains = vEffectChains.begin();
            std::vector<EffectChain*>::iterator end        = vEffectChains.end();
            for (; iterChains != end; ++iterChains) {
                if (!(*iterChains)->EffectCount()) continue;
                (*iterChains)->RenderAudio(Samples);
                // mix the result of the last effect in the chain to the audio
                // output device channel(s)
                Effect* pLastEffect =
                    (*iterChains)->GetEffect((*iterChains)->EffectCount() - 1);
                for (int iChan = 0; iChan < pLastEffect->OutputChannelCount() && iChan < ChannelCount(); ++iChan)
                    pLastEffect->OutputChannel(iChan)->MixTo(Channel(iChan), Samples);
            }
        }

        return result;
    }
Code Example #13
File: Settings.cpp Project: elliottmc/MediaPortal-1
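// Load the audio renderer settings from HKEY_CURRENT_USER; if the key does not exist yet, create it and write the built-in defaults.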
void AudioRendererSettings::LoadSettingsFromRegistry()
{
  USES_CONVERSION; // this is required for T2W macro
  
  Log("Loading settings from registry");

  HKEY hKey = NULL; // initialise so the check below is safe if RegOpenKeyEx fails
  char* lpData = new char[MAX_REG_LENGTH];

  // settings from Reclock - watch CPU usage when enabling these!
  /*bool usequickseek = false;
  bool useaafilter = false; //seems clearer without it
  int aafiltertaps = 56; //Def=32 doesnt matter coz its not used
  int seqms = 120; //reclock original is 82
  int seekwinms = 28; //reclock original is 28
  int overlapms = seekwinms; //reduces cutting sound if this is large
  int seqmslfe = 180; //larger value seems to preserve low frequencies better
  int seekwinmslfe = 42; //as percentage of seqms
  int overlapmslfe = seekwinmslfe; //reduces cutting sound if this is large
  */

  LPCTSTR WASAPIPreferredDeviceData = new TCHAR[MAX_REG_LENGTH];

  ZeroMemory((void*)WASAPIPreferredDeviceData, MAX_REG_LENGTH);

  // Try to access the setting root "Software\Team MediaPortal\Audio Renderer"
  RegOpenKeyEx(HKEY_CURRENT_USER, folder, NULL, KEY_ALL_ACCESS, &hKey);

  if (hKey)
  {
    ReadRegistryKeyDword(hKey, enableTimestretching, enableTimestretchingData);
    ReadRegistryKeyDword(hKey, WASAPIExclusive, WASAPIExclusiveData);
    ReadRegistryKeyDword(hKey, WASAPIUseEventMode, WASAPIUseEventModeData);
    ReadRegistryKeyDword(hKey, devicePeriod, devicePeriodData);
    ReadRegistryKeyDword(hKey, AC3Encoding, AC3EncodingData);
    ReadRegistryKeyDword(hKey, AC3bitrate, AC3bitrateData);
    ReadRegistryKeyDword(hKey, maxBias, maxBiasData);
    ReadRegistryKeyDword(hKey, minBias, minBiasData);
    ReadRegistryKeyDword(hKey, audioDelay, audioDelayData);
    ReadRegistryKeyDword(hKey, logSampleTimes, logSampleTimesData);
    ReadRegistryKeyDword(hKey, logDebug, logDebugData);
    ReadRegistryKeyDword(hKey, HWBasedRefClock, HWBasedRefClockData);
    ReadRegistryKeyDword(hKey, enableSyncAdjustment, enableSyncAdjustmentData);
    ReadRegistryKeyDword(hKey, forceSamplingRate, forceSamplingRateData);
    ReadRegistryKeyDword(hKey, forceBitDepth, forceBitDepthData);
    ReadRegistryKeyDword(hKey, resamplingQuality, resamplingQualityData);
    ReadRegistryKeyDword(hKey, speakerConfig, speakerConfigData);
    ReadRegistryKeyDword(hKey, forceChannelMixing, forceChannelMixingData);
    ReadRegistryKeyDword(hKey, releaseDeviceOnStop, releaseDeviceOnStopData);
    ReadRegistryKeyDword(hKey, expandMonoToStereo, expandMonoToStereoData);

    // SoundTouch quality settings
    ReadRegistryKeyDword(hKey, quality_USE_QUICKSEEK, quality_USE_QUICKSEEKData);
    ReadRegistryKeyDword(hKey, quality_USE_AA_FILTER, quality_USE_AA_FILTERData);
    ReadRegistryKeyDword(hKey, quality_AA_FILTER_LENGTH, quality_AA_FILTER_LENGTHData);
    ReadRegistryKeyDword(hKey, quality_SEQUENCE_MS, quality_SEQUENCE_MSData);
    ReadRegistryKeyDword(hKey, quality_SEEKWINDOW_MS, quality_SEEKWINDOW_MSData);
    ReadRegistryKeyDword(hKey, quality_OVERLAP_MS, quality_OVERLAP_MSData);

    ReadRegistryKeyString(hKey, WASAPIPreferredDevice, WASAPIPreferredDeviceData);

    Log("   EnableTimestrecthing:     %d", enableTimestretchingData);
    Log("   WASAPIExclusive:          %d", WASAPIExclusiveData);
    Log("   WASAPIUseEventMode:       %d", WASAPIUseEventModeData);
    Log("   AC3Encoding:              %d (0 = disabled, 1 = auto, 2 = forced)", AC3EncodingData);
    Log("   AC3bitrate:               %d", AC3bitrateData);
    Log("   MaxBias:                  %d", maxBiasData);
    Log("   MinBias:                  %d", minBiasData);
    Log("   AudioDelay:               %d", audioDelayData);
    Log("   LogSampleTimes:           %d", logSampleTimesData);
    Log("   LogDebug:                 %d", logDebugData);
    Log("   HWBasedRefClock:          %d", HWBasedRefClockData);
    Log("   EnableSyncAdjustment:     %d", enableSyncAdjustmentData);
    Log("   ForceSamplingRate:        %d", forceSamplingRateData);
    Log("   ForceBitDepth:            %d", forceBitDepthData);
    Log("   ResamplingQuality:        %s", ResamplingQualityAsString(resamplingQualityData));
    Log("   SpeakerConfig:            %d", speakerConfigData);
    Log("   ForceChannelMixing:       %d", forceChannelMixingData);
    Log("   ReleaseDeviceOnStop:      %d", releaseDeviceOnStopData);
    Log("   ExpandMonoToStereo:       %d", expandMonoToStereoData);
    Log("   quality_USE_QUICKSEEK:    %d", quality_USE_QUICKSEEKData);
    Log("   quality_USE_AA_FILTER:    %d", quality_USE_AA_FILTERData);
    Log("   quality_AA_FILTER_LENGTH: %d", quality_AA_FILTER_LENGTHData);
    Log("   quality_SEQUENCE_MS:      %d", quality_SEQUENCE_MSData);
    Log("   quality_SEEKWINDOW_MS:    %d", quality_SEEKWINDOW_MSData);
    Log("   quality_OVERLAP_MS:       %d", quality_OVERLAP_MSData);
    Log("   DevicePeriod:             %d (1 = minimal, 0 = driver default, other user defined)", devicePeriodData);
    Log("   WASAPIPreferredDevice:    %s", WASAPIPreferredDeviceData);

    m_bUseWASAPI = true;

    if (enableTimestretchingData > 0)
      m_bUseTimeStretching = true;
    else
      m_bUseTimeStretching = false;

    if (WASAPIExclusiveData > 0)
      m_WASAPIShareMode = AUDCLNT_SHAREMODE_EXCLUSIVE;
    else
      m_WASAPIShareMode = AUDCLNT_SHAREMODE_SHARED;

    if (WASAPIUseEventModeData > 0)
      m_bWASAPIUseEventMode = true;
    else
      m_bWASAPIUseEventMode = false;

    if (expandMonoToStereoData > 0)
      m_bExpandMonoToStereo = true;
    else
      m_bExpandMonoToStereo = false;

    if (AC3EncodingData == DISABLED || AC3EncodingData == AUTO || AC3EncodingData == FORCED)
      m_lAC3Encoding = AC3EncodingData;
    else
      m_lAC3Encoding = 0;

    m_dMaxBias = (double)maxBiasData / 10000.0;
    m_dMinBias = (double)minBiasData / 10000.0;

    m_lAudioDelay = audioDelayData;

    if (logSampleTimesData > 0)
      m_bLogSampleTimes = true;
    else
      m_bLogSampleTimes = false;
    
    if (logDebugData > 0)
      m_bLogDebug = true;
    else
      m_bLogDebug = false;

    if (HWBasedRefClockData > 0)
      m_bHWBasedRefClock = true;
    else
      m_bHWBasedRefClock = false;

    if (enableSyncAdjustmentData > 0)
      m_bEnableSyncAdjustment = true;
    else
      m_bEnableSyncAdjustment = false;

    bool AC3EncodingForced = AC3EncodingData == FORCED;
    bool sampleRateAllowed = AllowedValue(gAllowedSampleRates, sizeof(gAllowedSampleRates) / sizeof(int), forceSamplingRateData);
    bool bitDepthAllowed = AllowedValue(gAllowedBitDepths, sizeof(gAllowedBitDepths) / sizeof(int), forceBitDepthData);

    if (AC3EncodingForced)
    {
      if (sampleRateAllowed && (forceSamplingRateData != 48000 && forceSamplingRateData != 44100))
      {
        Log("   Warning: AC3 encoding forced and sampling rate set to non-matching!");
        sampleRateAllowed = false;
      }
    }
    else if (AC3EncodingData == AUTO && (forceSamplingRateData != 48000 && forceSamplingRateData != 44100 && forceSamplingRateData != 0))
      Log("   Warning: Using other than 48000 hz or 44100 hz sampling rates will disable AC3 encoding!");

    if (sampleRateAllowed || forceSamplingRateData == 0)
      m_nForceSamplingRate = forceSamplingRateData;
    else
    {
      m_nForceSamplingRate = 0;
      if (forceSamplingRateData != 0)
        Log("   invalid forced sample rate!");
    }

    if (bitDepthAllowed && !AC3EncodingForced || forceBitDepthData == 0)
      m_nForceBitDepth = forceBitDepthData;
    else
    {
      m_nForceBitDepth = 0;
      if (forceBitDepthData != 0)
        Log("   invalid forced bit depth!");
    }

    if (AllowedValue(gAllowedResamplingQualities, sizeof(gAllowedResamplingQualities) / sizeof(int), resamplingQualityData))
      m_nResamplingQuality = resamplingQualityData;
    else
    {
      m_nResamplingQuality = 4;
      Log("   invalid resampling quality setting, using 4 (SRC_LINEAR)");
    }

    if (AllowedValue(gAllowedAC3bitrates, sizeof(gAllowedAC3bitrates) / sizeof(int), AC3bitrateData))
      m_AC3bitrate = AC3bitrateData * 1000;
    else
    {
      m_AC3bitrate = DEFAULT_AC3_BITRATE;
      Log("   invalid AC3 bitrate, using 448");
    }

    m_hnsPeriod = devicePeriodData;

    if (forceChannelMixingData > 0)
      m_bForceChannelMixing = true;
    else
      m_bForceChannelMixing = false;

    // TODO validate channel mask
    if (speakerConfigData > 0)
    {
      m_lSpeakerConfig = speakerConfigData;
      m_lSpeakerCount = ChannelCount(m_lSpeakerConfig);

      if (AC3EncodingForced && m_lSpeakerCount > 6 && m_bForceChannelMixing)
      {
        m_lSpeakerConfig = KSAUDIO_SPEAKER_5POINT1_SURROUND;
        m_lSpeakerCount = 6;
        Log("   Warning: incompatible settings. ForceChannelMixing + AC3 encoding forced + more than 6 channels");
      }
    }
    else
      m_lSpeakerConfig = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;

    if (releaseDeviceOnStopData > 0)
      m_bReleaseDeviceOnStop = true;
    else
      m_bReleaseDeviceOnStop = false;

    if (quality_USE_QUICKSEEKData > 0)
      m_bQuality_USE_QUICKSEEK = true;
    else
      m_bQuality_USE_QUICKSEEK = false;

    if (quality_USE_AA_FILTERData > 0)
      m_bQuality_USE_AA_FILTER = true;
    else
      m_bQuality_USE_AA_FILTER = false;

    delete[] m_wWASAPIPreferredDeviceId;
    m_wWASAPIPreferredDeviceId = new WCHAR[MAX_REG_LENGTH];
    
    wcsncpy(m_wWASAPIPreferredDeviceId, T2W(WASAPIPreferredDeviceData), MAX_REG_LENGTH);

    delete[] WASAPIPreferredDeviceData;
  }
  else // no settings in registry, create default values
  {
    Log("Failed to open %s", folder);
    Log("Initializing registry with default settings");

    LONG result = RegCreateKeyEx(HKEY_CURRENT_USER, folder, 0, NULL, REG_OPTION_NON_VOLATILE,
                                  KEY_ALL_ACCESS, NULL, &hKey, NULL);

    if (result == ERROR_SUCCESS) 
    {
      Log("Success creating master key");
      WriteRegistryKeyDword(hKey, enableTimestretching, enableTimestretchingData);
      WriteRegistryKeyDword(hKey, WASAPIExclusive, WASAPIExclusiveData);
      WriteRegistryKeyDword(hKey, WASAPIUseEventMode, WASAPIUseEventModeData);
      WriteRegistryKeyDword(hKey, devicePeriod, devicePeriodData);
      WriteRegistryKeyDword(hKey, AC3Encoding, AC3EncodingData);
      WriteRegistryKeyDword(hKey, AC3bitrate, AC3bitrateData);
      WriteRegistryKeyDword(hKey, maxBias, maxBiasData);
      WriteRegistryKeyDword(hKey, minBias, minBiasData);
      WriteRegistryKeyDword(hKey, audioDelay, audioDelayData);
      WriteRegistryKeyDword(hKey, logSampleTimes, logSampleTimesData);
      WriteRegistryKeyDword(hKey, logDebug, logDebugData);
      WriteRegistryKeyDword(hKey, HWBasedRefClock, HWBasedRefClockData);
      WriteRegistryKeyDword(hKey, enableSyncAdjustment, enableSyncAdjustmentData);
      WriteRegistryKeyDword(hKey, forceSamplingRate, forceSamplingRateData);
      WriteRegistryKeyDword(hKey, forceBitDepth, forceBitDepthData);
      WriteRegistryKeyDword(hKey, resamplingQuality, resamplingQualityData);
      WriteRegistryKeyDword(hKey, speakerConfig, speakerConfigData);
      WriteRegistryKeyDword(hKey, forceChannelMixing, forceChannelMixingData);
      WriteRegistryKeyDword(hKey, releaseDeviceOnStop, releaseDeviceOnStopData);
      WriteRegistryKeyDword(hKey, expandMonoToStereo, expandMonoToStereoData);
      WriteRegistryKeyDword(hKey, quality_USE_QUICKSEEK, quality_USE_QUICKSEEKData);
      WriteRegistryKeyDword(hKey, quality_USE_AA_FILTER, quality_USE_AA_FILTERData);
      WriteRegistryKeyDword(hKey, quality_AA_FILTER_LENGTH, quality_AA_FILTER_LENGTHData);
      WriteRegistryKeyDword(hKey, quality_SEQUENCE_MS, quality_SEQUENCE_MSData);
      WriteRegistryKeyDword(hKey, quality_SEEKWINDOW_MS, quality_SEEKWINDOW_MSData);
      WriteRegistryKeyDword(hKey, quality_OVERLAP_MS, quality_OVERLAP_MSData);

      delete[] m_wWASAPIPreferredDeviceId;
      m_wWASAPIPreferredDeviceId = new WCHAR[MAX_REG_LENGTH];
      wcsncpy(m_wWASAPIPreferredDeviceId, T2W(WASAPIPreferredDeviceData), MAX_REG_LENGTH);

      WriteRegistryKeyString(hKey, WASAPIPreferredDevice, WASAPIPreferredDeviceData);
    } 
    else 
      Log("Error creating master key %d", result);
  }
  
  delete[] lpData;
  RegCloseKey (hKey);
}