// If a file is dropped onto the playlist from an Explorer window or from any other
// data source, we process the file and add it to the playlist. Subfolders are opened
// recursively and the files inside them are processed too.
bool FileDropTarget::OnDropFiles(wxCoord x, wxCoord y, const wxArrayString &files)
{
    wxArrayString dirFiles;
    wxDir myDir;
    int unsupportedFiles = 0;

    for (size_t i = 0; i < files.GetCount(); ++i)
    {
        if (myDir.Exists(files[i]))
        {
            // A directory was dropped: collect every file beneath it.
            myDir.Open(files[i]);
            size_t itemCount = myDir.GetAllFiles(files[i], &dirFiles);
            for (size_t n = 0; n < itemCount; n++)
            {
                if (IsFormatSupported(dirFiles[n]))
                    m_list.AddToPlayList(dirFiles[n]);
                else
                    unsupportedFiles++;
            }
        }
        else
        {
            if (IsFormatSupported(files[i]))
                m_list.AddToPlayList(files[i]);
            else
                unsupportedFiles++;
        }
    }

    if (unsupportedFiles != 0)
        wxMessageBox(wxString::Format("%d Files couldn't be added\nReason: Unsupported Format", unsupportedFiles));

    return true;
}
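// A minimal wiring sketch (not from the source): how such a drop target is
// typically attached in wxWidgets. "PlayListCtrl" and the FileDropTarget
// constructor signature are assumed placeholders.
PlayListCtrl* playlist = new PlayListCtrl(parentWindow);
// The window takes ownership of the drop target and deletes it with the window.
playlist->SetDropTarget(new FileDropTarget(*playlist));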
HRESULT __stdcall CMyDataObject::GetData(FORMATETC *pFormatEtc, STGMEDIUM *pStgMedium)
{
    if (!IsFormatSupported(pFormatEtc))
        return DV_E_FORMATETC;

    // Copy the storage medium data.
    pStgMedium->tymed = m_StgMedium.tymed;
    pStgMedium->pUnkForRelease = 0;
    pStgMedium->hGlobal = GlobalAlloc(GMEM_SHARE, sizeof(DROPFILES) + (TotalLength + 1) * sizeof(TCHAR));

    // Build the DROPFILES structure for the file drag-and-drop:
    // a header followed by a double-null-terminated list of paths.
    DROPFILES *DP = (LPDROPFILES) GlobalLock(pStgMedium->hGlobal);
    ZeroMemory(DP, sizeof(DROPFILES));
    DP->fWide = TRUE;
    DP->pFiles = sizeof(DROPFILES);

    LPTSTR Files = (LPTSTR)((PBYTE)DP + DP->pFiles);
    for (size_t i = 0; i < m_FileItems.GetCount(); i++)
    {
        lstrcpy(Files, m_FileItems[i]);
        Files += m_FileItems[i].GetLength() + 1;
    }
    *Files = 0; // final null terminates the list

    GlobalUnlock(pStgMedium->hGlobal);
    return S_OK;
}
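// A sketch of the IsFormatSupported() helper the data object relies on
// (assumed, not shown in the source): a file-list data object typically
// answers only for CF_HDROP delivered through an HGLOBAL.
bool CMyDataObject::IsFormatSupported(FORMATETC *pFormatEtc)
{
    return pFormatEtc != NULL
        && pFormatEtc->cfFormat == CF_HDROP
        && pFormatEtc->dwAspect == DVASPECT_CONTENT
        && (pFormatEtc->tymed & TYMED_HGLOBAL) != 0;
}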
PVMFStatus PVMFAvcEncPort::SetFormat(PVMFFormatType aFormat)
{
    LOG_STACK_TRACE((0, "PVMFAvcEncPort::SetFormat: aFormat=%d", aFormat));
    if (!IsFormatSupported(aFormat))
        return PVMFFailure;

    iFormat = aFormat;
    return PVMFSuccess;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OSStatus OALBuffer::AddAudioDataStatic(char* inAudioData, UInt32 inAudioDataSize, ALenum format, ALsizei freq)
{
#if LOG_VERBOSE
    DebugMessageN5("OALBuffer::AddAudioDataStatic() - OALBuffer:inAudioData:inAudioDataSize:format:freq = %ld:%p:%ld:%d:%d",
                   (long int) mSelfToken, inAudioData, (long int) inAudioDataSize, format, freq);
#endif
#if LOG_EXTRAS
    DebugMessage("AddAudioDataStatic called: Converting Data Now");
#endif

    CAGuard::Locker bufferLock(mBufferLock);

    try {
        if (!IsFormatSupported(format))
            throw ((OSStatus) AL_INVALID_VALUE); // this is not a valid buffer token or is an invalid format

        // don't allow if the buffer is in a queue
        if (mAttachedSourceList->Size() > 0)
        {
            DebugMessage("AddAudioDataStatic ATTACHMENT > 0");
            throw ((OSStatus) AL_INVALID_OPERATION);
        }

        mPreConvertedDataSize = (UInt32) inAudioDataSize;
        OSStatus result = noErr;

        // if this buffer was using memory created by the library, free it now and initialize mData
        if (!mAppOwnsBufferMemory && (mData != NULL))
        {
            free(mData);
            mData = NULL;
        }

        mData = (UInt8*) inAudioData;
        mDataSize = (UInt32) inAudioDataSize;

        result = FillInASBD(mDataFormat, format, freq);
        THROW_RESULT

        mPreConvertedDataFormat.SetFrom(mDataFormat); // make sure they are the same so original format info can be returned to caller
    }
    catch (OSStatus result) {
        mData = NULL;
        mAppOwnsBufferMemory = false;
        DebugMessageN1("AddAudioDataStatic Failed - err = %ld\n", (long int) result);
        alSetError(result);
    }
    catch (...) {
        mData = NULL;
        mAppOwnsBufferMemory = false;
        DebugMessage("AddAudioDataStatic Failed");
        alSetError(AL_INVALID_OPERATION);
    }

    mAppOwnsBufferMemory = true;
    return noErr;
}
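// On Apple platforms this static path is normally reached through the
// alBufferDataStatic extension. A hedged caller-side sketch; bufferID,
// pcmData and pcmSize are placeholders. Unlike alBufferData, the library
// does NOT copy the memory, so it must stay valid until the buffer is deleted.
alBufferDataStaticProcPtr alBufferDataStatic =
    (alBufferDataStaticProcPtr) alGetProcAddress("alBufferDataStatic");
if (alBufferDataStatic != NULL)
    alBufferDataStatic(bufferID, AL_FORMAT_MONO16, pcmData, pcmSize, 44100);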
STDMETHODIMP CAVIOutputPin::SetTimeFormat(const GUID * pFormat)
{
    if (IsFormatSupported(pFormat) != S_OK)
        return E_FAIL;

    if (m_currentTimeFormat != *pFormat)
        m_pSplitter->SetTimeFormatForAllPins(pFormat);

    return S_OK;
}
void FVulkanDevice::MapFormatSupport(EPixelFormat UEFormat, VkFormat VulkanFormat)
{
    FPixelFormatInfo& FormatInfo = GPixelFormats[UEFormat];
    FormatInfo.PlatformFormat = VulkanFormat;
    FormatInfo.Supported = IsFormatSupported(VulkanFormat);

    if (!FormatInfo.Supported)
    {
        UE_LOG(LogVulkanRHI, Warning, TEXT("EPixelFormat(%d) is not supported"), (int32)UEFormat);
    }
}
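// A plausible sketch of the IsFormatSupported() query this depends on
// (assumption, not UE source; the "Gpu" physical-device member is assumed):
// a format counts as usable when the device reports any feature bits for it.
bool FVulkanDevice::IsFormatSupported(VkFormat Format) const
{
    VkFormatProperties Properties;
    vkGetPhysicalDeviceFormatProperties(Gpu, Format, &Properties);
    return Properties.linearTilingFeatures != 0
        || Properties.optimalTilingFeatures != 0
        || Properties.bufferFeatures != 0;
}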
HRESULT __stdcall CMyDataObject::QueryGetData(FORMATETC *pFormatEtc) { return IsFormatSupported(pFormatEtc) ? S_OK : DV_E_FORMATETC; }
STDMETHODIMP CStreamSwitcherPassThru::IsFormatSupported(const GUID* pFormat) { CallPeerSeeking(IsFormatSupported(pFormat)); }
STDMETHODIMP CAMRSplitter::IsUsingTimeFormat(const GUID* pFormat) {return IsFormatSupported(pFormat);}
STDMETHODIMP CAMRSplitter::SetTimeFormat(const GUID* pFormat) {return S_OK == IsFormatSupported(pFormat) ? S_OK : E_INVALIDARG;}
HRESULT CWASAPIRenderFilter::InitAudioClient()
{
    Log("WASAPIRenderFilter::InitAudioClient");
    HRESULT hr = S_OK;

    if (m_pSettings->m_hnsPeriod == 0 || m_pSettings->m_hnsPeriod == 1)
    {
        REFERENCE_TIME defaultPeriod(0);
        REFERENCE_TIME minimumPeriod(0);

        hr = m_pAudioClient->GetDevicePeriod(&defaultPeriod, &minimumPeriod);
        if (SUCCEEDED(hr))
        {
            if (m_pSettings->m_hnsPeriod == 0)
                m_pSettings->m_hnsPeriod = defaultPeriod;
            else
                m_pSettings->m_hnsPeriod = minimumPeriod;
            Log("WASAPIRenderFilter::InitAudioClient using device period from driver %I64u ms", m_pSettings->m_hnsPeriod / 10000);
        }
        else
        {
            Log("WASAPIRenderFilter::InitAudioClient failed to get device period from driver (0x%08x) - using 50 ms", hr);
            m_pSettings->m_hnsPeriod = 500000; // 50 ms is the best according to James @Slysoft
        }
    }

    WAVEFORMATEXTENSIBLE* pwfxAccepted = NULL;
    hr = IsFormatSupported(m_pInputFormat, &pwfxAccepted);
    if (FAILED(hr))
    {
        SAFE_DELETE_WAVEFORMATEX(pwfxAccepted);
        return hr;
    }

    GetBufferSize((WAVEFORMATEX*)pwfxAccepted, &m_pSettings->m_hnsPeriod);

    if (SUCCEEDED(hr))
        hr = m_pAudioClient->Initialize(m_pSettings->m_WASAPIShareMode, m_dwStreamFlags,
                                        m_pSettings->m_hnsPeriod, m_pSettings->m_hnsPeriod,
                                        (WAVEFORMATEX*)pwfxAccepted, NULL);

    if (FAILED(hr) && hr != AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED)
    {
        Log("WASAPIRenderFilter::InitAudioClient Initialize failed (0x%08x)", hr);
        SAFE_DELETE_WAVEFORMATEX(pwfxAccepted);
        return hr;
    }

    if (hr == S_OK)
    {
        SAFE_RELEASE(m_pAudioClock);
        hr = m_pAudioClient->GetService(__uuidof(IAudioClock), (void**)&m_pAudioClock);
        if (SUCCEEDED(hr))
            m_pAudioClock->GetFrequency(&m_nHWfreq);
        else
            Log("WASAPIRenderFilter::IAudioClock not found!");
    }

    if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED)
    {
        // if the buffer size was not aligned, need to do the alignment dance
        Log("WASAPIRenderFilter::InitAudioClient Buffer size not aligned. Realigning");

        // get the buffer size, which will be aligned
        hr = m_pAudioClient->GetBufferSize(&m_nFramesInBuffer);

        // throw away this IAudioClient
        SAFE_RELEASE(m_pAudioClient);

        // calculate the new aligned periodicity
        m_pSettings->m_hnsPeriod =                    // hns =
            (REFERENCE_TIME)(
            10000.0 *                                 // (hns / ms) *
            1000 *                                    // (ms / s) *
            m_nFramesInBuffer /                       // frames /
            m_pInputFormat->Format.nSamplesPerSec +   // (frames / s)
            0.5                                       // rounding
            );

        if (SUCCEEDED(hr))
            hr = CreateAudioClient();

        Log("WASAPIRenderFilter::InitAudioClient Trying again with periodicity of %I64u hundred-nanoseconds, or %u frames",
            m_pSettings->m_hnsPeriod, m_nFramesInBuffer);

        if (SUCCEEDED(hr))
            hr = m_pAudioClient->Initialize(m_pSettings->m_WASAPIShareMode, m_dwStreamFlags,
                                            m_pSettings->m_hnsPeriod, m_pSettings->m_hnsPeriod,
                                            (WAVEFORMATEX*)pwfxAccepted, NULL);

        if (FAILED(hr))
        {
            Log("WASAPIRenderFilter::InitAudioClient Failed to reinitialize the audio client");
            SAFE_DELETE_WAVEFORMATEX(pwfxAccepted);
            return hr;
        }
        else
        {
            SAFE_RELEASE(m_pAudioClock);
            hr = m_pAudioClient->GetService(__uuidof(IAudioClock), (void**)&m_pAudioClock);
            if (FAILED(hr))
                Log("WASAPIRenderFilter::IAudioClock not found!");
            else
                m_pAudioClock->GetFrequency(&m_nHWfreq);
        }
    } // if (AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED == hr)

    // get the buffer size, which is aligned
    if (SUCCEEDED(hr))
        hr = m_pAudioClient->GetBufferSize(&m_nFramesInBuffer);

    if (SUCCEEDED(hr))
        hr = m_pAudioClient->GetService(__uuidof(IAudioRenderClient), (void**)(&m_pRenderClient));

    if (FAILED(hr))
        Log("WASAPIRenderFilter::InitAudioClient service initialization failed (0x%08x)", hr);
    else
        Log("WASAPIRenderFilter::InitAudioClient service initialization success");

    if (m_pSettings->m_bWASAPIUseEventMode)
    {
        hr = m_pAudioClient->SetEventHandle(m_hDataEvent);
        if (FAILED(hr))
        {
            Log("WASAPIRenderFilter::InitAudioClient SetEventHandle failed (0x%08x)", hr);
            SAFE_DELETE_WAVEFORMATEX(pwfxAccepted);
            return hr;
        }
    }

    REFERENCE_TIME latency(0);
    m_pAudioClient->GetStreamLatency(&latency);
    Log("WASAPIRenderFilter::InitAudioClient device reported latency %I64u ms - buffer based latency %I64u ms",
        latency / 10000, Latency() / 10000);

    // A dynamic format change requires restarting the audio client
    if (m_state != StateStopped)
        StartAudioClient();

    m_bDeviceInitialized = true;

    SAFE_DELETE_WAVEFORMATEX(pwfxAccepted);
    return hr;
}
STDMETHODIMP CStreamDriveThruFilter::SetTimeFormat(const GUID* pFormat) { return S_OK == IsFormatSupported(pFormat) ? S_OK : E_INVALIDARG; }
STDMETHODIMP CBaseGraph::SetTimeFormat(const GUID* pFormat) { return S_OK == IsFormatSupported(pFormat) ? S_OK : E_INVALIDARG; }
STDMETHODIMP CBaseMuxerFilter::IsUsingTimeFormat(const GUID* pFormat) { return IsFormatSupported(pFormat); }
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ALUTAPI ALvoid ALUTAPIENTRY alutLoadWAVFile(ALbyte *file, ALenum *format, ALvoid **data, ALsizei *size, ALsizei *freq)
{
    OSStatus    err = noErr;
    AudioFileID audioFile = 0;
    FSRef       fsRef;

    *data = NULL; // in case of failure, do not return some uninitialized value as a bogus address

    if (IsRelativePath(file))
    {
        char absolutePath[256];
        // we need to make a full path here so FSPathMakeRef() works properly
        MakeAbsolutePath(file, absolutePath, 256);
        // create an FSRef from the file parameter
        err = FSPathMakeRef((const UInt8 *) absolutePath, &fsRef, NULL);
    }
    else
        err = FSPathMakeRef((const UInt8 *) file, &fsRef, NULL);

    if (err == noErr)
    {
        err = AudioFileOpen(&fsRef, fsRdPerm, 0, &audioFile);
        if (err == noErr)
        {
            UInt32 dataSize;
            CAStreamBasicDescription asbd;
            dataSize = sizeof(CAStreamBasicDescription);
            AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &dataSize, &asbd);

            *format = GetOALFormatFromASBD(asbd);
            if (IsFormatSupported(*format))
            {
                *freq = (UInt32) asbd.mSampleRate;

                SInt64 audioDataSize = 0;
                dataSize = sizeof(audioDataSize);
                err = AudioFileGetProperty(audioFile, kAudioFilePropertyAudioDataByteCount, &dataSize, &audioDataSize);
                if (err == noErr)
                {
                    *size = audioDataSize;
                    *data = NULL;
                    *data = calloc(1, audioDataSize);
                    if (*data)
                    {
                        dataSize = audioDataSize;
                        err = AudioFileReadBytes(audioFile, false, 0, &dataSize, *data);

                        if ((asbd.mFormatID == kAudioFormatLinearPCM) && (asbd.mBitsPerChannel > 8))
                        {
                            // we just got 16 bit pcm data out of a WAVE file on a big endian platform, so endian swap the data
                            AudioConverterRef converter;
                            CAStreamBasicDescription outFormat = asbd;
                            void *tempData = NULL;

                            // set the format to big endian
                            outFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;

                            // make some room for the converted data
                            tempData = calloc(1, audioDataSize);

                            err = AudioConverterNew(&asbd, &outFormat, &converter);
                            if ((err == noErr) && (tempData != NULL))
                            {
                                UInt32 bufferSize = audioDataSize;
                                err = AudioConverterConvertBuffer(converter, audioDataSize, *data, &bufferSize, tempData);
                                if (err == noErr)
                                    memcpy(*data, tempData, audioDataSize);
                                AudioConverterDispose(converter);
                            }
                            if (tempData)
                                free(tempData);
                        }
                    }
                }
            }
            err = AudioFileClose(audioFile);
        }
    }
}
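// Typical caller-side usage (sketch; error handling elided, "sound.wav" is a
// placeholder path). This loader allocates the sample memory with calloc(),
// so the caller releases it with free() once OpenAL has copied it.
ALenum  format;
ALvoid* data = NULL;
ALsizei size, freq;
alutLoadWAVFile((ALbyte *) "sound.wav", &format, &data, &size, &freq);
if (data != NULL)
{
    ALuint buffer;
    alGenBuffers(1, &buffer);
    alBufferData(buffer, format, data, size, freq); // alBufferData copies the samples
    free(data);
}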
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OSStatus OALBuffer::AddAudioData(char* inAudioData, UInt32 inAudioDataSize, ALenum format, ALsizei freq, bool inPreConvertToHalFormat)
{
#if LOG_VERBOSE
    DebugMessageN6("OALBuffer::AddAudioData() - OALBuffer:inAudioData:inAudioDataSize:format:freq:inPreConvertToHalFormat = %ld:%p:%ld:%d:%d:%d",
                   (long int) mSelfToken, inAudioData, (long int) inAudioDataSize, format, freq, inPreConvertToHalFormat);
#endif
    // creates memory if needed
    // reallocs if needed
    // returns an error if buffer is in use

    CAGuard::Locker bufferLock(mBufferLock);

    try {
        if (!IsFormatSupported(format))
            throw ((OSStatus) AL_INVALID_VALUE); // this is not a valid buffer token or is an invalid format

#if USE_SOURCE_LIST_MUTEX
        bool wasLocked = mSourceListGuard.Lock();
#endif
        // don't allow if the buffer is in a queue
        UInt32 attachedCount = mAttachedSourceList->Size();
#if USE_SOURCE_LIST_MUTEX
        if (wasLocked) mSourceListGuard.Unlock();
#endif

        if (attachedCount > 0)
        {
            DebugMessage("WAITING: AddAudioData ---> WaitOneRenderCycle");
            // Let a render cycle go by and try again
            WaitOneRenderCycle();

#if USE_SOURCE_LIST_MUTEX
            wasLocked = mSourceListGuard.Lock();
#endif
            attachedCount = mAttachedSourceList->Size();
#if USE_SOURCE_LIST_MUTEX
            if (wasLocked) mSourceListGuard.Unlock();
#endif

            if (attachedCount > 0)
            {
                DebugMessageN2("OALBuffer::AddAudioData: buffer ATTACHMENT > 0 - mSelfToken:mAttachedSourceList->Size() = %ld:%ld",
                               (long int) mSelfToken, (long int) mAttachedSourceList->Size());
                throw ((OSStatus) AL_INVALID_OPERATION);
            }
        }

        if (mAppOwnsBufferMemory)
        {
            mData = NULL; // we were using the app's memory before, so just initialize mData in case we fail
            mAppOwnsBufferMemory = false;
        }

        mPreConvertedDataSize = (UInt32) inAudioDataSize;
        // do not pre-convert stereo sounds, let the AC do the deinterleaving
        OSStatus result = noErr;
        if (!inPreConvertToHalFormat || ((format == AL_FORMAT_STEREO16) || (format == AL_FORMAT_STEREO8)))
        {
            if (mData != NULL)
            {
                if (mDataSize != (UInt32) inAudioDataSize)
                {
                    mDataSize = (UInt32) inAudioDataSize;
                    void *newDataPtr = realloc(mData, mDataSize);
                    mData = (UInt8 *) newDataPtr;
                }
            }
            else
            {
                mDataSize = (UInt32) inAudioDataSize;
                mData = (UInt8 *) malloc(mDataSize);
            }

            if (mData)
            {
                result = FillInASBD(mDataFormat, format, freq);
                THROW_RESULT

                mPreConvertedDataFormat.SetFrom(mDataFormat); // make sure they are the same so original format info can be returned to caller
                memcpy(mData, inAudioData, mDataSize);
            }
        }
        else
        {
#if LOG_EXTRAS
            DebugMessage("alBufferData called: Converting Data Now");
#endif
            result = ConvertDataForBuffer(inAudioData, inAudioDataSize, format, freq); // convert the data to the mixer's format and copy to the buffer
            THROW_RESULT
        }
    }
    catch (OSStatus result) {
        DebugMessageN1("OALBuffer::AddAudioData Failed - err = %ld\n", (long int) result);
        alSetError(result);
        throw result;
    }
    catch (...) {
        DebugMessage("OALBuffer::AddAudioData Failed");
        alSetError(AL_INVALID_OPERATION);
        throw -1;
    }

    return noErr;
}
// Format negotiation
HRESULT CWASAPIRenderFilter::NegotiateFormat(const WAVEFORMATEXTENSIBLE* pwfx, int nApplyChangesDepth, ChannelOrder* pChOrder)
{
    if (!pwfx)
        return VFW_E_TYPE_NOT_ACCEPTED;

    if (FormatsEqual(pwfx, m_pInputFormat))
    {
        *pChOrder = m_chOrder;
        return S_OK;
    }

    bool bApplyChanges = nApplyChangesDepth != 0;

    bool bitDepthForced = (m_pSettings->m_nForceBitDepth != 0 && m_pSettings->m_nForceBitDepth != pwfx->Format.wBitsPerSample);
    bool sampleRateForced = (m_pSettings->m_nForceSamplingRate != 0 && m_pSettings->m_nForceSamplingRate != pwfx->Format.nSamplesPerSec);

    // Forced bit depth / sample rate cannot be applied to Dolby Digital pass-through,
    // nor forced bit depth to float input (parentheses match the original evaluation order)
    if (((bitDepthForced || sampleRateForced) && pwfx->SubFormat == KSDATAFORMAT_SUBTYPE_IEC61937_DOLBY_DIGITAL) ||
        (pwfx->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT && bitDepthForced))
        return VFW_E_TYPE_NOT_ACCEPTED;

    if ((bitDepthForced && m_pSettings->m_nForceBitDepth != pwfx->Format.wBitsPerSample) ||
        (sampleRateForced && m_pSettings->m_nForceSamplingRate != pwfx->Format.nSamplesPerSec))
        return VFW_E_TYPE_NOT_ACCEPTED;

    CAutoLock lock(&m_csResources);

    HRESULT hr = CreateAudioClient();
    if (FAILED(hr))
    {
        Log("CWASAPIRenderFilter::NegotiateFormat Error, audio client not initialized: (0x%08x)", hr);
        return VFW_E_CANNOT_CONNECT;
    }

    WAVEFORMATEXTENSIBLE* pwfxAccepted = NULL;
    hr = IsFormatSupported(pwfx, &pwfxAccepted);
    if (FAILED(hr))
    {
        SAFE_DELETE_WAVEFORMATEX(pwfxAccepted);
        return hr;
    }

    if (bApplyChanges)
    {
        LogWaveFormat(pwfx, "REN - applying ");

        // Stop and discard audio client
        StopAudioClient();
        SAFE_RELEASE(m_pRenderClient);
        SAFE_RELEASE(m_pAudioClock);
        SAFE_RELEASE(m_pAudioClient);

        // We must use the incoming format so the WAVEFORMATEXTENSIBLE to WAVEFORMATEX difference
        // that some audio drivers require does not cause an infinite loop of format changes
        SetInputFormat(pwfx);

        // Reinitialize audio client
        hr = CreateAudioClient(true);
    }
    else
        LogWaveFormat(pwfx, "Input format ");

    m_chOrder = *pChOrder = DS_ORDER;

    SAFE_DELETE_WAVEFORMATEX(pwfxAccepted);
    return hr;
}
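// For example (hypothetical settings): if m_nForceBitDepth is 24 and the pin
// offers 16-bit PCM, the offer is rejected with VFW_E_TYPE_NOT_ACCEPTED,
// presumably so negotiation can continue with a type matching the forced settings.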
STDMETHODIMP CBaseGraph::IsUsingTimeFormat(const GUID* pFormat) { return IsFormatSupported(pFormat); }
STDMETHODIMP StaticSourceFilter::IsUsingTimeFormat(const GUID * pFormat) { return IsFormatSupported(pFormat); }
bool SoundBuffer::Create(AudioFormat format, unsigned int sampleCount, unsigned int sampleRate, const Int16* samples)
{
    Destroy();

#if NAZARA_AUDIO_SAFE
    if (!IsFormatSupported(format))
    {
        NazaraError("Audio format is not supported");
        return false;
    }

    if (sampleCount == 0)
    {
        NazaraError("Sample count must be different from zero");
        return false;
    }

    if (sampleRate == 0)
    {
        NazaraError("Sample rate must be different from zero");
        return false;
    }

    if (!samples)
    {
        NazaraError("Invalid sample source");
        return false;
    }
#endif

    // Flush the OpenAL error stack
    while (alGetError() != AL_NO_ERROR);

    ALuint buffer;
    alGenBuffers(1, &buffer);

    if (alGetError() != AL_NO_ERROR)
    {
        NazaraError("Failed to create OpenAL buffer");
        return false;
    }

    alBufferData(buffer, OpenAL::AudioFormat[format], samples, sampleCount*sizeof(Int16), sampleRate);

    if (alGetError() != AL_NO_ERROR)
    {
        alDeleteBuffers(1, &buffer);
        NazaraError("Failed to set OpenAL buffer");
        return false;
    }

    m_impl = new SoundBufferImpl;
    m_impl->buffer = buffer;
    m_impl->duration = (1000*sampleCount / (format * sampleRate)); // the format value maps to the channel count
    m_impl->format = format;
    m_impl->sampleCount = sampleCount;
    m_impl->sampleRate = sampleRate;

    m_impl->samples.reset(new Int16[sampleCount]);
    std::memcpy(&m_impl->samples[0], samples, sampleCount*sizeof(Int16));

    return true;
}
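// A minimal usage sketch; the mono enumerator name "AudioFormat_Mono" is an
// assumption (the exact spelling depends on the engine version). One second
// of a 440 Hz sine tone at 44.1 kHz:
#include <cmath>
#include <vector>

const unsigned int sampleRate  = 44100;
const unsigned int sampleCount = sampleRate; // one second, mono
std::vector<Int16> samples(sampleCount);
for (unsigned int i = 0; i < sampleCount; ++i)
    samples[i] = static_cast<Int16>(32000.0 * std::sin(2.0 * 3.14159265358979 * 440.0 * i / sampleRate));

SoundBuffer buffer;
if (buffer.Create(AudioFormat_Mono, sampleCount, sampleRate, samples.data()))
{
    // ready to be attached to a sound source
}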
STDMETHODIMP CStreamDriveThruFilter::IsUsingTimeFormat(const GUID* pFormat) { return IsFormatSupported(pFormat); }