Texture* MemoryCapture::LockTexture()
{
    LPVOID address = NULL;

    if(!bInitialized || !copyData || !texture)
        return NULL;

    OSEnterMutex(hMemoryMutex);

    curTexture = copyData->lastRendered;

    if(curTexture < 2)
    {
        DWORD nextTexture = (curTexture == 1) ? 0 : 1;

        if(WaitForSingleObject(textureMutexes[curTexture], 0) == WAIT_OBJECT_0)
            hMutex = textureMutexes[curTexture];
        else if(WaitForSingleObject(textureMutexes[nextTexture], 0) == WAIT_OBJECT_0)
        {
            hMutex = textureMutexes[nextTexture];
            curTexture = nextTexture;
        }

        if(hMutex)
        {
            BYTE *lpData;
            UINT texPitch;

            if(texture->Map(lpData, texPitch))
            {
                if(pitch == texPitch)
                    SSECopy(lpData, textureBuffers[curTexture], pitch*height);
                else
                {
                    UINT bestPitch = MIN(pitch, texPitch);
                    LPBYTE input = textureBuffers[curTexture];

                    for(UINT y=0; y<height; y++)
                    {
                        LPBYTE curInput  = ((LPBYTE)input)  + (pitch*y);
                        LPBYTE curOutput = ((LPBYTE)lpData) + (texPitch*y);

                        SSECopy(curOutput, curInput, bestPitch);
                    }
                }

                texture->Unmap();
            }

            ReleaseMutex(hMutex);
        }

        hMutex = NULL;
    }

    OSLeaveMutex(hMemoryMutex);
    return texture;
}
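The handoff above relies on a non-blocking try-lock over two shared buffers: the reader grabs whichever buffer mutex is free right now and never stalls the producer. A minimal sketch of that pattern, with hypothetical handle and buffer names rather than the real capture globals:

#include <windows.h>

// Hypothetical names for illustration; the real code uses textureMutexes[2]
// and textureBuffers[2] shared with the injected capture hook.
HANDLE bufferMutex[2];
BYTE   *sharedBuffer[2];

// Try to lock the preferred buffer, fall back to the other one, never block.
// Returns the locked index, or -1 if both are busy (skip this frame).
int TryLockLatestBuffer(int preferred)
{
    int other = (preferred == 1) ? 0 : 1;

    if(WaitForSingleObject(bufferMutex[preferred], 0) == WAIT_OBJECT_0)
        return preferred;                   // got the most recently written buffer
    if(WaitForSingleObject(bufferMutex[other], 0) == WAIT_OBJECT_0)
        return other;                       // settle for the older one
    return -1;                              // both held by the producer
}

The caller must ReleaseMutex() on the corresponding handle once its copy is done, exactly as LockTexture() does.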
void MMDeviceAudioSource::ReleaseBuffer()
{
    UINT sampleSizeFloats = sampleWindowSize*GetChannelCount();

    if (inputBufferSize > sampleSizeFloats)
        SSECopy(inputBuffer.Array(), inputBuffer.Array()+sampleSizeFloats, (inputBufferSize-sampleSizeFloats)*sizeof(float));

    inputBufferSize -= sampleSizeFloats;
}
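ReleaseBuffer() consumes one window of samples from the front of inputBuffer by shifting the remainder down; because the destination starts before the source, a forward copy over the overlapping ranges is safe here. The same consume step with plain CRT calls, using hypothetical names:

#include <cstring>
#include <vector>

// Drop 'consumed' floats from the front of a FIFO of interleaved samples.
// memmove is used because source and destination overlap.
void ConsumeFront(std::vector<float> &fifo, size_t &used, size_t consumed)
{
    if (used > consumed)
        std::memmove(fifo.data(), fifo.data() + consumed, (used - consumed) * sizeof(float));
    used -= consumed;
}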
void DeviceSource::ReceiveMediaSample(IMediaSample *sample, bool bAudio)
{
    if (!sample)
        return;

    if (bCapturing)
    {
        BYTE *pointer;

        if (!sample->GetActualDataLength())
            return;

        if (SUCCEEDED(sample->GetPointer(&pointer)))
        {
            SampleData *data = NULL;

            if (bUseBuffering || !bAudio)
            {
                data = new SampleData;
                data->bAudio = bAudio;
                data->dataLength = sample->GetActualDataLength();
                data->lpData = (LPBYTE)Allocate(data->dataLength); //pointer;
                /*data->sample = sample;
                sample->AddRef();*/

                SSECopy(data->lpData, pointer, data->dataLength);

                LONGLONG stopTime;
                sample->GetTime(&data->timestamp, &stopTime);
            }

            //Log(TEXT("timestamp: %lld, bAudio - %s"), data->timestamp, bAudio ? TEXT("true") : TEXT("false"));

            OSEnterMutex(hSampleMutex);

            if (bUseBuffering)
            {
                UINT id = GetSampleInsertIndex(data->timestamp);
                samples.Insert(id, data);
            }
            else if (bAudio)
            {
                if (audioOut)
                    audioOut->ReceiveAudio(pointer, sample->GetActualDataLength());
            }
            else
            {
                SafeRelease(latestVideoSample);
                latestVideoSample = data;
            }

            OSLeaveMutex(hSampleMutex);
        }
    }
}
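When buffering is enabled, each copied sample is inserted at the position returned by GetSampleInsertIndex(), which keeps `samples` ordered by timestamp even if DirectShow delivers them slightly out of order. That helper is not shown here; a plausible linear-scan version, purely as an assumption about its contract (the real helper may scan from the back or use a binary search):

// Hypothetical sketch: find the first queued sample with a larger timestamp
// and insert in front of it, so 'samples' stays sorted by timestamp.
UINT DeviceSource::GetSampleInsertIndex(LONGLONG timestamp)
{
    for (UINT i = 0; i < samples.Num(); i++)
    {
        if (samples[i]->timestamp > timestamp)
            return i;
    }
    return samples.Num();
}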
UINT AudioSource::QueryAudio(float curVolume)
{
    LPVOID buffer;
    UINT numAudioFrames;
    QWORD newTimestamp;

    if(GetNextBuffer((void**)&buffer, &numAudioFrames, &newTimestamp))
    {
        //------------------------------------------------------------
        // convert to float

        float *captureBuffer;

        if(!bFloat)
        {
            UINT totalSamples = numAudioFrames*inputChannels;
            if(convertBuffer.Num() < totalSamples)
                convertBuffer.SetSize(totalSamples);

            if(inputBitsPerSample == 8)
            {
                float *tempConvert = convertBuffer.Array();
                char *tempSByte = (char*)buffer;

                while(totalSamples--)
                {
                    *(tempConvert++) = float(*(tempSByte++))/127.0f;
                }
            }
            else if(inputBitsPerSample == 16)
            {
                float *tempConvert = convertBuffer.Array();
                short *tempShort = (short*)buffer;

                while(totalSamples--)
                {
                    *(tempConvert++) = float(*(tempShort++))/32767.0f;
                }
            }
            else if(inputBitsPerSample == 24)
            {
                float *tempConvert = convertBuffer.Array();
                BYTE *tempTriple = (BYTE*)buffer;
                TripleToLong valOut;

                while(totalSamples--)
                {
                    TripleToLong &valIn = (TripleToLong&)tempTriple;

                    valOut.wVal = valIn.wVal;
                    valOut.tripleVal = valIn.tripleVal;
                    if(valOut.tripleVal > 0x7F)
                        valOut.lastByte = 0xFF;

                    *(tempConvert++) = float(double(valOut.val)/8388607.0);
                    tempTriple += 3;
                }
            }
            else if(inputBitsPerSample == 32)
            {
                float *tempConvert = convertBuffer.Array();
                long *tempShort = (long*)buffer;

                while(totalSamples--)
                {
                    *(tempConvert++) = float(double(*(tempShort++))/2147483647.0);
                }
            }

            captureBuffer = convertBuffer.Array();
        }
        else
            captureBuffer = (float*)buffer;

        //------------------------------------------------------------
        // channel upmix/downmix

        if(tempBuffer.Num() < numAudioFrames*2)
            tempBuffer.SetSize(numAudioFrames*2);

        float *dataOutputBuffer = tempBuffer.Array();
        float *tempOut = dataOutputBuffer;

        if(inputChannels == 1)
        {
            UINT  numFloats   = numAudioFrames;
            float *inputTemp  = (float*)captureBuffer;
            float *outputTemp = dataOutputBuffer;

            if((UPARAM(inputTemp) & 0xF) == 0 && (UPARAM(outputTemp) & 0xF) == 0)
            {
                UINT alignedFloats = numFloats & 0xFFFFFFFC;
                for(UINT i=0; i<alignedFloats; i += 4)
                {
                    __m128 inVal   = _mm_load_ps(inputTemp+i);

                    __m128 outVal1 = _mm_unpacklo_ps(inVal, inVal);
                    __m128 outVal2 = _mm_unpackhi_ps(inVal, inVal);

                    _mm_store_ps(outputTemp+(i*2),   outVal1);
                    _mm_store_ps(outputTemp+(i*2)+4, outVal2);
                }

                numFloats  -= alignedFloats;
                inputTemp  += alignedFloats;
                outputTemp += alignedFloats*2;
            }

            while(numFloats--)
            {
                float inputVal = *inputTemp;
                *(outputTemp++) = inputVal;
                *(outputTemp++) = inputVal;

                inputTemp++;
            }
        }
        else if(inputChannels == 2) //straight up copy
        {
            SSECopy(dataOutputBuffer, captureBuffer, numAudioFrames*2*sizeof(float));
        }
        else
        {
            //todo: downmix optimization, also support for other speaker configurations than ones I can merely "think" of.  ugh.
            float *inputTemp  = (float*)captureBuffer;
            float *outputTemp = dataOutputBuffer;

            if(inputChannelMask == KSAUDIO_SPEAKER_QUAD)
            {
                UINT numFloats = numAudioFrames*4;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float rearLeft  = inputTemp[2]*surroundMix4;
                    float rearRight = inputTemp[3]*surroundMix4;

                    // When in doubt, use only left and right .... and rear left and rear right :)
                    // Same idea as with 5.1 downmix

                    *(outputTemp++) = (left  + rearLeft)  * attn4dotX;
                    *(outputTemp++) = (right + rearRight) * attn4dotX;

                    inputTemp += 4;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_2POINT1)
            {
                UINT numFloats = numAudioFrames*3;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left  = inputTemp[0];
                    float right = inputTemp[1];

                    // Drop LFE since we don't need it
                    //float lfe = inputTemp[2]*lowFreqMix;

                    *(outputTemp++) = left;
                    *(outputTemp++) = right;

                    inputTemp += 3;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_4POINT1)
            {
                UINT numFloats = numAudioFrames*5;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];

                    // Skip LFE, we don't really need it.
                    //float lfe = inputTemp[2];

                    float rearLeft  = inputTemp[3]*surroundMix4;
                    float rearRight = inputTemp[4]*surroundMix4;

                    // Same idea as with 5.1 downmix

                    *(outputTemp++) = (left  + rearLeft)  * attn4dotX;
                    *(outputTemp++) = (right + rearRight) * attn4dotX;

                    inputTemp += 5;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_SURROUND)
            {
                UINT numFloats = numAudioFrames*4;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left        = inputTemp[0];
                    float right       = inputTemp[1];
                    float frontCenter = inputTemp[2];
                    float rearCenter  = inputTemp[3];

                    // When in doubt, use only left and right :) Seriously.
                    // THIS NEEDS TO BE PROPERLY IMPLEMENTED!

                    *(outputTemp++) = left;
                    *(outputTemp++) = right;

                    inputTemp += 4;
                }
            }
            // Both speaker configs share the same format, the difference is in rear speaker position
            // See: http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
            // Probably for KSAUDIO_SPEAKER_5POINT1_SURROUND we will need a different coefficient for rear left/right
            else if(inputChannelMask == KSAUDIO_SPEAKER_5POINT1 || inputChannelMask == KSAUDIO_SPEAKER_5POINT1_SURROUND)
            {
                UINT numFloats = numAudioFrames*6;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2]*centerMix;

                    // We don't need the LFE channel so skip it (see below)
                    //float lowFreq = inputTemp[3]*lowFreqMix;

                    float rearLeft  = inputTemp[4]*surroundMix;
                    float rearRight = inputTemp[5]*surroundMix;

                    // According to the ITU-R BS.775-1 recommendation, the downmix from a 3/2 source to stereo is:
                    //   L = FL + k0*C + k1*RL
                    //   R = FR + k0*C + k1*RR
                    // FL = front left, FR = front right, C = center, RL = rear left, RR = rear right
                    // k0 = centerMix   = dbMinus3 = 0.7071067811865476  [for k0 we can use dbMinus6 = 0.5 too, probably it's better]
                    // k1 = surroundMix = dbMinus3 = 0.7071067811865476
                    // The output (L,R) can be out of the (-1,1) domain so we attenuate it
                    // [ attn5dot1 = 1/(1 + centerMix + surroundMix) ]
                    // Note: this method of downmixing is far from "perfect" (pretty sure it's not the correct way),
                    // but the resulting downmix is "okayish", at least no more bleeding ears.
                    // (maybe have a look at http://forum.doom9.org/archive/index.php/t-148228.html too [ 5.1 -> stereo ];
                    //  the approach seems almost the same [but different coefficients])
                    // http://acousticsfreq.com/blog/wp-content/uploads/2012/01/ITU-R-BS775-1.pdf
                    // http://ir.lib.nctu.edu.tw/bitstream/987654321/22934/1/030104001.pdf

                    *(outputTemp++) = (left  + center + rearLeft)  * attn5dot1;
                    *(outputTemp++) = (right + center + rearRight) * attn5dot1;

                    inputTemp += 6;
                }
            }
            // According to http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
            // KSAUDIO_SPEAKER_7POINT1 is obsolete and no longer supported in Windows Vista and later versions of Windows.
            // Not sure what to do about it; drop front left of center / front right of center -> 5.1 -> stereo
            else if(inputChannelMask == KSAUDIO_SPEAKER_7POINT1)
            {
                UINT numFloats = numAudioFrames*8;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2] * centerMix;

                    // Drop LFE since we don't need it
                    //float lowFreq = inputTemp[3]*lowFreqMix;

                    float rearLeft  = inputTemp[4] * surroundMix;
                    float rearRight = inputTemp[5] * surroundMix;

                    // Drop SPEAKER_FRONT_LEFT_OF_CENTER, SPEAKER_FRONT_RIGHT_OF_CENTER
                    //float centerLeft  = inputTemp[6];
                    //float centerRight = inputTemp[7];

                    // Downmix from 5.1 to stereo
                    *(outputTemp++) = (left  + center + rearLeft)  * attn5dot1;
                    *(outputTemp++) = (right + center + rearRight) * attn5dot1;

                    inputTemp += 8;
                }
            }
            // Downmix to 5.1 (easy stuff) then downmix to stereo as done in the KSAUDIO_SPEAKER_5POINT1 case
            else if(inputChannelMask == KSAUDIO_SPEAKER_7POINT1_SURROUND)
            {
                UINT numFloats = numAudioFrames*8;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2] * centerMix;

                    // Skip LFE, we don't need it
                    //float lowFreq = inputTemp[3]*lowFreqMix;

                    float rearLeft  = inputTemp[4];
                    float rearRight = inputTemp[5];
                    float sideLeft  = inputTemp[6];
                    float sideRight = inputTemp[7];

                    // combine the rear/side channels first, baaam! 5.1
                    rearLeft  = (rearLeft  + sideLeft)  * 0.5f;
                    rearRight = (rearRight + sideRight) * 0.5f;

                    // downmix to stereo as in the 5.1 case
                    *(outputTemp++) = (left  + center + rearLeft  * surroundMix) * attn5dot1;
                    *(outputTemp++) = (right + center + rearRight * surroundMix) * attn5dot1;

                    inputTemp += 8;
                }
            }
        }

        ReleaseBuffer();

        //------------------------------------------------------------
        // resample

        if(bResample)
        {
            UINT frameAdjust = UINT((double(numAudioFrames) * resampleRatio) + 1.0);
            UINT newFrameSize = frameAdjust*2;

            if(tempResampleBuffer.Num() < newFrameSize)
                tempResampleBuffer.SetSize(newFrameSize);

            SRC_DATA data;
            data.src_ratio = resampleRatio;

            data.data_in = tempBuffer.Array();
            data.input_frames = numAudioFrames;

            data.data_out = tempResampleBuffer.Array();
            data.output_frames = frameAdjust;

            data.end_of_input = 0;

            int err = src_process((SRC_STATE*)resampler, &data);
            if(err)
            {
                RUNONCE AppWarning(TEXT("AudioSource::QueryAudio: Was unable to resample audio for device '%s'"), GetDeviceName());
                return NoAudioAvailable;
            }

            if(data.input_frames_used != numAudioFrames)
            {
                RUNONCE AppWarning(TEXT("AudioSource::QueryAudio: Failed to downsample buffer completely, which shouldn't actually happen because it should be using 10ms of samples"));
                return NoAudioAvailable;
            }

            numAudioFrames = data.output_frames_gen;
        }

        //-----------------------------------------------------------------------------
        // sort all audio frames into 10 millisecond increments (done because not all devices output in 10ms increments)
        // NOTE: 0.457+ - instead of using the timestamps from windows, just compare and make sure it stays within a 100ms of their timestamps

        if(!bFirstBaseFrameReceived)
        {
            lastUsedTimestamp = newTimestamp;
            bFirstBaseFrameReceived = true;
        }

        float *newBuffer = (bResample) ? tempResampleBuffer.Array() : tempBuffer.Array();

        if (bSmoothTimestamps)
        {
            lastUsedTimestamp += 10;

            QWORD difVal = GetQWDif(newTimestamp, lastUsedTimestamp);
            if(difVal > 70)
            {
                //OSDebugOut(TEXT("----------------------------1\r\nlastUsedTimestamp before: %llu - device: %s\r\n"), lastUsedTimestamp, GetDeviceName());
                lastUsedTimestamp = newTimestamp;
                //OSDebugOut(TEXT("lastUsedTimestamp after: %llu\r\n"), lastUsedTimestamp);
            }

            if(lastUsedTimestamp > lastSentTimestamp)
            {
                QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
                if(adjustVal < 10)
                    lastUsedTimestamp += 10-adjustVal;

                AudioSegment *newSegment = new AudioSegment(newBuffer, numAudioFrames*2, lastUsedTimestamp);
                AddAudioSegment(newSegment, curVolume*sourceVolume);

                lastSentTimestamp = lastUsedTimestamp;
            }
        }
        else
        {
            //OSDebugOut(TEXT("newTimestamp: %llu\r\n"), newTimestamp);
            AudioSegment *newSegment = new AudioSegment(newBuffer, numAudioFrames*2, newTimestamp);
            AddAudioSegment(newSegment, curVolume*sourceVolume);
        }

        //-----------------------------------------------------------------------------

        return AudioAvailable;
    }

    return NoAudioAvailable;
}
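The 5.1 comment above fully determines the mix constants; a small standalone snippet, assuming centerMix and surroundMix are both dbMinus3 as that comment states, works out the attenuation and applies the formula to one frame:

#include <cmath>
#include <cstdio>

int main()
{
    // Coefficients as stated in the comment: k0 = k1 = -3 dB = 1/sqrt(2)
    const double centerMix   = 1.0 / std::sqrt(2.0);                     // 0.7071067811865476
    const double surroundMix = 1.0 / std::sqrt(2.0);
    const double attn5dot1   = 1.0 / (1.0 + centerMix + surroundMix);    // ~0.4142

    // One 5.1 frame: FL, FR, C, LFE, RL, RR (the LFE channel is dropped by the downmix)
    const double fl = 1.0, fr = 1.0, c = 1.0, rl = 1.0, rr = 1.0;

    double L = (fl + centerMix*c + surroundMix*rl) * attn5dot1;
    double R = (fr + centerMix*c + surroundMix*rr) * attn5dot1;

    // Worst-case inputs of 1.0 stay inside (-1, 1): prints 1.000000 1.000000
    std::printf("%f %f\n", L, R);
    return 0;
}

The attenuation is exactly what keeps full-scale input from clipping: the largest possible sum is 1 + k0 + k1, and attn5dot1 is its reciprocal.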
void HandleGLSceneUpdate(HDC hDC)
{
    if(!bTargetAcquired && hdcAcquiredDC == NULL)
    {
        PIXELFORMATDESCRIPTOR pfd;

        hwndTarget = WindowFromDC(hDC);

        int pixFormat = GetPixelFormat(hDC);
        DescribePixelFormat(hDC, pixFormat, sizeof(pfd), &pfd);

        if(pfd.cColorBits == 32 && hwndTarget)
        {
            bTargetAcquired = true;
            hdcAcquiredDC = hDC;
            glcaptureInfo.format = GS_BGR;
        }
    }

    if(hDC == hdcAcquiredDC)
    {
        RECT rc;
        GetClientRect(hwndTarget, &rc);

        if(!bHasTextures || rc.right != glcaptureInfo.cx || rc.bottom != glcaptureInfo.cy)
        {
            if(!hwndReceiver)
                hwndReceiver = FindWindow(RECEIVER_WINDOWCLASS, NULL);

            if(hwndReceiver)
            {
                if(bHasTextures)
                    glDeleteBuffers(2, gltextures);

                glcaptureInfo.cx = rc.right;
                glcaptureInfo.cy = rc.bottom;

                glGenBuffers(2, gltextures);

                DWORD dwSize = glcaptureInfo.cx*glcaptureInfo.cy*4;

                bool bSuccess = true;
                for(UINT i=0; i<2; i++)
                {
                    UINT test = 0;

                    glBindBuffer(GL_PIXEL_PACK_BUFFER, gltextures[i]);
                    glBufferData(GL_PIXEL_PACK_BUFFER, dwSize, 0, GL_STREAM_READ);
                }

                glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

                if(bSuccess)
                {
                    glcaptureInfo.mapID = InitializeSharedMemory(dwSize, &glcaptureInfo.mapSize, &copyData, textureBuffers);
                    if(!glcaptureInfo.mapID)
                        bSuccess = false;
                }

                if(bSuccess)
                {
                    bHasTextures = true;
                    glcaptureInfo.captureType = CAPTURETYPE_MEMORY;
                    glcaptureInfo.hwndSender = hwndSender;
                    glcaptureInfo.pitch = glcaptureInfo.cx*4;
                    glcaptureInfo.bFlip = TRUE;
                    PostMessage(hwndReceiver, RECEIVER_NEWCAPTURE, 0, (LPARAM)&glcaptureInfo);
                }
                else
                    glDeleteBuffers(2, gltextures);
            }
            else
                KillGLTextures();
        }

        if(bHasTextures)
        {
            if(bCapturing)
            {
                GLuint texture = gltextures[curCapture];
                DWORD nextCapture = curCapture == 0 ? 1 : 0;

                glReadBuffer(GL_BACK);
                glBindBuffer(GL_PIXEL_PACK_BUFFER, texture);
                glReadPixels(0, 0, glcaptureInfo.cx, glcaptureInfo.cy, GL_BGRA, GL_UNSIGNED_BYTE, 0);

                glBindBuffer(GL_PIXEL_PACK_BUFFER, gltextures[nextCapture]);
                GLubyte *data = (GLubyte*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
                if(data)
                {
                    DWORD pitch = glcaptureInfo.cx*4;
                    int lastRendered = -1;

                    //under no circumstances do we -ever- allow this function to stall
                    if(WaitForSingleObject(textureMutexes[curCapture], 0) == WAIT_OBJECT_0)
                        lastRendered = curCapture;
                    else if(WaitForSingleObject(textureMutexes[nextCapture], 0) == WAIT_OBJECT_0)
                        lastRendered = nextCapture;

                    LPBYTE outData = NULL;
                    if(lastRendered != -1)
                    {
                        SSECopy(textureBuffers[lastRendered], data, pitch*glcaptureInfo.cy);
                        textureBuffers[lastRendered][0] = 0x1a;
                        ReleaseMutex(textureMutexes[lastRendered]);
                    }

                    glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
                    copyData->lastRendered = (UINT)lastRendered;
                }

                glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

                curCapture = nextCapture;
            }
            else
                KillGLTextures();
        }
    }
}
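The capture path above hides readback latency with two pixel-buffer objects: each frame it issues glReadPixels into one PBO (asynchronous, the final argument is a zero offset into the bound buffer) while mapping the PBO that was filled on the previous frame. A minimal sketch of that ping-pong pattern, assuming an extension loader such as GLEW supplies the buffer-object entry points:

#include <GL/glew.h>   // assumption: GLEW (or another loader) provides glGenBuffers etc.

GLuint pbo[2];
int    frameIndex = 0;

void InitReadbackPBOs(GLsizeiptr byteSize)
{
    glGenBuffers(2, pbo);
    for (int i = 0; i < 2; i++)
    {
        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[i]);
        glBufferData(GL_PIXEL_PACK_BUFFER, byteSize, 0, GL_STREAM_READ);
    }
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}

// Call once per frame after rendering; 'consume' receives the previous frame's pixels.
void ReadbackFrame(GLsizei cx, GLsizei cy, void (*consume)(const void *pixels))
{
    int cur = frameIndex, next = (frameIndex + 1) & 1;

    glReadBuffer(GL_BACK);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[cur]);
    glReadPixels(0, 0, cx, cy, GL_BGRA, GL_UNSIGNED_BYTE, 0);   // async copy into the bound PBO

    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[next]);              // PBO written one frame ago
    if (const void *data = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY))
    {
        consume(data);                                          // copy of the older frame, no stall
        glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
    }
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

    frameIndex = next;
}

Mapping the older PBO instead of the one just written is what keeps the hook from stalling the host application's render thread.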
HRESULT STDMETHODCALLTYPE SwapPresentHook(UINT syncInterval, UINT flags)
{
    IDXGISwapChain *swap = (IDXGISwapChain*)this;

    if(lpCurrentSwap == NULL && !bTargetAcquired)
    {
        lpCurrentSwap = swap;
        SetupD3D11(swap);
        bTargetAcquired = true;
    }

    if(lpCurrentSwap == swap)
    {
        ID3D11Device *device = NULL;
        HRESULT chi;

        if(SUCCEEDED(chi = swap->GetDevice(__uuidof(ID3D11Device), (void**)&device)))
        {
            if(!lpCurrentDevice)
            {
                lpCurrentDevice = device;

                oldD3D11Release = GetVTable(device, (8/4));
                newD3D11Release = ConvertClassProcToFarproc((CLASSPROC)&D3D11Override::DeviceReleaseHook);
                SetVTable(device, (8/4), newD3D11Release);
            }

            ID3D11DeviceContext *context;
            device->GetImmediateContext(&context);

            if(!bHasTextures && bCapturing)
            {
                if(dxgiFormat)
                {
                    if(!hwndReceiver)
                        hwndReceiver = FindWindow(RECEIVER_WINDOWCLASS, NULL);

                    if(hwndReceiver)
                    {
                        D3D11_TEXTURE2D_DESC texDesc;
                        ZeroMemory(&texDesc, sizeof(texDesc));
                        texDesc.Width  = d3d11CaptureInfo.cx;
                        texDesc.Height = d3d11CaptureInfo.cy;
                        texDesc.MipLevels = 1;
                        texDesc.ArraySize = 1;
                        texDesc.Format = dxgiFormat;
                        texDesc.SampleDesc.Count = 1;
                        texDesc.Usage = D3D11_USAGE_STAGING;
                        texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;

                        bool bSuccess = true;
                        UINT pitch;

                        for(UINT i=0; i<2; i++)
                        {
                            HRESULT ching;
                            if(FAILED(ching = device->CreateTexture2D(&texDesc, NULL, &d3d11Textures[i])))
                            {
                                bSuccess = false;
                                break;
                            }

                            if(i == 0)
                            {
                                ID3D11Resource *resource;
                                if(FAILED(d3d11Textures[i]->QueryInterface(__uuidof(ID3D11Resource), (void**)&resource)))
                                {
                                    bSuccess = false;
                                    break;
                                }

                                D3D11_MAPPED_SUBRESOURCE map;
                                if(FAILED(context->Map(resource, 0, D3D11_MAP_READ, 0, &map)))
                                {
                                    bSuccess = false;
                                    break;
                                }

                                pitch = map.RowPitch;
                                context->Unmap(resource, 0);
                                resource->Release();
                            }
                        }

                        if(bSuccess)
                        {
                            d3d11CaptureInfo.mapID = InitializeSharedMemory(pitch*d3d11CaptureInfo.cy, &d3d11CaptureInfo.mapSize, &copyData, textureBuffers);
                            if(!d3d11CaptureInfo.mapID)
                                bSuccess = false;
                        }

                        if(bSuccess)
                        {
                            bHasTextures = true;
                            d3d11CaptureInfo.captureType = CAPTURETYPE_MEMORY;
                            d3d11CaptureInfo.hwndSender = hwndSender;
                            d3d11CaptureInfo.pitch = pitch;
                            d3d11CaptureInfo.bFlip = FALSE;
                            PostMessage(hwndReceiver, RECEIVER_NEWCAPTURE, 0, (LPARAM)&d3d11CaptureInfo);
                        }
                        else
                        {
                            for(UINT i=0; i<2; i++)
                            {
                                SafeRelease(d3d11Textures[i]);

                                if(textureBuffers[i])
                                {
                                    free(textureBuffers[i]);
                                    textureBuffers[i] = NULL;
                                }
                            }
                        }
                    }
                }
            }

            if(bHasTextures)
            {
                if(bCapturing)
                {
                    DWORD nextCapture = curCapture == 0 ? 1 : 0;

                    ID3D11Texture2D *texture = d3d11Textures[curCapture];
                    ID3D11Resource *backBuffer = NULL;

                    if(SUCCEEDED(swap->GetBuffer(0, IID_ID3D11Resource, (void**)&backBuffer)))
                    {
                        if(bIsMultisampled)
                            context->ResolveSubresource(texture, 0, backBuffer, 0, dxgiFormat);
                        else
                            context->CopyResource(texture, backBuffer);
                        backBuffer->Release();

                        ID3D11Texture2D *lastTexture = d3d11Textures[nextCapture];

                        ID3D11Resource *resource;
                        if(SUCCEEDED(lastTexture->QueryInterface(__uuidof(ID3D11Resource), (void**)&resource)))
                        {
                            D3D11_MAPPED_SUBRESOURCE map;
                            if(SUCCEEDED(context->Map(resource, 0, D3D11_MAP_READ, 0, &map)))
                            {
                                LPBYTE *pTextureBuffer = NULL;
                                int lastRendered = -1;

                                //under no circumstances do we -ever- allow a stall
                                if(WaitForSingleObject(textureMutexes[curCapture], 0) == WAIT_OBJECT_0)
                                    lastRendered = (int)curCapture;
                                else if(WaitForSingleObject(textureMutexes[nextCapture], 0) == WAIT_OBJECT_0)
                                    lastRendered = (int)nextCapture;

                                if(lastRendered != -1)
                                {
                                    SSECopy(textureBuffers[lastRendered], map.pData, map.RowPitch*d3d11CaptureInfo.cy);
                                    ReleaseMutex(textureMutexes[lastRendered]);
                                }

                                context->Unmap(resource, 0);
                                copyData->lastRendered = (UINT)lastRendered;
                            }

                            resource->Release();
                        }
                    }

                    curCapture = nextCapture;
                }
                else
                    ClearD3D11Data();
            }

            device->Release();
            context->Release();
        }
    }

    gi11swapPresent.Unhook();
    HRESULT hRes = swap->Present(syncInterval, flags);
    gi11swapPresent.Rehook();

    return hRes;
}
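The D3D11 hook uses the same ping-pong idea as the GL path: each Present it copies the back buffer into one CPU-readable staging texture and maps the staging texture filled on the previous frame, so the Map rarely waits on the copy just issued. A self-contained sketch of the basic staging readback step (the parameter names and the consume callback are assumptions for illustration):

#include <d3d11.h>

// Copy 'backBuffer' into 'staging' and hand the mapped rows to 'consume'.
// Assumes 'staging' was created with D3D11_USAGE_STAGING and
// D3D11_CPU_ACCESS_READ, matching the back buffer's size and format.
bool ReadbackStaging(ID3D11DeviceContext *context, ID3D11Texture2D *staging,
                     ID3D11Resource *backBuffer, UINT height,
                     void (*consume)(const BYTE *row0, UINT rowPitch, UINT rows))
{
    context->CopyResource(staging, backBuffer);     // GPU-side copy, queued asynchronously

    // Map blocks until the copy completes; the hook above avoids that cost by
    // mapping the staging texture written on the *previous* frame instead.
    D3D11_MAPPED_SUBRESOURCE map;
    if (FAILED(context->Map(staging, 0, D3D11_MAP_READ, 0, &map)))
        return false;

    consume((const BYTE*)map.pData, map.RowPitch, height);
    context->Unmap(staging, 0);
    return true;
}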
void D3D10Texture::SetImage(void *lpData, GSImageFormat imageFormat, UINT pitch)
{
    if(!bDynamic)
    {
        AppWarning(TEXT("D3D10Texture::SetImage: cannot call on a non-dynamic texture"));
        return;
    }

    bool bMatchingFormat = false;
    UINT pixelBytes = 0;

    switch(format)
    {
        case GS_ALPHA:      bMatchingFormat = (imageFormat == GS_IMAGEFORMAT_A8);                                          pixelBytes = 1;  break;
        case GS_GRAYSCALE:  bMatchingFormat = (imageFormat == GS_IMAGEFORMAT_L8);                                          pixelBytes = 1;  break;
        case GS_RGB:        bMatchingFormat = (imageFormat == GS_IMAGEFORMAT_RGB  || imageFormat == GS_IMAGEFORMAT_RGBX);  pixelBytes = 4;  break;
        case GS_RGBA:       bMatchingFormat = (imageFormat == GS_IMAGEFORMAT_RGBA);                                        pixelBytes = 4;  break;
        case GS_BGR:        bMatchingFormat = (imageFormat == GS_IMAGEFORMAT_BGR  || imageFormat == GS_IMAGEFORMAT_BGRX);  pixelBytes = 4;  break;
        case GS_BGRA:       bMatchingFormat = (imageFormat == GS_IMAGEFORMAT_BGRA);                                        pixelBytes = 4;  break;
        case GS_RGBA16F:    bMatchingFormat = (imageFormat == GS_IMAGEFORMAT_RGBA16F);                                     pixelBytes = 8;  break;
        case GS_RGBA32F:    bMatchingFormat = (imageFormat == GS_IMAGEFORMAT_RGBA32F);                                     pixelBytes = 16; break;
    }

    if(!bMatchingFormat)
    {
        AppWarning(TEXT("D3D10Texture::SetImage: invalid or mismatching image format specified"));
        return;
    }

    HRESULT err;
    D3D10_MAPPED_TEXTURE2D map;
    if(FAILED(err = texture->Map(0, D3D10_MAP_WRITE_DISCARD, 0, &map)))
    {
        AppWarning(TEXT("D3D10Texture::SetImage: map failed, result = %08lX"), err);
        return;
    }

    //-------------------------------------------------------------------------
    if((format == GS_RGB || format == GS_BGR) && (imageFormat == GS_IMAGEFORMAT_BGR || imageFormat == GS_IMAGEFORMAT_RGB))
    {
        if(pitch == (width*3) && map.RowPitch == (width*4))
            CopyPackedRGB((BYTE*)map.pData, (BYTE*)lpData, width*height);
        else
        {
            for(UINT y=0; y<height; y++)
            {
                LPBYTE curInput  = ((LPBYTE)lpData)    + (pitch*y);
                LPBYTE curOutput = ((LPBYTE)map.pData) + (map.RowPitch*y);

                CopyPackedRGB(curOutput, curInput, width);
            }
        }
    }
    //-------------------------------------------------------------------------
    else
    {
        UINT rowWidth = width*pixelBytes;

        if(pitch == map.RowPitch)
        {
            if(App->SSE2Available())
                SSECopy(map.pData, lpData, pitch*height);
            else
                mcpy(map.pData, lpData, pitch*height);
        }
        else
        {
            UINT bestPitch = MIN(pitch, map.RowPitch);

            if(App->SSE2Available())
            {
                for(UINT y=0; y<height; y++)
                {
                    LPBYTE curInput  = ((LPBYTE)lpData)    + (pitch*y);
                    LPBYTE curOutput = ((LPBYTE)map.pData) + (map.RowPitch*y);

                    SSECopy(curOutput, curInput, bestPitch);
                }
            }
            else
            {
                for(UINT y=0; y<height; y++)
                {
                    LPBYTE curInput  = ((LPBYTE)lpData)    + (pitch*y);
                    LPBYTE curOutput = ((LPBYTE)map.pData) + (map.RowPitch*y);

                    mcpy(curOutput, curInput, bestPitch);
                }
            }
        }
    }

    texture->Unmap(0);
}
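The per-row branch above exists because a mapped texture's RowPitch is often wider than width*pixelBytes (drivers pad rows for alignment), so a single bulk copy is only safe when the source pitch and RowPitch match exactly. A minimal generic version of the row-by-row case:

#include <algorithm>
#include <cstring>

// Copy an image whose source rows are 'srcPitch' bytes apart into a mapped
// destination whose rows are 'dstPitch' bytes apart, copying only the bytes
// both layouts actually hold per row.
void CopyRows(unsigned char *dst, size_t dstPitch,
              const unsigned char *src, size_t srcPitch,
              size_t rowBytes, size_t rows)
{
    size_t copyBytes = std::min(rowBytes, std::min(srcPitch, dstPitch));
    for (size_t y = 0; y < rows; y++)
        std::memcpy(dst + y*dstPitch, src + y*srcPitch, copyBytes);
}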
UINT MMDeviceAudioSource::GetNextBuffer(float curVolume)
{
    UINT captureSize = 0;
    HRESULT err = mmCapture->GetNextPacketSize(&captureSize);
    if(FAILED(err))
    {
        RUNONCE AppWarning(TEXT("MMDeviceAudioSource::GetBuffer: GetNextPacketSize failed"));
        return NoAudioAvailable;
    }

    float *outputBuffer = NULL;

    if(captureSize)
    {
        LPBYTE captureBuffer;
        DWORD dwFlags = 0;
        UINT numAudioFrames = 0;

        UINT64 devPosition;
        UINT64 qpcTimestamp;
        err = mmCapture->GetBuffer(&captureBuffer, &numAudioFrames, &dwFlags, &devPosition, &qpcTimestamp);
        if(FAILED(err))
        {
            RUNONCE AppWarning(TEXT("MMDeviceAudioSource::GetBuffer: GetBuffer failed"));
            return NoAudioAvailable;
        }

        QWORD newTimestamp;

        if(dwFlags & AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR)
        {
            RUNONCE AppWarning(TEXT("MMDeviceAudioSource::GetBuffer: woa woa woa, getting timestamp errors from the audio subsystem.  device = %s"), GetDeviceName().Array());
            if(!bBrokenTimestamp)
                newTimestamp = lastUsedTimestamp + numAudioFrames*1000/inputSamplesPerSec;
        }
        else
        {
            if(!bBrokenTimestamp)
                newTimestamp = qpcTimestamp/10000;

            /*UINT64 freq;
            mmClock->GetFrequency(&freq);
            Log(TEXT("position: %llu, numAudioFrames: %u, freq: %llu, newTimestamp: %llu, test: %llu"), devPosition, numAudioFrames, freq, newTimestamp, devPosition*8000/freq);*/
        }

        //have to do this crap to account for broken devices or device drivers.  absolutely unbelievable.
        if(!bFirstFrameReceived)
        {
            LARGE_INTEGER clockFreq;
            QueryPerformanceFrequency(&clockFreq);
            QWORD curTime = GetQPCTimeMS(clockFreq.QuadPart);

            if(newTimestamp < (curTime-1000) || newTimestamp > (curTime+1000))
            {
                bBrokenTimestamp = true;

                Log(TEXT("MMDeviceAudioSource::GetNextBuffer: Got bad audio timestamp offset %lld from device: '%s', timestamps for this device will be calculated.  curTime: %llu, newTimestamp: %llu"), (LONGLONG)(newTimestamp - curTime), GetDeviceName().Array(), curTime, newTimestamp);
                lastUsedTimestamp = newTimestamp = curTime;
            }
            else
                lastUsedTimestamp = newTimestamp;

            bFirstFrameReceived = true;
        }

        if(tempBuffer.Num() < numAudioFrames*2)
            tempBuffer.SetSize(numAudioFrames*2);

        outputBuffer = tempBuffer.Array();
        float *tempOut = outputBuffer;

        //------------------------------------------------------------
        // channel upmix/downmix

        if(inputChannels == 1)
        {
            UINT  numFloats   = numAudioFrames;
            float *inputTemp  = (float*)captureBuffer;
            float *outputTemp = outputBuffer;

            if(App->SSE2Available() && (UPARAM(inputTemp) & 0xF) == 0 && (UPARAM(outputTemp) & 0xF) == 0)
            {
                UINT alignedFloats = numFloats & 0xFFFFFFFC;
                for(UINT i=0; i<alignedFloats; i += 4)
                {
                    __m128 inVal   = _mm_load_ps(inputTemp+i);

                    __m128 outVal1 = _mm_unpacklo_ps(inVal, inVal);
                    __m128 outVal2 = _mm_unpackhi_ps(inVal, inVal);

                    _mm_store_ps(outputTemp+(i*2),   outVal1);
                    _mm_store_ps(outputTemp+(i*2)+4, outVal2);
                }

                numFloats  -= alignedFloats;
                inputTemp  += alignedFloats;
                outputTemp += alignedFloats*2;
            }

            while(numFloats--)
            {
                float inputVal = *inputTemp;
                *(outputTemp++) = inputVal;
                *(outputTemp++) = inputVal;

                inputTemp++;
            }
        }
        else if(inputChannels == 2) //straight up copy
        {
            if(App->SSE2Available())
                SSECopy(outputBuffer, captureBuffer, numAudioFrames*2*sizeof(float));
            else
                mcpy(outputBuffer, captureBuffer, numAudioFrames*2*sizeof(float));
        }
        else
        {
            //todo: downmix optimization, also support for other speaker configurations than ones I can merely "think" of.  ugh.
            float *inputTemp  = (float*)captureBuffer;
            float *outputTemp = outputBuffer;

            if(inputChannelMask == KSAUDIO_SPEAKER_QUAD)
            {
                UINT numFloats = numAudioFrames*4;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left  = inputTemp[0];
                    float right = inputTemp[1];
                    float rear  = (inputTemp[2]+inputTemp[3])*surroundMix;

                    *(outputTemp++) = left  - rear;
                    *(outputTemp++) = right + rear;

                    inputTemp += 4;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_2POINT1)
            {
                UINT numFloats = numAudioFrames*3;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left  = inputTemp[0];
                    float right = inputTemp[1];
                    float lfe   = inputTemp[2]*lowFreqMix;

                    *(outputTemp++) = left  + lfe;
                    *(outputTemp++) = right + lfe;

                    inputTemp += 3;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_4POINT1)
            {
                UINT numFloats = numAudioFrames*5;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left  = inputTemp[0];
                    float right = inputTemp[1];
                    float lfe   = inputTemp[2]*lowFreqMix;
                    float rear  = (inputTemp[3]+inputTemp[4])*surroundMix;

                    *(outputTemp++) = left  + lfe - rear;
                    *(outputTemp++) = right + lfe + rear;

                    inputTemp += 5;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_SURROUND)
            {
                UINT numFloats = numAudioFrames*4;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left   = inputTemp[0];
                    float right  = inputTemp[1];
                    float center = inputTemp[2]*centerMix;
                    float rear   = inputTemp[3]*(surroundMix*dbMinus3);

                    *(outputTemp++) = left  + center - rear;
                    *(outputTemp++) = right + center + rear;

                    inputTemp += 4;
                }
            }
            //don't think this will work for both
            else if(inputChannelMask == KSAUDIO_SPEAKER_5POINT1)
            {
                UINT numFloats = numAudioFrames*6;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left    = inputTemp[0];
                    float right   = inputTemp[1];
                    float center  = inputTemp[2]*centerMix;
                    float lowFreq = inputTemp[3]*lowFreqMix;
                    float rear    = (inputTemp[4]+inputTemp[5])*surroundMix;

                    *(outputTemp++) = left  + center + lowFreq - rear;
                    *(outputTemp++) = right + center + lowFreq + rear;

                    inputTemp += 6;
                }
            }
            //todo ------------------
            //not sure if my 5.1/7.1 downmixes are correct
            else if(inputChannelMask == KSAUDIO_SPEAKER_5POINT1_SURROUND)
            {
                UINT numFloats = numAudioFrames*6;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2]*centerMix;
                    float lowFreq   = inputTemp[3]*lowFreqMix;
                    float sideLeft  = inputTemp[4]*dbMinus3;
                    float sideRight = inputTemp[5]*dbMinus3;

                    *(outputTemp++) = left  + center + sideLeft  + lowFreq;
                    *(outputTemp++) = right + center + sideRight + lowFreq;

                    inputTemp += 6;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_7POINT1)
            {
                UINT numFloats = numAudioFrames*8;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left        = inputTemp[0];
                    float right       = inputTemp[1];
                    float center      = inputTemp[2]*(centerMix*dbMinus3);
                    float lowFreq     = inputTemp[3]*lowFreqMix;
                    float rear        = (inputTemp[4]+inputTemp[5])*surroundMix;
                    float centerLeft  = inputTemp[6]*dbMinus6;
                    float centerRight = inputTemp[7]*dbMinus6;

                    *(outputTemp++) = left  + centerLeft  + center + lowFreq - rear;
                    *(outputTemp++) = right + centerRight + center + lowFreq + rear;

                    inputTemp += 8;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_7POINT1_SURROUND)
            {
                UINT numFloats = numAudioFrames*8;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float center    = inputTemp[2]*centerMix;
                    float lowFreq   = inputTemp[3]*lowFreqMix;
                    float rear      = (inputTemp[4]+inputTemp[5])*(surroundMix*dbMinus3);
                    float sideLeft  = inputTemp[6]*dbMinus6;
                    float sideRight = inputTemp[7]*dbMinus6;

                    *(outputTemp++) = left  + sideLeft  + center + lowFreq - rear;
                    *(outputTemp++) = right + sideRight + center + lowFreq + rear;

                    inputTemp += 8;
                }
            }
        }

        mmCapture->ReleaseBuffer(numAudioFrames);

        //------------------------------------------------------------
        // resample

        if(bResample)
        {
            UINT frameAdjust = UINT((double(numAudioFrames) * resampleRatio) + 1.0);
            UINT newFrameSize = frameAdjust*2;

            if(tempResampleBuffer.Num() < newFrameSize)
                tempResampleBuffer.SetSize(newFrameSize);

            SRC_DATA data;
            data.src_ratio = resampleRatio;

            data.data_in = tempBuffer.Array();
            data.input_frames = numAudioFrames;

            data.data_out = tempResampleBuffer.Array();
            data.output_frames = frameAdjust;

            data.end_of_input = 0;

            int err = src_process(resampler, &data);
            if(err)
            {
                RUNONCE AppWarning(TEXT("Was unable to resample audio"));
                return NoAudioAvailable;
            }

            if(data.input_frames_used != numAudioFrames)
            {
                RUNONCE AppWarning(TEXT("Failed to downsample buffer completely, which shouldn't actually happen because it should be using 10ms of samples"));
                return NoAudioAvailable;
            }

            numAudioFrames = data.output_frames_gen;
        }

        //-----------------------------------------------------------------------------
        // sort all audio frames into 10 millisecond increments (done because not all devices output in 10ms increments)
        // NOTE: 0.457+ - instead of using the timestamps from windows, just compare and make sure it stays within a 100ms of their timestamps

        float *newBuffer = (bResample) ? tempResampleBuffer.Array() : tempBuffer.Array();

        if(storageBuffer.Num() == 0 && numAudioFrames == 441)
        {
            lastUsedTimestamp += 10;
            if(!bBrokenTimestamp)
            {
                QWORD difVal = GetQWDif(newTimestamp, lastUsedTimestamp);
                if(difVal > 70)
                    lastUsedTimestamp = newTimestamp;
            }

            if(lastUsedTimestamp > lastSentTimestamp)
            {
                QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
                if(adjustVal < 10)
                    lastUsedTimestamp += 10-adjustVal;

                AudioSegment &newSegment = *audioSegments.CreateNew();
                newSegment.audioData.CopyArray(newBuffer, numAudioFrames*2);
                newSegment.timestamp = lastUsedTimestamp;
                MultiplyAudioBuffer(newSegment.audioData.Array(), numAudioFrames*2, curVolume);

                lastSentTimestamp = lastUsedTimestamp;
            }
        }
        else
        {
            UINT storedFrames = storageBuffer.Num();

            storageBuffer.AppendArray(newBuffer, numAudioFrames*2);
            if(storageBuffer.Num() >= (441*2))
            {
                lastUsedTimestamp += 10;
                if(!bBrokenTimestamp)
                {
                    QWORD difVal = GetQWDif(newTimestamp, lastUsedTimestamp);
                    if(difVal > 70)
                        lastUsedTimestamp = newTimestamp - (QWORD(storedFrames)/2*1000/44100);
                }

                //------------------------
                // add new data

                if(lastUsedTimestamp > lastSentTimestamp)
                {
                    QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
                    if(adjustVal < 10)
                        lastUsedTimestamp += 10-adjustVal;

                    AudioSegment &newSegment = *audioSegments.CreateNew();
                    newSegment.audioData.CopyArray(storageBuffer.Array(), (441*2));
                    newSegment.timestamp = lastUsedTimestamp;
                    MultiplyAudioBuffer(newSegment.audioData.Array(), 441*2, curVolume);

                    storageBuffer.RemoveRange(0, (441*2));
                }

                //------------------------
                // if still data pending (can happen)

                while(storageBuffer.Num() >= (441*2))
                {
                    lastUsedTimestamp += 10;

                    if(lastUsedTimestamp > lastSentTimestamp)
                    {
                        QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
                        if(adjustVal < 10)
                            lastUsedTimestamp += 10-adjustVal;

                        AudioSegment &newSegment = *audioSegments.CreateNew();
                        newSegment.audioData.CopyArray(storageBuffer.Array(), (441*2));
                        storageBuffer.RemoveRange(0, (441*2));

                        MultiplyAudioBuffer(newSegment.audioData.Array(), 441*2, curVolume);

                        newSegment.timestamp = lastUsedTimestamp;

                        lastSentTimestamp = lastUsedTimestamp;
                    }
                }
            }
        }

        //-----------------------------------------------------------------------------

        return ContinueAudioRequest;
    }

    return NoAudioAvailable;
}
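The hard-coded 441 and 441*2 above come from the 10 ms segmenting described in the comment: at the 44.1 kHz rate this path works in, 10 ms is exactly 441 frames, and stereo interleaving doubles that to 882 floats per segment. A tiny check of that arithmetic:

#include <cassert>

int main()
{
    const unsigned samplesPerSec = 44100;   // sample rate assumed by this code path
    const unsigned segmentMs     = 10;
    const unsigned channels      = 2;

    const unsigned framesPerSegment = samplesPerSec * segmentMs / 1000;   // 441
    const unsigned floatsPerSegment = framesPerSegment * channels;        // 882 == 441*2

    assert(framesPerSegment == 441);
    assert(floatsPerSegment == 441*2);
    return 0;
}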
bool MMDeviceAudioSource::GetNextBuffer(void **buffer, UINT *numFrames, QWORD *timestamp)
{
    UINT captureSize = 0;
    bool bFirstRun = true;
    HRESULT hRes;

    while (true)
    {
        if (inputBufferSize >= sampleWindowSize*GetChannelCount())
        {
            if (bFirstRun)
                lastQPCTimestamp += 10;
            firstTimestamp = GetTimestamp(lastQPCTimestamp);
            break;
        }

        //---------------------------------------------------------

        hRes = mmCapture->GetNextPacketSize(&captureSize);

        if (FAILED(hRes))
        {
            RUNONCE AppWarning(TEXT("MMDeviceAudioSource::GetBuffer: GetNextPacketSize failed, result = %08lX"), hRes);
            return false;
        }

        if (!captureSize)
            return false;

        //---------------------------------------------------------

        LPBYTE captureBuffer;
        UINT32 numFramesRead;
        DWORD dwFlags = 0;

        UINT64 devPosition, qpcTimestamp;
        hRes = mmCapture->GetBuffer(&captureBuffer, &numFramesRead, &dwFlags, &devPosition, &qpcTimestamp);

        if (FAILED(hRes))
        {
            RUNONCE AppWarning(TEXT("MMDeviceAudioSource::GetBuffer: GetBuffer failed, result = %08lX"), hRes);
            return false;
        }

        if (inputBufferSize)
        {
            double timeAdjust = double(inputBufferSize/GetChannelCount());
            timeAdjust /= (double(GetSamplesPerSec())*0.0000001);

            qpcTimestamp -= UINT64(timeAdjust);
        }

        /*if (!bIsMic)
        {
            Log(TEXT("f: %u, i: %u, qpc: %llu"), numFramesRead, inputBufferSize != 0, qpcTimestamp);
        }*/

        qpcTimestamp /= 10000;
        lastQPCTimestamp = qpcTimestamp;

        //---------------------------------------------------------

        UINT totalFloatsRead = numFramesRead*GetChannelCount();
        UINT newInputBufferSize = inputBufferSize + totalFloatsRead;
        if (newInputBufferSize > inputBuffer.Num())
            inputBuffer.SetSize(newInputBufferSize);

        SSECopy(inputBuffer.Array()+inputBufferSize, captureBuffer, totalFloatsRead*sizeof(float));
        inputBufferSize = newInputBufferSize;

        mmCapture->ReleaseBuffer(numFramesRead);

        bFirstRun = false;
    }

    *numFrames = sampleWindowSize;
    *buffer = (void*)inputBuffer.Array();
    *timestamp = firstTimestamp;

    return true;
}
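Two unit conversions above are easy to misread: WASAPI reports qpcTimestamp in 100-nanosecond units, so dividing by 10,000 yields milliseconds, and timeAdjust rewinds that timestamp by the duration of the frames already queued in inputBuffer (buffered frames divided by samples-per-second, expressed back in 100 ns units). A small numeric check, assuming a 44.1 kHz stereo device and an example timestamp:

#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t qpcTimestamp100ns = 1234567890ULL;   // example WASAPI timestamp, 100 ns units
    const double   samplesPerSec     = 44100.0;
    const unsigned channels          = 2;
    const unsigned bufferedFloats    = 882;              // 441 frames of stereo already buffered

    // Frames already buffered were captured earlier, so pull the timestamp back by their duration.
    double   bufferedFrames = double(bufferedFloats / channels);             // 441
    double   adjust100ns    = bufferedFrames / (samplesPerSec * 0.0000001);  // 441/44100 s = 100000 (10 ms)
    uint64_t adjustedQpc    = qpcTimestamp100ns - uint64_t(adjust100ns);

    uint64_t milliseconds   = adjustedQpc / 10000;        // 100 ns units -> milliseconds

    std::printf("adjust = %.0f (100 ns units), ts = %llu ms\n",
                adjust100ns, (unsigned long long)milliseconds);
    return 0;
}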