// Insert one "xuservoice" row per pager/phone number parsed out of the
// PageNumber member.  PageNumber is a delimited list; GetNextBuffer()
// returns the length of the current token and leaves pptr on the token's
// terminating delimiter.  Numbers that TryPagerToGroup() claims are routed
// to a group and skip the per-user insert.  Always returns TRUE.
// NOTE(review): the SQL text is built with Format() from szFdTel/RemoteId
// -- string-built SQL; if these values can ever contain quote characters
// this is injectable.  Consider a parameterized command.
BOOL CInfoChannel::PutUsrRecord(LPCTSTR xUsrFile)
{
    CHAR* sptr;
    CHAR* pptr;
    int nLength;
    CHAR szFdTel[MAX_PHONE + 1];
    // Walk every token; the next scan starts one past the delimiter (pptr + 1).
    for ( sptr = PageNumber, nLength = GetNextBuffer( sptr, pptr );
          *sptr != '\0';
          nLength = GetNextBuffer( sptr = pptr + 1, pptr ) )
    {
        // Clamp to the buffer and make a NUL-terminated local copy.
        nLength = min( nLength, MAX_PHONE );
        CopyMemory( szFdTel, sptr, nLength );
        szFdTel[nLength] = '\0';
        // Group pagers are handled elsewhere; skip the individual insert.
        if ( TryPagerToGroup(_ttoi(szFdTel), xUsrFile) )
            continue;
        CString strQuery;
        strQuery.Format( "insert into xuservoice"
                         " (xPCMName,xPhoneNum,xRecDate,xCaller)"
                         " values('%s','%s',getdate(),'%s')",
                         xUsrFile, szFdTel, RemoteId );
        TRACE( strQuery + "\n" );
        xExecute(theApp.m_pConnection, (_bstr_t)strQuery, adExecuteNoRecords);
        // Delimiter was the final NUL: last token consumed, stop scanning.
        if ( *pptr == '\0' )
            break;
    }
    return TRUE;
}
// Count how many telephone numbers are packed into the FaxNumber list.
// GetNextBuffer() leaves pptr on the delimiter of the token starting at
// sptr; scanning resumes one character past it.
int CMailChannel::GetFxTeleCount()
{
    int total = 0;
    CHAR* sptr = FaxNumber;
    CHAR* pptr;
    GetNextBuffer( sptr, pptr );
    while ( *sptr != '\0' )
    {
        total += 1;
        sptr = pptr + 1;
        GetNextBuffer( sptr, pptr );
    }
    return total;
}
// Record the received fax file for every destination number packed into
// the FaxNumber member (a delimited list walked with GetNextBuffer()).
// One "xfax" row is inserted per number; once the list is known to hold
// more than one number (bMutiFax) inserts store RemoteId in xfeenumber,
// otherwise the configured fee code (m_xFeeCode) goes into xfeetableId.
// Each destination is also sent an e-mail copy via PutUsrEmail().
// Always returns TRUE.
// NOTE(review): the SQL text is built with Format() from raw strings --
// string-built SQL; verify the inputs cannot contain quote characters,
// or switch to a parameterized command.
BOOL CMailChannel::PutUsrRecord(LPCTSTR xFaxFile)
{
    CHAR* sptr;
    CHAR* pptr;
    int nLength;
    BOOL bMutiFax = FALSE;
    CHAR szFdTel[MAX_PHONE + 1];
    // Iterate the numbers; the next token starts at pptr + 1.
    for ( sptr = FaxNumber, nLength = GetNextBuffer( sptr, pptr );
          *sptr != '\0';
          nLength = GetNextBuffer( sptr = pptr + 1, pptr ) )
    {
        // A delimiter that is not the final NUL means more numbers follow.
        if ( ! bMutiFax && *pptr != '\0' )
            bMutiFax = TRUE;
        // Clamp and NUL-terminate a local copy of the phone number.
        nLength = min( nLength, MAX_PHONE );
        CopyMemory( szFdTel, sptr, nLength );
        szFdTel[nLength] = '\0';
        CString strQuery;
        if ( bMutiFax == FALSE )
            strQuery.Format( "insert into xfax"
                             " (xfaxpath,xPhoneNum,xRecDate,xCaller,xfeetableId)"
                             " values('%s','%s',getdate(),'%s','%s')",
                             xFaxFile, szFdTel, RemoteId, m_xFeeCode );
        else
            strQuery.Format( "insert into xfax"
                             " (xfaxpath,xPhoneNum,xRecDate,xCaller,xfeenumber)"
                             " values('%s','%s',getdate(),'%s','%s')",
                             xFaxFile, szFdTel, RemoteId, RemoteId );
        TRACE( strQuery + "\n" );
        xExecute(theApp.m_pConnection, (_bstr_t)strQuery, adExecuteNoRecords);
        // Save the current directory -- PutUsrEmail() may change it --
        // and restore it after the mail is queued.
        CHAR xPath[MAX_PATH + 1];
        xPath[ GetCurrentDirectory(MAX_PATH,xPath) ] = '\0';
#ifdef NPICKUP
        // Test build: mail a fixed sample TIF instead of the real fax file.
        PutUsrEmail( szFdTel, RemoteId,
                     Settings.General.PathFax+"\\1386787\\20060227212822906.tif" );
#else
        PutUsrEmail( szFdTel, RemoteId, xFaxFile );
#endif
        SetCurrentDirectory( xPath );
    }
    return TRUE;
}
// Build a unique recording-file path for the first pager number found in
// PageNumber.  Layout: <PathUsr>\<first 4 chars of number>\<timestamp>.<ext>.
// The 4-character subdirectory is only created when the number is longer
// than four characters; otherwise the file lands directly in PathUsr.
// sExtTitle is the file extension (without the dot).
CString CInfoChannel::MakeUsrLvFile(LPTSTR sExtTitle)
{
    // Root directory for user recordings, read from the ".PathUsr" setting.
    CSettings::Item* pItem = Settings.GetSetting( _T(".PathUsr") );
    CString xVbDirect = *pItem->m_pString;
    // Extract the first number token from the PageNumber list.
    CHAR* pptr;
    CHAR* sptr = PageNumber;
    int nLength = GetNextBuffer( sptr, pptr );
    CHAR szFdTel[MAX_PHONE + 1];
    nLength = min( nLength, MAX_PHONE );
    CopyMemory( szFdTel, sptr, nLength );
    szFdTel[nLength] = '\0';
    // Numbers longer than 4 chars get bucketed under a directory named
    // after their first four characters (created on demand).
    if ( (nLength = _tcslen(szFdTel)) > 4 )
    {
        xVbDirect += '\\';
        for ( int i = 0; i < 4; i++ )
            xVbDirect += szFdTel[i];
        CreateDirectory( xVbDirect, NULL );
    }
    // File name is the local time down to milliseconds, unique per call
    // for practical purposes.
    CString xUsrFile;
    SYSTEMTIME pTime;
    GetLocalTime( &pTime );
    xUsrFile.Format( "%s\\%04i%02i%02i%02i%02i%02i%03i.%s",
                     xVbDirect,
                     pTime.wYear, pTime.wMonth, pTime.wDay,
                     pTime.wHour, pTime.wMinute, pTime.wSecond,
                     pTime.wMilliseconds, sExtTitle );
    MLOG( xUsrFile );
    return xUsrFile;
}
// Write one buffer into memory unsigned int PneumaticsTaskLog::PutOne(float pressure_in) { struct abuf *ob; // Output buffer // Get output buffer if ((ob = (struct abuf *)GetNextBuffer(sizeof(struct abuf)))) { // Fill it in. clock_gettime(CLOCK_REALTIME, &ob->tp); ob->pressure = pressure_in; return (sizeof(struct abuf)); } // Did not get a buffer. Return a zero length return (0); }
// Write one buffer into memory unsigned int CameraServoLog::PutOne(void) { struct abuf166 *ob; // Output buffer // Get output buffer if ((ob = (struct abuf166 *)GetNextBuffer(sizeof(struct abuf166)))) { // Fill it in. clock_gettime(CLOCK_REALTIME, &ob->tp); // Add any values to be logged here return (sizeof(struct abuf166)); } // Did not get a buffer. Return a zero length return (0); }
// Write one buffer into memory // <<CHANGEME>> unsigned int BridgeBalanceLog::PutOne(void) { struct abuf *ob; // Output buffer // Get output buffer if ((ob = (struct abuf *)GetNextBuffer(sizeof(struct abuf)))) { // Fill it in. clock_gettime(CLOCK_REALTIME, &ob->tp); // Add any values to be logged here // <<CHANGEME>> return (sizeof(struct abuf)); } // Did not get a buffer. Return a zero length return (0); }
// Write one buffer into memory unsigned int ProxyLog::PutOne(float battery, ProxyJoystick joy1, ProxyJoystick joy2, ProxyJoystick joy3) { struct abuf166 *ob; // Output buffer // Get output buffer if ((ob = (struct abuf166 *)GetNextBuffer(sizeof(struct abuf166)))) { // Fill it in. clock_gettime(CLOCK_REALTIME, &ob->tp); ob->battery = battery; ob->joy[1] = joy1; ob->joy[2] = joy2; ob->joy[3] = joy3; return (sizeof(struct abuf166)); } // Did not get a buffer. Return a zero length return (0); }
// Write one buffer into memory unsigned int SonarLog::PutOne(double f,double l,double r) { struct abuf *ob; // Output buffer // Get output buffer if ((ob = (struct abuf *)GetNextBuffer(sizeof(struct abuf)))) { // Fill it in. clock_gettime(CLOCK_REALTIME, &ob->tp); ob->f = f; ob->l = l; ob->r = r; return (sizeof(struct abuf)); } // Did not get a buffer. Return a zero length return (0); }
// Read the entire media stream into `buffer`, concatenating successive
// sample buffers from the reader.
//   buffer        - destination; must hold at least m_maxStreamLengthInBytes.
//   maxBufferSize - capacity of `buffer` in bytes.
//   bufferLength  - out: total bytes written.
// Returns early (leaving *bufferLength untouched) when no reader is set,
// and with *bufferLength == 0 when the destination is too small.
void MediaStreamer::ReadAll(uint8* buffer, uint32 maxBufferSize, uint32* bufferLength)
{
    uint32 valuesWritten = 0;
    uint32 sampleBufferLength = 0;
    if (m_reader == nullptr)
    {
        return;
    }
    *bufferLength = 0;
    // If buffer isn't large enough, return
    if (maxBufferSize < m_maxStreamLengthInBytes)
    {
        return;
    }
    // NOTE(review): GetNextBuffer() apparently returns false while more data
    // remains and true on the final chunk -- confirm against its definition;
    // the final chunk's length is folded in after the loop.
    while (!GetNextBuffer(buffer + valuesWritten, maxBufferSize - valuesWritten, &sampleBufferLength))
    {
        valuesWritten += sampleBufferLength;
    }
    *bufferLength = valuesWritten + sampleBufferLength;
}
// Pull one buffer of audio from the capture device, normalize it to
// interleaved stereo float samples, optionally resample it to the output
// rate, and queue it as an AudioSegment.
// Pipeline: GetNextBuffer() -> integer->float conversion (8/16/24/32 bit)
// -> channel upmix/downmix to stereo -> optional libsamplerate resample
// -> timestamp smoothing -> AddAudioSegment().
// Returns AudioAvailable when a segment was produced, NoAudioAvailable
// when no device buffer was ready or resampling failed.
UINT AudioSource::QueryAudio(float curVolume)
{
    LPVOID buffer;
    UINT numAudioFrames;
    QWORD newTimestamp;
    if(GetNextBuffer((void**)&buffer, &numAudioFrames, &newTimestamp))
    {
        //------------------------------------------------------------
        // convert to float

        float *captureBuffer;

        if(!bFloat)
        {
            UINT totalSamples = numAudioFrames*inputChannels;
            if(convertBuffer.Num() < totalSamples)
                convertBuffer.SetSize(totalSamples);

            if(inputBitsPerSample == 8)
            {
                // signed 8-bit -> [-1,1]
                float *tempConvert = convertBuffer.Array();
                char *tempSByte = (char*)buffer;

                while(totalSamples--)
                {
                    *(tempConvert++) = float(*(tempSByte++))/127.0f;
                }
            }
            else if(inputBitsPerSample == 16)
            {
                // signed 16-bit -> [-1,1]
                float *tempConvert = convertBuffer.Array();
                short *tempShort = (short*)buffer;

                while(totalSamples--)
                {
                    *(tempConvert++) = float(*(tempShort++))/32767.0f;
                }
            }
            else if(inputBitsPerSample == 24)
            {
                // packed signed 24-bit -> [-1,1], sign-extended via lastByte
                float *tempConvert = convertBuffer.Array();
                BYTE *tempTriple = (BYTE*)buffer;
                TripleToLong valOut;

                while(totalSamples--)
                {
                    // NOTE(review): this binds a reference to the POINTER
                    // VARIABLE tempTriple itself, i.e. it reads the
                    // pointer's own bytes rather than the sample bytes it
                    // points at.  Looks like it should dereference
                    // (*tempTriple) -- verify with a 24-bit capture device.
                    TripleToLong &valIn = (TripleToLong&)tempTriple;
                    valOut.wVal = valIn.wVal;
                    valOut.tripleVal = valIn.tripleVal;
                    if(valOut.tripleVal > 0x7F)
                        valOut.lastByte = 0xFF;

                    *(tempConvert++) = float(double(valOut.val)/8388607.0);
                    tempTriple += 3;
                }
            }
            else if(inputBitsPerSample == 32)
            {
                // signed 32-bit -> [-1,1]
                float *tempConvert = convertBuffer.Array();
                long *tempShort = (long*)buffer;

                while(totalSamples--)
                {
                    *(tempConvert++) = float(double(*(tempShort++))/2147483647.0);
                }
            }

            captureBuffer = convertBuffer.Array();
        }
        else
            captureBuffer = (float*)buffer;

        //------------------------------------------------------------
        // channel upmix/downmix -- output is always interleaved stereo

        if(tempBuffer.Num() < numAudioFrames*2)
            tempBuffer.SetSize(numAudioFrames*2);

        float *dataOutputBuffer = tempBuffer.Array();
        float *tempOut = dataOutputBuffer;  // NOTE(review): unused local

        if(inputChannels == 1)
        {
            // mono -> stereo: duplicate each sample.  SSE fast path when
            // both buffers are 16-byte aligned; scalar loop handles the tail.
            UINT numFloats = numAudioFrames;
            float *inputTemp = (float*)captureBuffer;
            float *outputTemp = dataOutputBuffer;

            if((UPARAM(inputTemp) & 0xF) == 0 && (UPARAM(outputTemp) & 0xF) == 0)
            {
                UINT alignedFloats = numFloats & 0xFFFFFFFC;
                for(UINT i=0; i<alignedFloats; i += 4)
                {
                    __m128 inVal = _mm_load_ps(inputTemp+i);

                    __m128 outVal1 = _mm_unpacklo_ps(inVal, inVal);
                    __m128 outVal2 = _mm_unpackhi_ps(inVal, inVal);

                    _mm_store_ps(outputTemp+(i*2), outVal1);
                    _mm_store_ps(outputTemp+(i*2)+4, outVal2);
                }

                numFloats -= alignedFloats;
                inputTemp += alignedFloats;
                outputTemp += alignedFloats*2;
            }

            while(numFloats--)
            {
                float inputVal = *inputTemp;
                *(outputTemp++) = inputVal;
                *(outputTemp++) = inputVal;

                inputTemp++;
            }
        }
        else if(inputChannels == 2) //straight up copy
        {
            SSECopy(dataOutputBuffer, captureBuffer, numAudioFrames*2*sizeof(float));
        }
        else
        {
            //todo: downmix optimization, also support for other speaker configurations than ones I can merely "think" of. ugh.
            float *inputTemp = (float*)captureBuffer;
            float *outputTemp = dataOutputBuffer;

            if(inputChannelMask == KSAUDIO_SPEAKER_QUAD)
            {
                UINT numFloats = numAudioFrames*4;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left      = inputTemp[0];
                    float right     = inputTemp[1];
                    float rearLeft  = inputTemp[2]*surroundMix4;
                    float rearRight = inputTemp[3]*surroundMix4;

                    // When in doubt, use only left and right .... and rear left and rear right :)
                    // Same idea as with 5.1 downmix

                    *(outputTemp++) = (left + rearLeft) * attn4dotX;
                    *(outputTemp++) = (right + rearRight) * attn4dotX;

                    inputTemp += 4;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_2POINT1)
            {
                UINT numFloats = numAudioFrames*3;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left  = inputTemp[0];
                    float right = inputTemp[1];

                    // Drop LFE since we don't need it
                    //float lfe = inputTemp[2]*lowFreqMix;

                    *(outputTemp++) = left;
                    *(outputTemp++) = right;

                    inputTemp += 3;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_4POINT1)
            {
                UINT numFloats = numAudioFrames*5;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left  = inputTemp[0];
                    float right = inputTemp[1];

                    // Skip LFE , we don't really need it.
                    //float lfe = inputTemp[2];

                    float rearLeft  = inputTemp[3]*surroundMix4;
                    float rearRight = inputTemp[4]*surroundMix4;

                    // Same idea as with 5.1 downmix

                    *(outputTemp++) = (left + rearLeft) * attn4dotX;
                    *(outputTemp++) = (right + rearRight) * attn4dotX;

                    inputTemp += 5;
                }
            }
            else if(inputChannelMask == KSAUDIO_SPEAKER_SURROUND)
            {
                UINT numFloats = numAudioFrames*4;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left        = inputTemp[0];
                    float right       = inputTemp[1];
                    float frontCenter = inputTemp[2];
                    float rearCenter  = inputTemp[3];

                    // When in doubt, use only left and right :) Seriously.
                    // THIS NEEDS TO BE PROPERLY IMPLEMENTED!

                    *(outputTemp++) = left;
                    *(outputTemp++) = right;

                    inputTemp += 4;
                }
            }
            // Both speakers configs share the same format, the difference is in rear speakers position
            // See: http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
            // Probably for KSAUDIO_SPEAKER_5POINT1_SURROUND we will need a different coefficient for rear left/right
            else if(inputChannelMask == KSAUDIO_SPEAKER_5POINT1 || inputChannelMask == KSAUDIO_SPEAKER_5POINT1_SURROUND)
            {
                UINT numFloats = numAudioFrames*6;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left   = inputTemp[0];
                    float right  = inputTemp[1];
                    float center = inputTemp[2]*centerMix;

                    //We don't need LFE channel so skip it (see below)
                    //float lowFreq = inputTemp[3]*lowFreqMix;

                    float rearLeft  = inputTemp[4]*surroundMix;
                    float rearRight = inputTemp[5]*surroundMix;

                    // According to ITU-R BS.775-1 recommendation, the downmix from a 3/2 source to stereo
                    // is the following:
                    // L = FL + k0*C + k1*RL
                    // R = FR + k0*C + k1*RR
                    // FL = front left
                    // FR = front right
                    // C  = center
                    // RL = rear left
                    // RR = rear right
                    // k0 = centerMix   = dbMinus3 = 0.7071067811865476 [for k0 we can use dbMinus6 = 0.5 too, probably it's better]
                    // k1 = surroundMix = dbMinus3 = 0.7071067811865476
                    // The output (L,R) can be out of (-1,1) domain so we attenuate it [ attn5dot1 = 1/(1 + centerMix + surroundMix) ]
                    // Note: this method of downmixing is far from "perfect" (pretty sure it's not the correct way) but the resulting downmix is "okayish", at least no more bleeding ears.
                    // (maybe have a look at http://forum.doom9.org/archive/index.php/t-148228.html too [ 5.1 -> stereo ] the approach seems almost the same [but different coefficients])
                    // http://acousticsfreq.com/blog/wp-content/uploads/2012/01/ITU-R-BS775-1.pdf
                    // http://ir.lib.nctu.edu.tw/bitstream/987654321/22934/1/030104001.pdf

                    *(outputTemp++) = (left + center + rearLeft) * attn5dot1;
                    *(outputTemp++) = (right + center + rearRight) * attn5dot1;

                    inputTemp += 6;
                }
            }
            // According to http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083(v=vs.85).aspx
            // KSAUDIO_SPEAKER_7POINT1 is obsolete and no longer supported in Windows Vista and later versions of Windows
            // Not sure what to do about it, meh , drop front left of center/front right of center -> 5.1 -> stereo;
            else if(inputChannelMask == KSAUDIO_SPEAKER_7POINT1)
            {
                UINT numFloats = numAudioFrames*8;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left   = inputTemp[0];
                    float right  = inputTemp[1];
                    float center = inputTemp[2] * centerMix;

                    // Drop LFE since we don't need it
                    //float lowFreq = inputTemp[3]*lowFreqMix;

                    float rearLeft  = inputTemp[4] * surroundMix;
                    float rearRight = inputTemp[5] * surroundMix;

                    // Drop SPEAKER_FRONT_LEFT_OF_CENTER , SPEAKER_FRONT_RIGHT_OF_CENTER
                    //float centerLeft  = inputTemp[6];
                    //float centerRight = inputTemp[7];

                    // Downmix from 5.1 to stereo
                    *(outputTemp++) = (left + center + rearLeft) * attn5dot1;
                    *(outputTemp++) = (right + center + rearRight) * attn5dot1;

                    inputTemp += 8;
                }
            }
            // Downmix to 5.1 (easy stuff) then downmix to stereo as done in KSAUDIO_SPEAKER_5POINT1
            else if(inputChannelMask == KSAUDIO_SPEAKER_7POINT1_SURROUND)
            {
                UINT numFloats = numAudioFrames*8;
                float *endTemp = inputTemp+numFloats;

                while(inputTemp < endTemp)
                {
                    float left   = inputTemp[0];
                    float right  = inputTemp[1];
                    float center = inputTemp[2] * centerMix;

                    // Skip LFE we don't need it
                    //float lowFreq = inputTemp[3]*lowFreqMix;

                    float rearLeft  = inputTemp[4];
                    float rearRight = inputTemp[5];
                    float sideLeft  = inputTemp[6];
                    float sideRight = inputTemp[7];

                    // combine the rear/side channels first , baaam! 5.1
                    rearLeft  = (rearLeft + sideLeft) * 0.5f;
                    rearRight = (rearRight + sideRight) * 0.5f;

                    // downmix to stereo as in 5.1 case
                    *(outputTemp++) = (left + center + rearLeft * surroundMix) * attn5dot1;
                    *(outputTemp++) = (right + center + rearRight * surroundMix) * attn5dot1;

                    inputTemp += 8;
                }
            }
        }

        // Device buffer is fully consumed into tempBuffer; hand it back.
        ReleaseBuffer();

        //------------------------------------------------------------
        // resample

        if(bResample)
        {
            // Worst-case output frame count for the ratio, +1 for rounding.
            UINT frameAdjust = UINT((double(numAudioFrames) * resampleRatio) + 1.0);
            UINT newFrameSize = frameAdjust*2;

            if(tempResampleBuffer.Num() < newFrameSize)
                tempResampleBuffer.SetSize(newFrameSize);

            SRC_DATA data;
            data.src_ratio = resampleRatio;

            data.data_in = tempBuffer.Array();
            data.input_frames = numAudioFrames;

            data.data_out = tempResampleBuffer.Array();
            data.output_frames = frameAdjust;

            data.end_of_input = 0;

            int err = src_process((SRC_STATE*)resampler, &data);
            if(err)
            {
                RUNONCE AppWarning(TEXT("AudioSource::QueryAudio: Was unable to resample audio for device '%s'"), GetDeviceName());
                return NoAudioAvailable;
            }

            if(data.input_frames_used != numAudioFrames)
            {
                RUNONCE AppWarning(TEXT("AudioSource::QueryAudio: Failed to downsample buffer completely, which shouldn't actually happen because it should be using 10ms of samples"));
                return NoAudioAvailable;
            }

            numAudioFrames = data.output_frames_gen;
        }

        //-----------------------------------------------------------------------------
        // sort all audio frames into 10 millisecond increments (done because not all devices output in 10ms increments)
        // NOTE: 0.457+ - instead of using the timestamps from windows, just compare and make sure it stays within a 100ms of their timestamps

        if(!bFirstBaseFrameReceived)
        {
            lastUsedTimestamp = newTimestamp;
            bFirstBaseFrameReceived = true;
        }

        float *newBuffer = (bResample) ? tempResampleBuffer.Array() : tempBuffer.Array();

        if (bSmoothTimestamps)
        {
            // Advance the synthetic clock by one 10ms frame; resync to the
            // device timestamp if we've drifted more than 70ms from it.
            lastUsedTimestamp += 10;

            QWORD difVal = GetQWDif(newTimestamp, lastUsedTimestamp);
            if(difVal > 70)
            {
                //OSDebugOut(TEXT("----------------------------1\r\nlastUsedTimestamp before: %llu - device: %s\r\n"), lastUsedTimestamp, GetDeviceName());
                lastUsedTimestamp = newTimestamp;
                //OSDebugOut(TEXT("lastUsedTimestamp after: %llu\r\n"), lastUsedTimestamp);
            }

            if(lastUsedTimestamp > lastSentTimestamp)
            {
                // Keep segments at least 10ms apart on the smoothed clock.
                QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
                if(adjustVal < 10)
                    lastUsedTimestamp += 10-adjustVal;

                AudioSegment *newSegment = new AudioSegment(newBuffer, numAudioFrames*2, lastUsedTimestamp);
                AddAudioSegment(newSegment, curVolume*sourceVolume);

                lastSentTimestamp = lastUsedTimestamp;
            }
        }
        else
        {
            // OSDebugOut(TEXT("newTimestamp: %llu\r\n"), newTimestamp);
            AudioSegment *newSegment = new AudioSegment(newBuffer, numAudioFrames*2, newTimestamp);
            AddAudioSegment(newSegment, curVolume*sourceVolume);
        }

        //-----------------------------------------------------------------------------

        return AudioAvailable;
    }

    return NoAudioAvailable;
}