/**
 * @brief Open an OS-X CoreAudio stream on the given (virtualized) device.
 *
 * Device indices are doubled by this backend: even/odd user indices map onto
 * the same physical AudioDeviceID (see the `nDevices/2` sizing and the
 * `_device/2` lookup below).
 *
 * @param _device       Virtual device index (0 .. getDeviceCount()-1).
 * @param _mode         Stream direction (mode_input or mode_output).
 * @param _channels     Number of channels requested by the user.
 * @param _firstChannel First device channel to use (channel offset).
 * @param _sampleRate   Requested sample rate in Hz.
 * @param _format       User-side sample format.
 * @param _bufferSize   In/out: requested frames per buffer; clamped to the
 *                      device range and written back with the value actually set.
 * @param _options      Stream options (e.g. minimize-latency flag).
 * @return true on success, false on any CoreAudio or allocation error.
 */
bool audio::orchestra::api::Core::open(uint32_t _device, audio::orchestra::mode _mode, uint32_t _channels, uint32_t _firstChannel, uint32_t _sampleRate, audio::format _format, uint32_t *_bufferSize, const audio::orchestra::StreamOptions& _options) {
	// Get device ID
	uint32_t nDevices = getDeviceCount();
	if (nDevices == 0) {
		// This should not happen because a check is made before this function is called.
		ATA_ERROR("no devices found!");
		return false;
	}
	if (_device >= nDevices) {
		// This should not happen because a check is made before this function is called.
		ATA_ERROR("device ID is invalid!");
		return false;
	}
	// Half as many AudioDeviceIDs as virtual devices (input/output pairs).
	AudioDeviceID deviceList[ nDevices/2 ];
	uint32_t dataSize = sizeof(AudioDeviceID) * nDevices/2;
	AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
	OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, nullptr, &dataSize, (void *) &deviceList);
	if (result != noErr) {
		ATA_ERROR("OS-X system error getting device IDs.");
		return false;
	}
	AudioDeviceID id = deviceList[ _device/2 ];
	// Setup for stream mode.
	// NOTE(review): isInput is written but never read afterwards; only the
	// property scope selection actually matters here.
	bool isInput = false;
	if (_mode == audio::orchestra::mode_input) {
		isInput = true;
		property.mScope = kAudioDevicePropertyScopeInput;
	} else {
		property.mScope = kAudioDevicePropertyScopeOutput;
	}
	// Get the stream "configuration".
	AudioBufferList *bufferList = nil;
	dataSize = 0;
	property.mSelector = kAudioDevicePropertyStreamConfiguration;
	result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
	if ( result != noErr || dataSize == 0) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration info for device (" << _device << ").");
		return false;
	}
	// Allocate the AudioBufferList.
	bufferList = (AudioBufferList *) malloc(dataSize);
	if (bufferList == nullptr) {
		ATA_ERROR("memory error allocating AudioBufferList.");
		return false;
	}
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, bufferList);
	if ( result != noErr || dataSize == 0) {
		// NOTE(review): bufferList is leaked on this error path (no free()).
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ").");
		return false;
	}
	// Search for one or more streams that contain the desired number of
	// channels. CoreAudio devices can have an arbitrary number of
	// streams and each stream can have an arbitrary number of channels.
	// For each stream, a single buffer of interleaved samples is
	// provided. orchestra prefers the use of one stream of interleaved
	// data or multiple consecutive single-channel streams. However, we
	// now support multiple consecutive multi-channel streams of
	// interleaved data as well.
	uint32_t iStream, offsetCounter = _firstChannel;
	uint32_t nStreams = bufferList->mNumberBuffers;
	bool monoMode = false;
	bool foundStream = false;
	// First check that the device supports the requested number of
	// channels.
	uint32_t deviceChannels = 0;
	for (iStream=0; iStream<nStreams; iStream++) {
		deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
	}
	if (deviceChannels < (_channels + _firstChannel)) {
		free(bufferList);
		ATA_ERROR("the device (" << _device << ") does not support the requested channel count.");
		return false;
	}
	// Look for a single stream meeting our needs.
	uint32_t firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
	for (iStream=0; iStream<nStreams; iStream++) {
		streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
		if (streamChannels >= _channels + offsetCounter) {
			// This stream alone can carry all requested channels.
			firstStream = iStream;
			channelOffset = offsetCounter;
			foundStream = true;
			break;
		}
		if (streamChannels > offsetCounter) {
			// Requested range straddles this stream: single-stream fails.
			break;
		}
		offsetCounter -= streamChannels;
	}
	// If we didn't find a single stream above, then we should be able
	// to meet the channel specification with multiple streams.
	if (foundStream == false) {
		monoMode = true;
		offsetCounter = _firstChannel;
		// Skip whole streams until the first requested channel is reached.
		for (iStream=0; iStream<nStreams; iStream++) {
			streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
			if (streamChannels > offsetCounter) {
				break;
			}
			offsetCounter -= streamChannels;
		}
		firstStream = iStream;
		channelOffset = offsetCounter;
		int32_t channelCounter = _channels + offsetCounter - streamChannels;
		if (streamChannels > 1) {
			monoMode = false;
		}
		// Count how many additional consecutive streams are needed.
		while (channelCounter > 0) {
			streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
			if (streamChannels > 1) {
				monoMode = false;
			}
			channelCounter -= streamChannels;
			streamCount++;
		}
	}
	free(bufferList);
	// Determine the buffer size.
	AudioValueRange bufferRange;
	dataSize = sizeof(AudioValueRange);
	property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &bufferRange);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting buffer size range for device (" << _device << ").");
		return false;
	}
	// Clamp the requested size into the device's supported range.
	if (bufferRange.mMinimum > *_bufferSize) {
		*_bufferSize = (uint64_t) bufferRange.mMinimum;
	} else if (bufferRange.mMaximum < *_bufferSize) {
		*_bufferSize = (uint64_t) bufferRange.mMaximum;
	}
	if (_options.flags.m_minimizeLatency == true) {
		*_bufferSize = (uint64_t) bufferRange.mMinimum;
	}
	// Set the buffer size. For multiple streams, I'm assuming we only
	// need to make this setting for the master channel.
	uint32_t theSize = (uint32_t) *_bufferSize;
	dataSize = sizeof(uint32_t);
	property.mSelector = kAudioDevicePropertyBufferFrameSize;
	result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &theSize);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") setting the buffer size for device (" << _device << ").");
		return false;
	}
	// If attempting to setup a duplex stream, the bufferSize parameter
	// MUST be the same in both directions!
	*_bufferSize = theSize;
	if ( m_mode == audio::orchestra::mode_output && _mode == audio::orchestra::mode_input && *_bufferSize != m_bufferSize) {
		ATA_ERROR("system error setting buffer size for duplex stream on device (" << _device << ").");
		return false;
	}
	m_bufferSize = *_bufferSize;
	m_nBuffers = 1;
	// Check and if necessary, change the sample rate for the device.
	double nominalRate;
	dataSize = sizeof(double);
	property.mSelector = kAudioDevicePropertyNominalSampleRate;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &nominalRate);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting current sample rate.");
		return false;
	}
	// Only change the sample rate if off by more than 1 Hz.
	if (fabs(nominalRate - (double)_sampleRate) > 1.0) {
		// Set a property listener for the sample rate change
		// NOTE(review): reportedRate is written by the listener on the HAL
		// thread and polled here without synchronization — confirm this is
		// acceptable for the target platform's memory model.
		double reportedRate = 0.0;
		AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
		result = AudioObjectAddPropertyListener(id, &tmp, &rateListener, (void *) &reportedRate);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate property listener for device (" << _device << ").");
			return false;
		}
		nominalRate = (double) _sampleRate;
		result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &nominalRate);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate for device (" << _device << ").");
			return false;
		}
		// Now wait until the reported nominal rate is what we just set.
		// Polls every 5 ms, gives up after roughly 5 seconds.
		uint32_t microCounter = 0;
		while (reportedRate != nominalRate) {
			microCounter += 5000;
			if (microCounter > 5000000) {
				break;
			}
			std::this_thread::sleep_for(std::chrono::milliseconds(5));
		}
		// Remove the property listener.
		AudioObjectRemovePropertyListener(id, &tmp, &rateListener, (void *) &reportedRate);
		if (microCounter > 5000000) {
			ATA_ERROR("timeout waiting for sample rate update for device (" << _device << ").");
			return false;
		}
	}
	// Now set the stream format for all streams. Also, check the
	// physical format of the device and change that if necessary.
	AudioStreamBasicDescription description;
	dataSize = sizeof(AudioStreamBasicDescription);
	property.mSelector = kAudioStreamPropertyVirtualFormat;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &description);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream format for device (" << _device << ").");
		return false;
	}
	// Set the sample rate and data format id. However, only make the
	// change if the sample rate is not within 1.0 of the desired
	// rate and the format is not linear pcm.
	bool updateFormat = false;
	if (fabs(description.mSampleRate - (double)_sampleRate) > 1.0) {
		description.mSampleRate = (double) _sampleRate;
		updateFormat = true;
	}
	if (description.mFormatID != kAudioFormatLinearPCM) {
		description.mFormatID = kAudioFormatLinearPCM;
		updateFormat = true;
	}
	if (updateFormat) {
		result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &description);
		if (result != noErr) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting sample rate or data format for device (" << _device << ").");
			return false;
		}
	}
	// Now check the physical format.
	property.mSelector = kAudioStreamPropertyPhysicalFormat;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &description);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream physical format for device (" << _device << ").");
		return false;
	}
	//std::cout << "Current physical stream format:" << std::endl;
	//std::cout << "	mBitsPerChan = " << description.mBitsPerChannel << std::endl;
	//std::cout << "	aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
	//std::cout << "	bytesPerFrame = " << description.mBytesPerFrame << std::endl;
	//std::cout << "	sample rate = " << description.mSampleRate << std::endl;
	if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16) {
		description.mFormatID = kAudioFormatLinearPCM;
		//description.mSampleRate = (double) sampleRate;
		AudioStreamBasicDescription testDescription = description;
		uint32_t formatFlags;
		// We'll try higher bit rates first and then work our way down.
		// NOTE(review): the vector stores pair<uint32_t,uint32_t>, so the
		// 24.2 / 24.4 float markers below are truncated to 24 on insertion;
		// only the flag word still distinguishes those entries.
		std::vector< std::pair<uint32_t, uint32_t> > physicalFormats;
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
		physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags));
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
		physicalFormats.push_back(std::pair<float, uint32_t>(32, formatFlags));
		physicalFormats.push_back(std::pair<float, uint32_t>(24, formatFlags)); // 24-bit packed
		formatFlags &= ~(kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh);
		physicalFormats.push_back(std::pair<float, uint32_t>(24.2, formatFlags)); // 24-bit in 4 bytes, aligned low
		formatFlags |= kAudioFormatFlagIsAlignedHigh;
		physicalFormats.push_back(std::pair<float, uint32_t>(24.4, formatFlags)); // 24-bit in 4 bytes, aligned high
		formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
		physicalFormats.push_back(std::pair<float, uint32_t>(16, formatFlags));
		physicalFormats.push_back(std::pair<float, uint32_t>(8, formatFlags));
		bool setPhysicalFormat = false;
		for(uint32_t i=0; i<physicalFormats.size(); i++) {
			testDescription = description;
			testDescription.mBitsPerChannel = (uint32_t) physicalFormats[i].first;
			testDescription.mFormatFlags = physicalFormats[i].second;
			// NOTE(review): `~(... & kAudioFormatFlagIsPacked)` is a bitwise
			// NOT, which is non-zero (truthy) for every flag value — this
			// condition is effectively `24 == first`. A logical `!` was
			// probably intended; confirm against upstream before changing.
			if ( (24 == (uint32_t)physicalFormats[i].first) && ~(physicalFormats[i].second & kAudioFormatFlagIsPacked)) {
				testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
			} else {
				testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
			}
			testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
			result = AudioObjectSetPropertyData(id, &property, 0, nullptr, dataSize, &testDescription);
			if (result == noErr) {
				setPhysicalFormat = true;
				//std::cout << "Updated physical stream format:" << std::endl;
				//std::cout << "	mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
				//std::cout << "	aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
				//std::cout << "	bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
				//std::cout << "	sample rate = " << testDescription.mSampleRate << std::endl;
				break;
			}
		}
		if (!setPhysicalFormat) {
			ATA_ERROR("system error (" << getErrorCode(result) << ") setting physical data format for device (" << _device << ").");
			return false;
		}
	} // done setting virtual/physical formats.
	// Get the stream / device latency.
	uint32_t latency;
	dataSize = sizeof(uint32_t);
	property.mSelector = kAudioDevicePropertyLatency;
	if (AudioObjectHasProperty(id, &property) == true) {
		result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &latency);
		if (result == kAudioHardwareNoError) {
			m_latency[ _mode ] = latency;
		} else {
			ATA_ERROR("system error (" << getErrorCode(result) << ") getting device latency for device (" << _device << ").");
			return false;
		}
	}
	// Byte-swapping: According to AudioHardware.h, the stream data will
	// always be presented in native-endian format, so we should never
	// need to byte swap.
	m_doByteSwap[modeToIdTable(_mode)] = false;
	// From the CoreAudio documentation, PCM data must be supplied as
	// 32-bit floats.
	m_userFormat = _format;
	m_deviceFormat[modeToIdTable(_mode)] = audio::format_float;
	if (streamCount == 1) {
		m_nDeviceChannels[modeToIdTable(_mode)] = description.mChannelsPerFrame;
	} else {
		// multiple streams
		m_nDeviceChannels[modeToIdTable(_mode)] = _channels;
	}
	m_nUserChannels[modeToIdTable(_mode)] = _channels;
	m_channelOffset[modeToIdTable(_mode)] = channelOffset; // offset within a CoreAudio stream
	m_deviceInterleaved[modeToIdTable(_mode)] = true;
	if (monoMode == true) {
		m_deviceInterleaved[modeToIdTable(_mode)] = false;
	}
	// Set flags for buffer conversion.
	m_doConvertBuffer[modeToIdTable(_mode)] = false;
	if (m_userFormat != m_deviceFormat[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (m_nUserChannels[modeToIdTable(_mode)] < m_nDeviceChannels[modeToIdTable(_mode)]) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	if (streamCount == 1) {
		if ( m_nUserChannels[modeToIdTable(_mode)] > 1 && m_deviceInterleaved[modeToIdTable(_mode)] == false) {
			m_doConvertBuffer[modeToIdTable(_mode)] = true;
		}
	} else if (monoMode) {
		m_doConvertBuffer[modeToIdTable(_mode)] = true;
	}
	m_private->iStream[modeToIdTable(_mode)] = firstStream;
	m_private->nStreams[modeToIdTable(_mode)] = streamCount;
	m_private->id[modeToIdTable(_mode)] = id;
	// Allocate necessary internal buffers.
	uint64_t bufferBytes;
	bufferBytes = m_nUserChannels[modeToIdTable(_mode)] * *_bufferSize * audio::getFormatBytes(m_userFormat);
	//	m_userBuffer[modeToIdTable(_mode)] = (char *) calloc(bufferBytes, 1);
	m_userBuffer[modeToIdTable(_mode)].resize(bufferBytes, 0);
	if (m_userBuffer[modeToIdTable(_mode)].size() == 0) {
		ATA_ERROR("error allocating user buffer memory.");
		goto error;
	}
	// If possible, we will make use of the CoreAudio stream buffers as
	// "device buffers". However, we can't do this if using multiple
	// streams.
	if ( m_doConvertBuffer[modeToIdTable(_mode)] && m_private->nStreams[modeToIdTable(_mode)] > 1) {
		bool makeBuffer = true;
		bufferBytes = m_nDeviceChannels[modeToIdTable(_mode)] * audio::getFormatBytes(m_deviceFormat[modeToIdTable(_mode)]);
		if (_mode == audio::orchestra::mode_input) {
			if ( m_mode == audio::orchestra::mode_output && m_deviceBuffer) {
				// Reuse the existing output device buffer if it is big enough.
				uint64_t bytesOut = m_nDeviceChannels[0] * audio::getFormatBytes(m_deviceFormat[0]);
				if (bufferBytes <= bytesOut) {
					makeBuffer = false;
				}
			}
		}
		if (makeBuffer) {
			bufferBytes *= *_bufferSize;
			if (m_deviceBuffer) {
				free(m_deviceBuffer);
				m_deviceBuffer = nullptr;
			}
			m_deviceBuffer = (char *) calloc(bufferBytes, 1);
			if (m_deviceBuffer == nullptr) {
				ATA_ERROR("error allocating device buffer memory.");
				goto error;
			}
		}
	}
	m_sampleRate = _sampleRate;
	m_device[modeToIdTable(_mode)] = _device;
	m_state = audio::orchestra::state::stopped;
	ATA_VERBOSE("Set state as stopped");
	// Setup the buffer conversion information structure.
	if (m_doConvertBuffer[modeToIdTable(_mode)]) {
		if (streamCount > 1) {
			setConvertInfo(_mode, 0);
		} else {
			setConvertInfo(_mode, channelOffset);
		}
	}
	if ( _mode == audio::orchestra::mode_input && m_mode == audio::orchestra::mode_output && m_device[0] == _device) {
		// Only one callback procedure per device.
		m_mode = audio::orchestra::mode_duplex;
	} else {
#if defined(MAC_OS_X_VERSION_10_5) && (MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5)
		result = AudioDeviceCreateIOProcID(id, &audio::orchestra::api::Core::callbackEvent, this, &m_private->procId[modeToIdTable(_mode)]);
#else
		// deprecated in favor of AudioDeviceCreateIOProcID()
		result = AudioDeviceAddIOProc(id, &audio::orchestra::api::Core::callbackEvent, this);
#endif
		if (result != noErr) {
			ATA_ERROR("system error setting callback for device (" << _device << ").");
			goto error;
		}
		if ( m_mode == audio::orchestra::mode_output && _mode == audio::orchestra::mode_input) {
			m_mode = audio::orchestra::mode_duplex;
		} else {
			m_mode = _mode;
		}
	}
	// Setup the device property listener for over/underload.
	property.mSelector = kAudioDeviceProcessorOverload;
	result = AudioObjectAddPropertyListener(id, &property, &audio::orchestra::api::Core::xrunListener, this);
	return true;
error:
	// Common failure cleanup: drop both user buffers and the device buffer,
	// and mark the stream closed.
	m_userBuffer[0].clear();
	m_userBuffer[1].clear();
	if (m_deviceBuffer) {
		free(m_deviceBuffer);
		m_deviceBuffer = 0;
	}
	m_state = audio::orchestra::state::closed;
	ATA_VERBOSE("Set state as closed");
	return false;
}
// Main sketch loop: poll the sensor-bus device count, run the temperature
// job every GETTEMPFEQ iterations, and service the rotary-encoder/button
// driven OLED screens.
void loop() {
	// New block to identify device count changes
	lastDeviceCount = deviceCount;
	deviceCount = getDeviceCount();
	//end New block
	mycounter++;
	if(lastDeviceCount != deviceCount ) {
		//device count changes this never works
		oled.clear(ALL); // Clear the display's internal memory
		oled.display(); // Display what's in the buffer (splashscreen)
		oled.setCursor(0,0);
		//oled << "Count " << endl << "changed " << endl << lastDeviceCount << " " << deviceCount << endl;
		oled.display();
		delay(5000); // Delay 5000 ms (original comment said 1000 ms)
		oled.clear(PAGE); // Clear the buffer.
		Serial << " The device Count Changed " << lastDeviceCount << " " << deviceCount << endl;
	}
	// only do these things every GETTEMPFEQ loops or before I get to the first GETTEMPFREQ
	if (mycounter % GETTEMPFEQ == 0 || mycounter < GETTEMPFEQ ) {
		deviceCount = getDeviceCount();
		if ( deviceCount > 0 ) {
			temperatureJob(); // do the main temperature job
		}
		// I think this is wrong
		//lastDeviceCount = getDeviceCount(); // used to detect
	}
	buttonvalue = digitalRead(button);
	if( debug ) {
		Serial << mycounter << " freq: " << freqChecker() << "Hz | enocderPos: ";
		Serial << encoderPos << " | buttonvalue: " << buttonvalue << endl;
	}
	//encoder: dispatch only when the interrupt-driven position changed
	if (prevPos != encoderPos) {
		prevPos = encoderPos;
		Serial << "encoder position: " << encoderPos << endl;
		dispatchEncoder();
	}
	// Encoder positions 4/5 select info screens; positions 1-3 show a
	// "no devices" screen when the bus is empty.
	if (encoderPos == 4 ) oPrintInfo();
	if (encoderPos == 5 ) oPrintInfo5();
	if (deviceCount == 0 && encoderPos < 4 && encoderPos > 0 ) oPrintNoDevices() ;
	// Timestamps consumed by freqChecker().
	lastime = thistime;
	delay(mydelay);
	thistime = millis();
}
// Allocate host buffers for Bluestein's algorithm (size, m), then create the
// OpenCL GPU context and one command queue per device.
// Returns 0 only when no OpenCL GPU device is found; 1 otherwise (including
// the case where the outer `deviceCount` is zero and nothing is initialized).
unsigned initExecutionBluesteins(const unsigned size, const unsigned m) {
	allocateHostMemoryBluesteins(size, m);
	if (deviceCount) {
		printf("Initializing device(s).." );
		// create the OpenCL context on available GPU devices
		init_cl_context(CL_DEVICE_TYPE_GPU);
		const cl_uint ciDeviceCount = getDeviceCount();
		if (!ciDeviceCount) {
			printf("No opencl specific devices!\n");
			return 0;
		}
		printf("Creating Command Queue...\n");
		// create a command queue on device 1
		// NOTE(review): the loop bound is the pre-existing `deviceCount`,
		// not the freshly queried `ciDeviceCount` — confirm the two counts
		// always agree, otherwise this creates queues for stale indices.
		for (unsigned i = 0; i < deviceCount; ++i) {
			createCommandQueue(i);
		}
	}
	return 1;
}
// Factory: resolve the configured device index, pick a camera configuration
// that matches the requested format/size/fps, and build a V4Linux2Camera.
// Returns NULL when no device or no matching configuration exists.
CameraEngine* V4Linux2Camera::getCamera(CameraConfig *cam_cfg) {
	const int cam_count = getDeviceCount();
	if (cam_count == 0) return NULL;
	// Map symbolic SETTING_* device values onto concrete indices.
	if ((cam_cfg->device == SETTING_MIN) || (cam_cfg->device == SETTING_DEFAULT)) {
		cam_cfg->device = 0;
	} else if (cam_cfg->device == SETTING_MAX) {
		cam_cfg->device = cam_count - 1;
	}
	std::vector<CameraConfig> candidates = V4Linux2Camera::getCameraConfigs(cam_cfg->device);
	if (candidates.size() == 0) return NULL;
	if (cam_cfg->cam_format == FORMAT_UNKNOWN) {
		cam_cfg->cam_format = candidates[0].cam_format;
	}
	setMinMaxConfig(cam_cfg, candidates);
	// Forced mode skips the compatibility scan entirely.
	if (cam_cfg->force) return new V4Linux2Camera(cam_cfg);
	for (unsigned int idx = 0; idx < candidates.size(); idx++) {
		const CameraConfig& option = candidates[idx];
		// A negative width/height/fps means "unconstrained".
		const bool formatOk = (cam_cfg->cam_format == option.cam_format);
		const bool widthOk  = (cam_cfg->cam_width  < 0) || (cam_cfg->cam_width  == option.cam_width);
		const bool heightOk = (cam_cfg->cam_height < 0) || (cam_cfg->cam_height == option.cam_height);
		const bool fpsOk    = (cam_cfg->cam_fps    < 0) || (cam_cfg->cam_fps    == option.cam_fps);
		if (formatOk && widthOk && heightOk && fpsOk) {
			return new V4Linux2Camera(cam_cfg);
		}
	}
	return NULL;
}
// Print a notification line for every registered device to outputStream.
// NOTE(review): the showOnlyProblem parameter is currently unused — the
// listing is unconditional; confirm whether filtering was ever implemented.
void printDeviceListNotification(OutputStream* outputStream, bool showOnlyProblem) {
    println(outputStream);
    const int total = getDeviceCount();
    for (int index = 0; index < total; index++) {
        printDeviceNotification(outputStream, getDevice(index));
    }
}
// Return the name of the device at index idx, or an empty string (and set
// the last-error text) when the index is out of range.
const char * RedBook::getDeviceName(U32 idx) {
    if (idx >= getDeviceCount()) {
        setLastError("Invalid device index");
        return "";
    }
    return smDeviceList[idx]->mDeviceName;
}
// Earlier revision of the sketch loop: device-count change detection,
// periodic temperature job, and encoder handling.
void loop() {
	mycounter++;
	if(lastDeviceCount != deviceCount ) {
		//device count changes this never works
		// NOTE(review): lastDeviceCount is only refreshed inside the
		// GETTEMPFEQ block below, so this comparison usually sees stale
		// values — consistent with the "never works" remark.
		oled.clear(ALL); // Clear the display's internal memory
		oled.display(); // Display what's in the buffer (splashscreen)
		//delay(1000); // Delay 1000 ms
		oled.clear(PAGE); // Clear the buffer.
		Serial.println("\n\n The device Count Changed ");
	}
	// only do these things every GETTEMPFEQ loops
	if (mycounter % GETTEMPFEQ == 0 ) {
		deviceCount = getDeviceCount();
		temperatureJob(); // do the main temperature job
		lastDeviceCount = getDeviceCount(); // used to detect
	}
	if( debug ) Serial << "the freq is: " << freqChecker() << "Hz" << endl;
	//encoder: dispatch only when the interrupt-driven position changed
	if (prevPos != encoderPos) {
		prevPos = encoderPos;
		Serial << "encoder position: " << encoderPos << endl;
		dispatchEncoder();
	}
	//*************** testing by hardcoded address Array ******
	/* for (int i = 0; i < 4; i++ ){
	Serial << "the array hardcode version of: " << deviceNames[i] << " " << sensor.getTempF(*deviceAddressArray[i]) << endl;
	}*/
	//****************************************************/
	// Timestamps consumed by freqChecker().
	lastime = thistime;
	//thistime = millis();
	//delay(500);
	thistime = millis();
}
//-------------------------------------------------------------- int ofxCLEye::getDeviceID(const GUID & guid){ int id = -1; int deviceCount = getDeviceCount(); for(int i = 0; i < deviceCount; i++){ if(CLEyeGetCameraUUID(i) == guid){ id = i; } } return id; }
/** * * @param avctx * @return */ static bool probeCuda() { ADM_info( "Probing cuda\n"); if(!loadCuda()) { ADM_warning("Cannot load cuda\n"); return false; } ADM_warning("Cuda loaded, probing..\n"); if(!cudaCall(init(0))) return false; int deviceCount=0; if(!cudaCall(getDeviceCount(&deviceCount))) return false; if (!deviceCount) { ADM_warning( "No Cuda device available\n"); return false; } ADM_info( "found %d CUDA devices \n", deviceCount); for (int i = 0; i < deviceCount; ++i) { CUdevice dev; char chipName[128]; int major,minor,ver; cudaAbortOnFail(getDevice(&dev,i)); cudaAbortOnFail(getDeviceName(chipName,sizeof(chipName),dev)); cudaAbortOnFail(getDeviceCapabilities(&major,&minor,dev)); ver = (major << 4) | minor; ADM_info("Found chip, GPU %s, SM %d.d",chipName,major,minor); if(ver>=0x30) { ADM_info(" this chip has nvenc"); if(!nvEncAvailable) { nvEncAvailable=true; selectedDevice=dev; } } } return nvEncAvailable; abortCudaProbe: return false; }
//-------------------------------------------------------------- void ofxCLEye::setDeviceID(int deviceID){ if(deviceID > getDeviceCount() || deviceID < 0){ ofLogWarning(OFX_CLEYE_MODULE_NAME) << "setDeviceID(): can't find a device with this ID."; return; } if(initialized){ ofLogWarning(OFX_CLEYE_MODULE_NAME) << "setDeviceID(): can't set device while grabber is running."; return; } requestedDeviceID = deviceID; }
// One-time sketch initialization: OLED splash + version screen, cloud
// variables/functions, sensor bus scan, and encoder/button/relay pins.
void setup() {
	oled.begin(); // Initialize the OLED
	oled.clear(ALL); // Clear the display's internal memory
	oled.display(); // Display what's in the buffer (splashscreen)
	delay(1000); // Delay 1000 ms
	oled.clear(PAGE); // Clear the buffer.
	// display the version at boot for 5 seconds (delay below is 5000 ms)
	oled.setFontType(1);
	oled.setCursor(0,8);
	oled.print(FILENAME);
	oled.setCursor(0,24);
	oled.print(MYVERSION);
	oled.display();
	oled.setFontType(0);
	delay(5000);
	// Ubidots endpoint used by the HTTP client.
	request.port = 80;
	request.hostname = "things.ubidots.com";
	Serial.begin(9600);
	sensor.begin();
	// Cloud-visible variable and remotely callable functions.
	Particle.variable("count_devices", &deviceCount, INT);
	Particle.function("q", queryDevices);
	Particle.function("setmode", setModeFunc);
	Particle.function("printEEProm", printEEPROMFunc);
	Particle.function("relay", relayFunc);
	//Need to set the device Index Array at startup
	deviceCount = getDeviceCount();
	queryDevices("auto");
	Particle.publish("reboot",Time.timeStr() );
	//encoder
	pinMode(encoderA, INPUT_PULLUP);
	pinMode(encoderB, INPUT_PULLUP);
	pinMode(button,INPUT_PULLUP);
	pinMode(relay, OUTPUT);
	attachInterrupt(encoderA, doEncoderA, CHANGE);
	attachInterrupt(encoderB, doEncoderB, CHANGE);
	// temperatureJob(); // do this one time so the first screen gets displayed
}
~Manager() {
	// Destructors should not throw exceptions, so all cleanup is wrapped
	// in a try/catch and failures are swallowed (optionally logged).
	try {
		// Collect garbage on every device; setDevice() selects the
		// active device before each collection pass.
		for(int i = 0; i < getDeviceCount(); i++) {
			setDevice(i);
			garbageCollect();
		}
		pinnedGarbageCollect();
	} catch (AfError &ex) {
		// Errors are only printed when the AF_PRINT_ERRORS environment
		// variable is set to something other than "0".
		const char* perr = getenv("AF_PRINT_ERRORS");
		if(perr && perr[0] != '0') {
			fprintf(stderr, "%s\n", ex.what());
		}
	}
}
//Clean up ofxffmv::~ofxffmv() { camNum = 1; if(getDeviceCount() > 0){ for(int i=0;i<camNum;i++) { // Stop the camera. This does not destroy the context. This simply stops // the grabbing of images from the camera. This should always be called // prior to calling flycaptureDestroyContext(). // flycaptureStop( context[i] ); // // Destroy the context. This should always be called before exiting // the application to prevent memory leaks. // flycaptureDestroyContext( context[i] ); }//end of for loop } }
uint32_t audio::orchestra::api::Core::getDefaultOutputDevice() { uint32_t nDevices = getDeviceCount(); if (nDevices <= 1) { return 0; } AudioDeviceID id; uint32_t dataSize = sizeof(AudioDeviceID); AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster }; OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, nullptr, &dataSize, &id); if (result != noErr) { ATA_ERROR("OS-X system error getting device."); return 0; } dataSize = sizeof(AudioDeviceID) * nDevices; AudioDeviceID deviceList[ nDevices ]; property.mSelector = kAudioHardwarePropertyDevices; result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, nullptr, &dataSize, (void*)&deviceList); if (result != noErr) { ATA_ERROR("OS-X system error getting device IDs."); return 0; } for (uint32_t iii=0; iii<nDevices; iii++) { if (id == deviceList[iii]) { return iii*2; } } ATA_ERROR("No default device found!"); return 0; }
// Look up a PortAudio device by its exact name.
// Returns a Device with id == -1 when no device matches; when several
// devices share the name, the last one enumerated wins (the scan does not
// stop at the first hit, matching the original behavior).
Device AudioManager::getDeviceByName(std::string name) {
	Device match;
	match.id = -1;
	const int total = getDeviceCount();
	for (int idx = 0; idx < total; idx++) {
		const PaDeviceInfo* info = Pa_GetDeviceInfo(idx);
		if (info->name == name) {
			match.name = info->name;
			match.latency = info->defaultLowOutputLatency;
			match.sampleRate = info->defaultSampleRate;
			match.channels = info->maxOutputChannels;
			match.id = idx;
			match.isDefault = (idx == Pa_GetDefaultOutputDevice());
		}
	}
	return match;
}
//-------------------------------------------------------------- vector <ofVideoDevice> ofxCLEye::listDevices(){ int numCams = getDeviceCount(); if(numCams == 0){ ofLogWarning(OFX_CLEYE_MODULE_NAME) << "No PS Eye cameras found!"; } ofLogVerbose(OFX_CLEYE_MODULE_NAME) << numCams << " PS Eye camera(s) found"; vector <ofVideoDevice> devices; for(int i = 0; i < numCams; i++){ //GUID guidCamera; //guidCamera = CLEyeGetCameraUUID(i); // TODO: properly fullfil the ofVideoDevice with the specs from the SDK. // See https://codelaboratories.com/research/view/cl-eye-muticamera-api } ofLogWarning(OFX_CLEYE_MODULE_NAME) << "Function not implemented yet"; return devices; }
// Build the list of PortAudio OUTPUT devices (inputs are skipped), recording
// name, default low output latency, default sample rate, channel count, id,
// and whether the device is the system default output.
AudioManager::DeviceList AudioManager::getDeviceList() {
	AudioManager::DeviceList list;
	const int total = getDeviceCount();
	const int defaultId = Pa_GetDefaultOutputDevice();
	for (int idx = 0; idx < total; idx++) {
		const PaDeviceInfo* info = Pa_GetDeviceInfo(idx);
		// Skip pure input devices.
		if (info->maxOutputChannels <= 0) {
			continue;
		}
		Device entry;
		entry.name = info->name;
		entry.latency = info->defaultLowOutputLatency;
		entry.sampleRate = info->defaultSampleRate;
		entry.channels = info->maxOutputChannels;
		entry.id = idx;
		entry.isDefault = (idx == defaultId);
		list.push_back(entry);
	}
	return list;
}
bool videoInputCamera::initCamera() { int dev_count = getDeviceCount(); if (dev_count==0) return false; if ((cfg->device==SETTING_MIN) || (cfg->device==SETTING_DEFAULT)) cfg->device=0; else if (cfg->device==SETTING_MAX) cfg->device=dev_count-1; std::vector<CameraConfig> cfg_list = videoInputCamera::getCameraConfigs(cfg->device); if (cfg_list.size()==0) return false; if (cfg->cam_format==FORMAT_UNKNOWN) cfg->cam_format = cfg_list[0].cam_format; setMinMaxConfig(cfg,cfg_list); HRESULT hr = setupDevice(); if(FAILED(hr)) return false; setupFrame(); if (cfg->frame) cam_buffer = new unsigned char[cfg->cam_width*cfg->cam_height*cfg->src_format]; else cam_buffer = new unsigned char[cfg->cam_width*cfg->cam_height*cfg->src_format]; return true; }
// One-time sketch initialization (early revision): OLED splash, cloud
// variables/functions, sensor bus scan, and encoder pins.
void setup() {
	oled.begin(); // Initialize the OLED
	oled.clear(ALL); // Clear the display's internal memory
	oled.display(); // Display what's in the buffer (splashscreen)
	delay(1000); // Delay 1000 ms
	oled.clear(PAGE); // Clear the buffer.
	// Ubidots endpoint used by the HTTP client.
	request.port = 80;
	request.hostname = "things.ubidots.com";
	Serial.begin(9600);
	sensor.begin();
	// Cloud-visible variable and remotely callable functions.
	Particle.variable("count_devices", &deviceCount, INT);
	Particle.function("q", queryDevices);
	Particle.function("setmode", setModeFunc);
	Particle.function("printEEProm", printEEPROMFunc);
	Particle.function("regDevice", regDeviceFunc);
	//Need to set the device Index Array at startup
	deviceCount = getDeviceCount();
	queryDevices("auto");
	Particle.publish("reboot",Time.timeStr() );
	//encoder
	pinMode(encoderA, INPUT_PULLUP);
	pinMode(encoderB, INPUT_PULLUP);
	attachInterrupt(encoderA, doEncoderA, CHANGE);
	attachInterrupt(encoderB, doEncoderB, CHANGE);
}
// Main sketch loop (latest revision): poll the device count, run the
// temperature job every GETTEMPFEQ iterations, service the encoder-driven
// OLED screens, and periodically run the moisture check.
void loop() {
	// New block to identify device count changes
	lastDeviceCount = deviceCount;
	deviceCount = getDeviceCount();
	//end New block
	mycounter++;
	if(lastDeviceCount != deviceCount ) {
		//device count changes this never works
		oled.clear(ALL); // Clear the display's internal memory
		oled.display(); // Display what's in the buffer (splashscreen)
		oled.setCursor(0,0);
		//oled << "Count " << endl << "changed " << endl << lastDeviceCount << " " << deviceCount << endl;
		oled.display();
		//delay(5000); // Delay 1000 ms
		oled.clear(PAGE); // Clear the buffer.
		Serial << " The device Count Changed " << lastDeviceCount << " " << deviceCount << endl;
	}
	// only do these things every GETTEMPFEQ loops or before I get to the first GETTEMPFREQ
	if (mycounter % GETTEMPFEQ == 0 || mycounter < GETTEMPFEQ ) {
		deviceCount = getDeviceCount();
		if ( deviceCount > 0 ) {
			temperatureJob(); // do the main temperature job
		}
		// I think this is wrong
		// lastDeviceCount = getDeviceCount(); // used to detect
	}
	buttonvalue = digitalRead(button);
	if( debug ) {
		Serial <<" mycounter: " << mycounter << " freq: " << freqChecker() << "Hz | encoderPos: ";
		Serial << encoderPos << " | buttonvalue: " << buttonvalue << " displayMode: " << displayMode << endl;
	}
	//encoder: dispatch only when the interrupt-driven position changed
	if (prevPos != encoderPos) {
		prevPos = encoderPos;
		Serial << "encoder position: " << encoderPos << endl;
		dispatchEncoder();
	}
	// Encoder positions 4-7 select the various info screens.
	if (encoderPos == 4 ) oPrintInfo();
	if (encoderPos == 5 ) oPrintInfo5();
	if (encoderPos == 6 ) oPrintRelayMode();
	if (encoderPos == 7 ) oPrintMoisture();
	// Don't interrupt info screens to report no device
	if (deviceCount == 0 && encoderPos < 4 && encoderPos > 0 ) oPrintNoDevices() ;
	// check the moisture every moistureCheckerFreq (60 - about 20 seconds) currently 7200 ~ 1 hour
	if ( mycounter % moistureCheckerFreq == 0 ) {
		// Show the moisture screen, then restore the previous display mode.
		int lastDispalyMode = displayMode;
		oPrintMoisture();
		displayMode = encoderPos = lastDispalyMode;
	}
	// always last
	lastime = thistime; // for frequency checker
	delay(mydelay);
	thistime = millis();
}
// One-time firmware initialization: splash + version screen, cloud
// variables/functions, device index seeding, GPIO/interrupt setup, and a
// burst of boot-status publishes (IP, firmware version, RSSI, SSID).
void setup() {
    // ---- OLED splashscreen ----
    oled.begin(); // Initialize the OLED
    oled.clear(ALL); // Clear the display's internal memory
    oled.display(); // Display what's in the buffer (splashscreen)
    delay(1000); // Delay 1000 ms
    oled.clear(PAGE); // Clear the buffer.
    // Display the firmware name/version at boot (held for 5 seconds —
    // the delay below is 5000 ms, not the 2 s an older comment claimed).
    oled.setFontType(1);
    oled.setCursor(0,8);
    oled.print(FILENAME);
    oled.setCursor(0,24);
    oled.print(MYVERSION);
    oled.display();
    oled.setFontType(0);
    delay(5000);
    // ---- Cloud endpoint ----
    request.port = 80;
    request.hostname = "things.ubidots.com";
    Serial.begin(9600);
    sensor.begin();
    // Cloud-visible variables (read-only from the Particle cloud).
    Particle.variable("devices",deviceCount);
    Particle.variable("m1pct",M1PCT);
    Particle.variable("version",MYVERSION);
    Particle.variable("file",FILENAME);
    Particle.variable("outside_temp",temp1);
    Particle.variable("room_temp",temp2);
    Particle.variable("pit_temp",temp3);
    Particle.variable("board_temp",temp4);
    Particle.variable("m1desc",M1DESC);
    // Cloud-callable functions.
    Particle.function("q", queryDevices);
    Particle.function("setmode", setModeFunc);
    Particle.function("printEEProm", printEEPROMFunc);
    Particle.function("relay", relayFunc);
    //Need to set the device Index Array at startup
    deviceCount = getDeviceCount();
    queryDevices("auto");
    Particle.publish("reboot",Time.timeStr() );
    //encoder and I/O pin configuration
    pinMode(encoderA, INPUT_PULLUP);
    pinMode(encoderB, INPUT_PULLUP);
    pinMode(button,INPUT_PULLUP);
    pinMode(M1,INPUT_PULLDOWN);
    pinMode(relay, OUTPUT);
    pinMode(M1POWER, OUTPUT);
    attachInterrupt(encoderA, doEncoderA, CHANGE);
    attachInterrupt(encoderB, doEncoderB, CHANGE);
    // Publish boot diagnostics: local IP, system version, signal, SSID.
    IPAddress myIP = WiFi.localIP();
    String ipStr = String(myIP[0])+"."+String(myIP[1])+"."+String(myIP[2])+"."+String(myIP[3]);
    Particle.publish("LocalIP", ipStr, 60,PRIVATE);
    String myVersion = System.version().c_str();
    delay(2000); // brief gap so publishes are not rate-limited — TODO confirm necessity
    Particle.publish("Version", myVersion, 60,PRIVATE);
    Particle.publish("rssi", String( WiFi.RSSI()), 60, PRIVATE);
    Particle.publish("SSID", String( WiFi.SSID()), 60, PRIVATE);
}
bool hasGpus() { int deviceCount = getDeviceCount(); return deviceCount > 0; }
// Query CoreAudio for the capabilities of one logical device and fill a
// DeviceInfo: name ("manufacturer: name"), channel count, supported sample
// rates, native format, and default-device flag. On any failure the info is
// cleared (isCorrect stays false) and returned.
// @param _device logical device index; odd indices are inputs, even are
//                outputs, so the real CoreAudio device is _device/2.
audio::orchestra::DeviceInfo audio::orchestra::api::Core::getDeviceInfo(uint32_t _device) {
	audio::orchestra::DeviceInfo info;
	// Get device ID
	uint32_t nDevices = getDeviceCount();
	if (nDevices == 0) {
		ATA_ERROR("no devices found!");
		info.clear();
		return info;
	}
	if (_device >= nDevices) {
		ATA_ERROR("device ID is invalid!");
		info.clear();
		return info;
	}
	// Odd logical indices are input devices, even ones are output devices.
	info.input = false;
	if (_device%2 == 1) {
		info.input = true;
	}
	// The /2 comes from not mixing input and output: the user-visible device
	// count is twice the number of real CoreAudio devices.
	AudioDeviceID deviceList[nDevices/2];
	uint32_t dataSize = sizeof(AudioDeviceID) * nDevices/2;
	AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
	OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, nullptr, &dataSize, (void*)&deviceList);
	if (result != noErr) {
		ATA_ERROR("OS-X system error getting device IDs.");
		info.clear();
		return info;
	}
	AudioDeviceID id = deviceList[ _device/2 ];
	// ------------------------------------------------
	// Get the device name: "<manufacturer>: <device name>".
	// ------------------------------------------------
	info.name.erase();
	CFStringRef cfname;
	dataSize = sizeof(CFStringRef);
	property.mSelector = kAudioObjectPropertyManufacturer;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &cfname);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting device manufacturer.");
		info.clear();
		return info;
	}
	//const char *mname = CFStringGetCStringPtr(cfname, CFStringGetSystemEncoding());
	int32_t length = CFStringGetLength(cfname);
	std::vector<char> name;
	// length*3+1 bytes: room for multi-byte characters in the system
	// encoding plus the NUL terminator — assumes 3 bytes/char suffices; TODO confirm.
	name.resize(length * 3 + 1, '\0');
	CFStringGetCString(cfname, &name[0], length * 3 + 1, CFStringGetSystemEncoding());
	info.name.append(&name[0], strlen(&name[0]));
	info.name.append(": ");
	CFRelease(cfname);
	property.mSelector = kAudioObjectPropertyName;
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &cfname);
	if (result != noErr) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting device name.");
		info.clear();
		return info;
	}
	//const char *name = CFStringGetCStringPtr(cfname, CFStringGetSystemEncoding());
	length = CFStringGetLength(cfname);
	name.resize(length * 3 + 1, '\0');
	CFStringGetCString(cfname, &name[0], length * 3 + 1, CFStringGetSystemEncoding());
	info.name.append(&name[0], strlen(&name[0]));
	CFRelease(cfname);
	// ------------------------------------------------
	// Get the stream "configuration" for the chosen direction and count
	// the channels across all streams of the device.
	// ------------------------------------------------
	property.mSelector = kAudioDevicePropertyStreamConfiguration;
	if (info.input == false) {
		property.mScope = kAudioDevicePropertyScopeOutput;
	} else {
		property.mScope = kAudioDevicePropertyScopeInput;
	}
	AudioBufferList *bufferList = nullptr;
	dataSize = 0;
	result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
	if (result != noErr || dataSize == 0) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration info for device (" << _device << ").");
		info.clear();
		return info;
	}
	// Allocate the AudioBufferList.
	bufferList = (AudioBufferList *) malloc(dataSize);
	if (bufferList == nullptr) {
		ATA_ERROR("memory error allocating AudioBufferList.");
		info.clear();
		return info;
	}
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, bufferList);
	if ( result != noErr || dataSize == 0) {
		free(bufferList);
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ").");
		info.clear();
		return info;
	}
	// Get channel information: one channel_unknow entry per channel of
	// every stream in the buffer list.
	for (size_t iii=0; iii<bufferList->mNumberBuffers; ++iii) {
		for (size_t jjj=0; jjj<bufferList->mBuffers[iii].mNumberChannels; ++jjj) {
			info.channels.push_back(audio::channel_unknow);
		}
	}
	free(bufferList);
	if (info.channels.size() == 0) {
		ATA_DEBUG("system error (" << getErrorCode(result) << ") getting stream configuration for device (" << _device << ") ==> no channels.");
		info.clear();
		return info;
	}
	// ------------------------------------------------
	// Determine the supported sample rates: intersect the generic rate
	// list with the device's [min, max] nominal-rate envelope.
	// ------------------------------------------------
	property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
	result = AudioObjectGetPropertyDataSize(id, &property, 0, nullptr, &dataSize);
	if ( result != kAudioHardwareNoError || dataSize == 0) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting sample rate info.");
		info.clear();
		return info;
	}
	uint32_t nRanges = dataSize / sizeof(AudioValueRange);
	AudioValueRange rangeList[ nRanges ];
	result = AudioObjectGetPropertyData(id, &property, 0, nullptr, &dataSize, &rangeList);
	if (result != kAudioHardwareNoError) {
		ATA_ERROR("system error (" << getErrorCode(result) << ") getting sample rates.");
		info.clear();
		return info;
	}
	// Collapse all ranges into a single overall [minimumRate, maximumRate].
	double minimumRate = 100000000.0, maximumRate = 0.0;
	for (uint32_t i=0; i<nRanges; i++) {
		if (rangeList[i].mMinimum < minimumRate) {
			minimumRate = rangeList[i].mMinimum;
		}
		if (rangeList[i].mMaximum > maximumRate) {
			maximumRate = rangeList[i].mMaximum;
		}
	}
	info.sampleRates.clear();
	for (auto &it : audio::orchestra::genericSampleRate()) {
		if ( it >= minimumRate && it <= maximumRate) {
			info.sampleRates.push_back(it);
		}
	}
	if (info.sampleRates.size() == 0) {
		ATA_ERROR("No supported sample rates found for device (" << _device << ").");
		info.clear();
		return info;
	}
	// ------------------------------------------------
	// Determine the format.
	// ------------------------------------------------
	// CoreAudio always uses 32-bit floating point data for PCM streams.
	// Thus, any other "physical" formats supported by the device are of
	// no interest to the client.
	info.nativeFormats.push_back(audio::format_float);
	// ------------------------------------------------
	// Determine whether this is the default device for its direction.
	// ------------------------------------------------
	if (info.input == false) {
		if (getDefaultOutputDevice() == _device) {
			info.isDefault = true;
		}
	} else {
		if (getDefaultInputDevice() == _device) {
			info.isDefault = true;
		}
	}
	info.isCorrect = true;
	return info;
}
// Dispatch one "system device" command byte read from the input stream.
// Each branch acknowledges the command on the output stream, then performs
// its action (possibly reading further arguments from inputStream and
// writing results to outputStream or the info/error loggers).
// @param header       the command byte to dispatch on
// @param inputStream  source of any command arguments (hex-encoded)
// @param outputStream destination for the ack and any reply payload
void deviceSystemHandleRawData(char header, InputStream* inputStream, OutputStream* outputStream) {
	if (header == COMMAND_PING) {
		// data
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_PING);
		// Read and write in output the pingIndex (to control that it's the right which does the response)
		unsigned char pingIndex = readHex2(inputStream);
		appendHex2(outputStream, pingIndex);
	}
	// Last Error
	else if (header == COMMAND_GET_LAST_ERROR) {
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_GET_LAST_ERROR);
		unsigned int lastError = getLastError();
		appendHex4(outputStream, lastError);
	}
	else if (header == COMMAND_CLEAR_LAST_ERROR) {
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_CLEAR_LAST_ERROR);
		clearLastError();
	}
	// Device list
	else if (header == COMMAND_DEVICE_LIST) {
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_DEVICE_LIST);
		printDeviceList(getInfoOutputStreamLogger());
		// Usage
	}
	else if (header == COMMAND_USAGE) {
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_USAGE);
		printDeviceListUsage(getInfoOutputStreamLogger(), false);
	}
	else if (header == COMMAND_USAGE_PROBLEM) {
		// Same as COMMAND_USAGE but restricted to problem entries (true flag).
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_USAGE_PROBLEM);
		printDeviceListUsage(getInfoOutputStreamLogger(), true);
	}
	else if (header == COMMAND_USAGE_SPECIFIC_DEVICE) {
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_USAGE_SPECIFIC_DEVICE);
		// Look up the device whose interface header matches the next byte;
		// returns early once found.
		char deviceHeader = readBinaryChar(inputStream);
		int size = getDeviceCount();
		int i;
		for (i = 0; i < size; i++) {
			Device* device = getDevice(i);
			if (deviceHeader == device->deviceInterface->deviceHeader) {
				println(getInfoOutputStreamLogger());
				printDeviceUsage(getInfoOutputStreamLogger(), device, false);
				return;
			}
		}
		appendString(getErrorOutputStreamLogger(), "Device Not Found !\n");
	}
	else if (header == COMMAND_CLS) {
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_CLS);
		// Clearing the console only exists on the PC build.
#ifdef PC_COMPILER
		system("cls");
#else
		appendString(outputStream, "Unsupported Operation");
#endif // PC_COMPILER
	}
	else if (header == COMMAND_RESET) {
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_RESET);
		// Reset is only meaningful on the embedded target; the jump-to-zero
		// implementation is still disabled below.
#ifdef PC_COMPILER
		appendString(outputStream, "Unsupported Operation");
#else
		// goto 0;
#endif // PC_COMPILER
	}
	// Notifications
	else if (header == COMMAND_NOTIFICATION) {
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_NOTIFICATION);
		printDeviceListNotification(getInfoOutputStreamLogger(), false);
	}
	else if (header == COMMAND_WAIT) {
		// Blocking wait: ack first, sleep for the requested milliseconds,
		// then emit the command echo so the caller knows the wait finished.
		appendAck(outputStream);
		int mSec = readHex4(inputStream);
		delaymSec(mSec);
		append(outputStream, SYSTEM_DEVICE_HEADER);
		append(outputStream, COMMAND_WAIT);
	}
	else if (header == COMMAND_BOARD_NAME) {
		appendString(getInfoOutputStreamLogger(), getBoardName());
		println(getInfoOutputStreamLogger());
		ackCommand(outputStream, SYSTEM_DEVICE_HEADER, COMMAND_BOARD_NAME);
	}
}
/// Thin accessor exposing the current device count.
int devicecount() {
    const int count = getDeviceCount();
    return count;
}
std::vector<CameraConfig> videoInputCamera::getCameraConfigs(int dev_id) { std::vector<CameraConfig> cfg_list; int count = getDeviceCount(); if (count==0) return cfg_list; comInit(); HRESULT hr; ICaptureGraphBuilder2 *lpCaptureGraphBuilder; IGraphBuilder *lpGraphBuilder; IBaseFilter *lpInputFilter; IAMStreamConfig *lpStreamConfig; char nDeviceName[255]; WCHAR wDeviceName[255]; for (int cam_id=0;cam_id<count;cam_id++) { if ((dev_id>=0) && (dev_id!=cam_id)) continue; hr = CoCreateInstance(CLSID_CaptureGraphBuilder2, NULL, CLSCTX_INPROC_SERVER, IID_ICaptureGraphBuilder2, (void **)&lpCaptureGraphBuilder); if (FAILED(hr)) // FAILED is a macro that tests the return value { printf("ERROR - Could not create the Filter Graph Manager\n"); comUnInit(); return cfg_list; } // Create the Filter Graph Manager. hr = CoCreateInstance(CLSID_FilterGraph, 0, CLSCTX_INPROC_SERVER,IID_IGraphBuilder, (void**)&lpGraphBuilder); if (FAILED(hr)) { printf("ERROR - Could not add the graph builder!\n"); lpCaptureGraphBuilder->Release(); comUnInit(); return cfg_list; } hr = lpCaptureGraphBuilder->SetFiltergraph(lpGraphBuilder); if (FAILED(hr)) { printf("ERROR - Could not set filtergraph\n"); lpGraphBuilder->Release(); lpCaptureGraphBuilder->Release(); comUnInit(); return cfg_list; } memset(wDeviceName, 0, sizeof(WCHAR) * 255); memset(nDeviceName, 0, sizeof(char) * 255); hr = getDevice(&lpInputFilter, cam_id, wDeviceName, nDeviceName); if (SUCCEEDED(hr)){ hr = lpGraphBuilder->AddFilter(lpInputFilter, wDeviceName); }else{ printf("ERROR - Could not find specified video device\n"); lpGraphBuilder->Release(); lpCaptureGraphBuilder->Release(); comUnInit(); return cfg_list; } hr = lpCaptureGraphBuilder->FindInterface(&PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, lpInputFilter, IID_IAMStreamConfig, (void **)&lpStreamConfig); if(FAILED(hr)){ printf("ERROR: Couldn't config the stream!\n"); lpInputFilter->Release(); lpGraphBuilder->Release(); lpCaptureGraphBuilder->Release(); comUnInit(); return cfg_list; } 
CameraConfig cam_cfg; CameraTool::initCameraConfig(&cam_cfg); cam_cfg.driver = DRIVER_DEFAULT; cam_cfg.device = cam_id; sprintf(cam_cfg.name, "%s", nDeviceName); int iCount = 0; int iSize = 0; hr = lpStreamConfig->GetNumberOfCapabilities(&iCount, &iSize); std::vector<CameraConfig> fmt_list; if (iSize == sizeof(VIDEO_STREAM_CONFIG_CAPS)) { GUID lastFormat = MEDIASUBTYPE_None; for (int iFormat = 0; iFormat < iCount; iFormat+=2) { VIDEO_STREAM_CONFIG_CAPS scc; AM_MEDIA_TYPE *pmtConfig; hr = lpStreamConfig->GetStreamCaps(iFormat, &pmtConfig, (BYTE*)&scc); if (SUCCEEDED(hr)){ if ( pmtConfig->subtype != lastFormat) { if (fmt_list.size()>0) { std::sort(fmt_list.begin(), fmt_list.end()); cfg_list.insert( cfg_list.end(), fmt_list.begin(), fmt_list.end() ); fmt_list.clear(); } cam_cfg.cam_format = getMediaSubtype(pmtConfig->subtype); lastFormat = pmtConfig->subtype; } int stepX = scc.OutputGranularityX; int stepY = scc.OutputGranularityY; if(stepX < 1 || stepY < 1) continue; else if ((stepX==1) && (stepY==1)) { cam_cfg.cam_width = scc.InputSize.cx; cam_cfg.cam_height = scc.InputSize.cy; int maxFrameInterval = scc.MaxFrameInterval; if (maxFrameInterval==0) maxFrameInterval = 10000000; float last_fps=-1; VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)pmtConfig->pbFormat; for (int iv=scc.MinFrameInterval;iv<=maxFrameInterval;iv=iv*2) { pVih->AvgTimePerFrame = iv; hr = lpStreamConfig->SetFormat(pmtConfig); if (hr==S_OK) { hr = lpStreamConfig->GetFormat(&pmtConfig); float fps = ((int)floor(100000000.0f/(float)pVih->AvgTimePerFrame + 0.5f))/10.0f; if (fps!=last_fps) { cam_cfg.cam_fps = fps; fmt_list.push_back(cam_cfg); last_fps=fps; } } } } else { int x,y; for (x=scc.MinOutputSize.cx,y=scc.MinOutputSize.cy;x<=scc.MaxOutputSize.cx,y<=scc.MaxOutputSize.cy;x+=stepX,y+=stepY) { cam_cfg.cam_width = x; cam_cfg.cam_height = y; int maxFrameInterval = scc.MaxFrameInterval; if (maxFrameInterval==0) maxFrameInterval = 10000000; float last_fps=-1; VIDEOINFOHEADER *pVih = 
(VIDEOINFOHEADER*)pmtConfig->pbFormat; for (int iv=scc.MinFrameInterval;iv<=maxFrameInterval;iv=iv*2) { pVih->AvgTimePerFrame = iv; hr = lpStreamConfig->SetFormat(pmtConfig); if (hr==S_OK) { hr = lpStreamConfig->GetFormat(&pmtConfig); float fps = ((int)floor(100000000.0f/(float)pVih->AvgTimePerFrame + 0.5f))/10.0f; if (fps!=last_fps) { cam_cfg.cam_fps = fps; fmt_list.push_back(cam_cfg); last_fps=fps; } } } } } deleteMediaType(pmtConfig); } } } if (fmt_list.size()>0) { std::sort(fmt_list.begin(), fmt_list.end()); cfg_list.insert( cfg_list.end(), fmt_list.begin(), fmt_list.end() ); fmt_list.clear(); } lpStreamConfig->Release(); lpInputFilter->Release(); lpGraphBuilder->Release(); lpCaptureGraphBuilder->Release(); } comUnInit(); return cfg_list; }
void MeshWSN::print(void) { printf("STATS:-----------------------------------------------------\n"); printf("Transmits: %d\n", mTXs); printf("Avg. RX per TX: %f\n", double(mRXs) / mTXs); printf("Corruption rate: %.2f%%\n", 100.0 * double(mCorrupted) / mRXs); printf("Cluster heads: %d\n", mCHs); printf("Cluster head rate: %f\n", double(mCHs) / getDeviceCount()); uint32_t loners = 0; std::vector<ClusterMeshDev*> clusterHeads; for (Device* pDev : mDevices) { ClusterMeshDev* pMDev = (ClusterMeshDev*)pDev; if (pMDev->mNeighbors.size() == 0) ++loners; if (pMDev->isCH()) clusterHeads.push_back(pMDev); } printf("Loners: %d\n", loners); struct ch { ClusterMeshDev* pCH; uint32_t conns; }; std::vector<struct ch> clusterHeadSubscribers(clusterHeads.size()); uint32_t symmetric = 0; for (connection_t& conn : mConnections) { if (conn.symmetric) symmetric++; ClusterMeshDev* chsInConn[] = { (ClusterMeshDev*)conn.pFirst, (ClusterMeshDev*)conn.pSecond }; for (uint8_t i = 0; i < 2; ++i) { if (chsInConn[i]->isCH()) { bool exists = false; for (struct ch& chs : clusterHeadSubscribers) { if (chs.pCH == chsInConn[i]) { chs.conns++; exists = true; break; } } if (!exists) clusterHeadSubscribers.push_back({ chsInConn[i], 1 }); } } } struct ch *pMaxCh=NULL; uint32_t totalCHSubs = 0; for (struct ch& chs : clusterHeadSubscribers) { if (pMaxCh == NULL || chs.conns > pMaxCh->conns) pMaxCh = &chs; totalCHSubs += chs.conns; } printf("Symmetric connection rate: %.2f%%\n", 100.0 * double(symmetric) / mConnections.size()); printf("Max CH subs: %d\n", pMaxCh->conns); printf("Useless CHs: %d\n", clusterHeadSubscribers.size() - clusterHeads.size()); printf("Avg CH subs: %.2f\n", double(totalCHSubs) / clusterHeads.size()); double totalRadioDutyCycle = 0.0; for (Device* pDev : mDevices) { totalRadioDutyCycle += pDev->getRadio()->getTotalDutyCycle(); } printf("Average radio duty cycle: %.2f%%\n", totalRadioDutyCycle / double(mDevices.size()) * 100.0); double totPowerUsage = 0.0; double maxPowerUsage = 0.0; 
double minPowerUsage = 100000000.0; double peukert = 1.15; std::string minDev, maxDev; for (auto it = mDevices.begin(); it != mDevices.end(); it++) { double usage = (*it)->getPowerUsageAvg(MESH_STABILIZATION_TIME, getEnvironment()->getTimestamp(), peukert); // don't count the search totPowerUsage += usage; if (usage > maxPowerUsage) { maxPowerUsage = usage; maxDev = (*it)->mName; } if (usage < minPowerUsage) { minPowerUsage = usage; minDev = (*it)->mName; } } totPowerUsage *= (getEnvironment()->getTimestamp() - MESH_STABILIZATION_TIME) / float(HOURS); // mA -> mAh maxPowerUsage *= (getEnvironment()->getTimestamp() - MESH_STABILIZATION_TIME) / float(HOURS); // mA -> mAh minPowerUsage *= (getEnvironment()->getTimestamp() - MESH_STABILIZATION_TIME) / float(HOURS); // mA -> mAh printf("Avg power usage: %.5fmAh\n", totPowerUsage / mDevices.size()); printf("Max power usage: %.5fmAh\n", maxPowerUsage); printf("Min power usage: %.5fmAh\n", minPowerUsage); timestamp_t firstDeath = BATTERY_DRAINAGE_TIME_h * (pow(BATTERY_CAPACITY_mAh, peukert) / pow(maxPowerUsage * BATTERY_DRAINAGE_TIME_h, peukert)); // in hours timestamp_t lastDeath = BATTERY_DRAINAGE_TIME_h * (pow(BATTERY_CAPACITY_mAh, peukert) / pow(minPowerUsage * BATTERY_DRAINAGE_TIME_h, peukert)); // in hours printf("First dead node (%s): %d years, %d days and %d hours\n", maxDev.c_str(),uint32_t(firstDeath / (24ULL * 365ULL)), uint32_t((firstDeath / 24ULL) % 365ULL), uint32_t(firstDeath % 24ULL)); printf("Last dead node (%s): %d years, %d days and %d hours\n", minDev.c_str(), uint32_t(lastDeath / (24ULL * 365ULL)), uint32_t((lastDeath / 24ULL) % 365ULL), uint32_t(lastDeath % 24ULL)); }
/// Number of items in the device group widget (the current device count).
/// The count is fetched once so the value logged and the value returned
/// always agree (getDeviceCount() used to be called twice and could
/// change between the calls).
int DeviceGroupWidget::totalItemCount() {
    const int count = getDeviceCount();
    LOGNS(Gui, gui, debug, "device group widget total item count returns: " + Poco::NumberFormatter::format(count));
    return count;
}