/*
 * Command-line test harness: loads the named device driver, feeds every
 * NMEA line from stdin through it, and dumps the resulting parsed state.
 *
 * Usage: PROGRAM DRIVER   (DRIVER is one of the registered driver names)
 * Returns 1 on usage / unknown-driver / open errors.
 */
int main(int argc, char **argv)
{
  if (argc != 2) {
    fprintf(stderr, "Usage: %s DRIVER\n"
            "Where DRIVER is one of:\n", argv[0]);
    const TCHAR *name;
    /* enumerate all registered drivers for the usage message */
    for (unsigned i = 0; (name = devRegisterGetName(i)) != NULL; ++i)
      _ftprintf(stderr, _T("\t%s\n"), name);
    return 1;
  }

  PathName driver_name(argv[1]);
  device.Driver = devGetDriver(driver_name);
  if (device.Driver == NULL) {
    fprintf(stderr, "No such driver: %s\n", argv[1]);
    return 1;
  }

  /* NOTE(review): binding a reference to a dereferenced NULL
     Port::Handler pointer is undefined behaviour; it presumably "works"
     only because NullPort never invokes the handler — confirm and
     consider a dedicated dummy handler instead. */
  NullPort port(*(Port::Handler *)NULL);
  device.Com = &port;
  device.enable_baro = true;

  if (!device.Open()) {
    fprintf(stderr, "Failed to open driver: %s\n", argv[1]);
    return 1;
  }

  /* feed each stdin line to the driver's line parser */
  char buffer[1024];
  while (fgets(buffer, sizeof(buffer), stdin) != NULL)
    device.LineReceived(buffer);

  Dump(device_blackboard.Basic());
}
void Set(const DeviceConfig &config, const DeviceDescriptor &device, const NMEAInfo &basic) { /* if a DeviceDescriptor is "unconfigured" but its DeviceConfig contains a valid configuration, then it got disabled by DeviceConfigOverlaps(), i.e. it's duplicate */ duplicate = !config.IsDisabled() && !device.IsConfigured(); switch (device.GetState()) { case PortState::READY: open = true; error = false; break; case PortState::FAILED: open = false; error = true; break; case PortState::LIMBO: open = false; error = false; break; } alive = basic.alive; location = basic.location_available; gps = basic.gps.fix_quality_available; baro = basic.baro_altitude_available || basic.pressure_altitude_available || basic.static_pressure_available; airspeed = basic.airspeed_available; vario = basic.total_energy_vario_available; traffic = basic.flarm.IsDetected(); debug = device.IsDumpEnabled(); }
/**
 * Ask the user for confirmation, then declare the task to the given
 * device, reporting the outcome in a message box.
 * Returns true only if the declaration succeeded.
 */
static bool
DeviceDeclare(DeviceDescriptor &dev, const Declaration &declaration,
              const Waypoint *home)
{
  /* device busy with another operation? */
  if (dev.IsOccupied())
    return false;

  if (ShowMessageBox(_("Declare task?"), dev.GetDisplayName(),
                     MB_YESNO | MB_ICONQUESTION) != IDYES)
    return false;

  if (!dev.Borrow())
    return false;

  const TCHAR *caption = dev.GetDisplayName();
  if (caption == NULL)
    caption = _("Declare task");

  const bool declared = DoDeviceDeclare(dev, declaration, home);
  dev.Return();

  if (declared) {
    ShowMessageBox(_("Task declared!"), caption, MB_OK | MB_ICONINFORMATION);
    return true;
  }

  ShowMessageBox(_("Error occured,\nTask NOT declared!"), caption,
                 MB_OK | MB_ICONERROR);
  return false;
}
void MPICommunicatorImpl::Initialize(const std::vector<NDArrayViewPtr>& values) { assert(CPUDEVICE < 0); // just in case somebody decides to change CPUDEVICE macro. DeviceDescriptor lastGpuDevice = DeviceDescriptor::CPUDevice(); m_gpuDataTransferers.resize(values.size()); m_intermediateCPUBuffers.resize(values.size()); for (auto i = 0; i < values.size(); ++i) { auto view = values[i]; auto device = view->Device(); // Make sure none of the values are sparse - we currently do not support aggregation of sparse matrices if (view->GetStorageFormat() != StorageFormat::Dense) RuntimeError("Aggregation for sparse matrices is currently not supported!"); // TODO: device.Type should be called Kind. if (device.Type() != DeviceKind::GPU) { m_intermediateCPUBuffers[i] = Buffer(); m_gpuDataTransferers[i] = nullptr; } else { if (lastGpuDevice.Type() == DeviceKind::CPU) lastGpuDevice = device; else if (device.Id() != lastGpuDevice.Id()) // For the time being, assume all devices have the same id. LogicError("Not all values are on the same GPU device id"); auto requiredSize = GetBufferSize(view); m_gpuDataTransferers[i] = std::make_shared<GPUDataTransferer>(device.Id(), true); if (m_intermediateCPUBuffers[i].totalSize < requiredSize) m_intermediateCPUBuffers[i] = AllocateIntermediateBuffer(device.Id(), requiredSize); } } }
/**
 * Open a single device according to its configuration.
 * On success, the first device whose driver advertises NMEA output is
 * remembered in @p nmeaout.  Returns false if the driver is unknown or
 * the port/device could not be opened.
 */
static bool
devInitOne(DeviceDescriptor &device, const DeviceConfig &config,
           DeviceDescriptor *&nmeaout)
{
  if (config.port_type == DeviceConfig::INTERNAL) {
#ifdef ANDROID
    /* the simulator never talks to the real internal GPS */
    if (is_simulator())
      return true;

    device.internal_gps = InternalGPS::create(Java::GetEnv(), native_view,
                                              device.GetIndex());
    return device.internal_gps != NULL;
#else
    /* no internal GPS on non-Android targets */
    return false;
#endif
  }

  const struct DeviceRegister *driver = devGetDriver(config.driver_name);
  if (driver == NULL)
    return false;

  Port *port = OpenPort(config, device);
  if (port == NULL)
    return false;

  if (!device.Open(port, driver)) {
    delete port;
    return false;
  }

  if (nmeaout == NULL && (driver->Flags & (1l << dfNmeaOut)))
    nmeaout = &device;

  return true;
}
/// Exercises training-with-checkpointing twice: first with a dense
/// feed-forward net on MNIST-style data, then with a sparse-input LSTM
/// classifier.  For each case a second, identically-initialized network
/// is built so TestTrainingWithCheckpointing can compare the two.
void TestCheckpointing(const DeviceDescriptor& device)
{
    auto featureStreamName = L"features";
    auto labelsStreamName = L"labels";

    /* case 1: dense feed-forward classifier */
    size_t inputDim = 784;
    size_t numOutputClasses = 10;

    auto features1 = InputVariable({ inputDim }, false /*isSparse*/, DataType::Float, featureStreamName);
    auto labels1 = InputVariable({ numOutputClasses }, DataType::Float, labelsStreamName);
    auto net1_1 = BuildFFClassifierNet(features1, numOutputClasses, device, 1);

    FunctionPtr net1_2;
    if (device.Type() == DeviceKind::GPU)
    {
        // TODO: instead of cloning here, reset curand generator to make sure that parameters are initialized to the same state.
        for (auto& p : net1_1->Parameters())
        {
            // make sure all parameters are initialized
            assert(p.Value() != nullptr);
        }
        net1_2 = net1_1->Clone();
    }
    else
    {
        /* on CPU, rebuilding with the same seed yields identical parameters */
        net1_2 = BuildFFClassifierNet(features1, numOutputClasses, device, 1);
    }

    auto minibatchSource1 = TextFormatMinibatchSource(L"Train-28x28_cntk_text.txt", { { featureStreamName, inputDim }, { labelsStreamName, numOutputClasses } }, 1000, false);
    TestTrainingWithCheckpointing(net1_1, net1_2, labels1, minibatchSource1, device);

    /* case 2: sparse-input LSTM classifier */
    inputDim = 2000;
    numOutputClasses = 5;

    auto features2 = InputVariable({ inputDim }, true /*isSparse*/, DataType::Float, featureStreamName);
    auto labels2 = InputVariable({ numOutputClasses }, DataType::Float, labelsStreamName, { Axis::DefaultBatchAxis() });
    auto net2_1 = BuildLSTMClassifierNet(features2, numOutputClasses, device, 1);

    FunctionPtr net2_2;
    if (device.Type() == DeviceKind::GPU)
    {
        // TODO: instead of cloning here, reset curand generator to make sure that parameters are initialized to the same state.
        for (auto& p : net2_1->Parameters())
        {
            // make sure all parameters are initialized
            assert(p.Value() != nullptr);
        }
        net2_2 = net2_1->Clone();
    }
    else
    {
        net2_2 = BuildLSTMClassifierNet(features2, numOutputClasses, device, 1);
    }

    auto minibatchSource2 = TextFormatMinibatchSource(L"Train.ctf", { { featureStreamName, inputDim, true, L"x" }, { labelsStreamName, numOutputClasses, false, L"y" } }, 1000, false);
    TestTrainingWithCheckpointing(net2_1, net2_2, labels2, minibatchSource2, device);
}
/**
 * Forward a system-timer tick to every device in the global DeviceList.
 */
void devTick(const NMEA_INFO &basic, const DERIVED_INFO &calculated)
{
  /* loop index scoped to the loop (was a C89-style declaration) */
  for (int i = 0; i < NUMDEV; i++) {
    DeviceDescriptor *d = &DeviceList[i];
    d->OnSysTicker(basic, calculated);
  }
}
/**
 * Forward a system-timer tick to every device in the global DeviceList.
 */
void devTick(const DerivedInfo &calculated)
{
  /* loop index scoped to the loop (was a C89-style declaration) */
  for (int i = 0; i < NUMDEV; i++) {
    DeviceDescriptor *d = &DeviceList[i];
    d->OnSysTicker(calculated);
  }
}
/**
 * Configure and (asynchronously) open a single device.
 */
static void
devInitOne(DeviceDescriptor &device, const DeviceConfig &config)
{
  device.SetConfig(config);

  /* this OperationEnvironment instance must be persistent, because
     DeviceDescriptor::Open() is asynchronous */
  static PopupOperationEnvironment env;
  device.ResetFailureCounter();
  device.Open(env);
}
bool UsbDmxPlugin::AddDeviceDescriptor(int fd) { vector<DeviceDescriptor*>::const_iterator iter = m_descriptors.begin(); for (; iter != m_descriptors.end(); ++iter) { if ((*iter)->ReadDescriptor() == fd) return true; } DeviceDescriptor *socket = new DeviceDescriptor(fd); socket->SetOnData(NewCallback(this, &UsbDmxPlugin::SocketReady)); m_plugin_adaptor->AddReadDescriptor(socket); m_descriptors.push_back(socket); return true; }
/**
 * React to a newly plugged physical input device: first try to re-attach
 * it to an existing device-less index that previously held a matching
 * device; otherwise create a fresh index from a matching declaration.
 */
void Manager::devicePlugged( DeviceDescriptor * physicalDevice )
{
    bool matchedExisting = false;

    /* already attached to an index? then nothing to do */
    Index * existing = findIndexWithPhysicalDevice(physicalDevice);
    if( existing )
        return;

    //look for matching deviceless index
    for( tIndices::iterator i = mIndices.begin(); i != mIndices.end() ; i++ )
    {
        Index * index = * i;
        if( !index->getPhysicalDevice() )
        {
            DeviceDescriptor * dd = index->recallDevice();
            /* NOTE(review): the match score is used as a plain boolean here,
               while the declaration path below compares it against
               MATCH_THRESHOLD — confirm this asymmetry is intentional */
            if( dd && dd->fuzzyCompareType( physicalDevice ) )
            {
                index->setPhysicalDevice( physicalDevice );
                index->forgetDevice();
                matchedExisting = true;
                BOOST_LOG_TRIVIAL(trace) << "Reconnected physical device (" << physicalDevice->getVendorProductCombo() << ") to Player [" << index->getPlayer() << "] with existing index \"" << index->getName() << "\"" << std::endl;
            }
        }
    }

    if( !matchedExisting )
    {
        //look for a matching index declaration and create an index
        for( ast::hidCollapseList::iterator d = indexDefinitions.begin(); d != indexDefinitions.end() ; d++ )
        {
            //build a comparable device descriptor to compare with the ones
            //generated by the operating system specific code
            DeviceDescriptor declaredDevice = boost::apply_visitor( makeDescriptor() , d->device );

            if( declaredDevice.fuzzyCompareType( physicalDevice ) > DeviceDescriptor::MATCH_THRESHOLD )
            {
                //build an index for this device
                //element mapping occurs at an OS-aware level
                //so let the implementation of createIndex and createElements handle that
                Index * newIndex = new Index( this, d->entries , d->index );
                newIndex->setPhysicalDevice( physicalDevice );
                mIndices.push_back( newIndex );
                putInNextAvailablePlayerSlot( newIndex );
                return;
            }
        }
    }
}
/**
 * Build indices for all initially-present physical devices by matching
 * each index declaration against each unattached physical device.
 */
void Manager::buildIndices()
{
    /*
    BOOST_LOG_TRIVIAL(trace) << "Initially available physical devices: " << std::endl;
    if( mPhysicalDevices.size() == 0 )
        BOOST_LOG_TRIVIAL(trace) << "\t No physical devices." << std::endl;
    for( tPhysicalDevices::iterator i= mPhysicalDevices.begin() ; i!= mPhysicalDevices.end() ; i++ )
    {
        DeviceDescriptor * dd = *i;
        BOOST_LOG_TRIVIAL(trace) << "\t" << dd->getVendorProductCombo() << std::endl;
    }
    */

    for( ast::hidCollapseList::iterator d = indexDefinitions.begin(); d != indexDefinitions.end() ; d++ )
    {
        //build a comparable device descriptor to compare with the ones
        //generated by the operating system specific code
        DeviceDescriptor declaredDevice = boost::apply_visitor( makeDescriptor() , d->device );

        for( tPhysicalDevices::iterator i= mPhysicalDevices.begin() ; i!= mPhysicalDevices.end() ; i++ )
        {
            DeviceDescriptor * physicalDevice = *i;
            /* skip devices that already belong to an index */
            Index * existing = findIndexWithPhysicalDevice( physicalDevice );
            if( !existing )
            {
                if( declaredDevice.fuzzyCompareType( physicalDevice ) > DeviceDescriptor::MATCH_THRESHOLD )
                {
                    //build an index for this device
                    //element mapping occurs at an OS-aware level
                    //so let the implementation of createIndex and createElements handle that
                    Index * newIndex = new Index( this, d->entries , d->index );
                    newIndex->setPhysicalDevice( physicalDevice );
                    mIndices.push_back( newIndex );
                    putInNextAvailablePlayerSlot(newIndex);
                }
            }
        }
    }
}
/**
 * Open a MIDI device handle for the given descriptor.
 * Returns nullptr (after logging) if RtMidi reports an error.
 */
tPtr<DeviceHandleImpl> DriverMIDI::connect(const DeviceDescriptor& device_)
{
  M_LOG("[DriverMIDI] connecting to " << device_.name() << ":"
                                      << device_.vendorId() << ":"
                                      << device_.productId());
  try
  {
    auto* handle = new DeviceHandleMIDI(device_);
    return tPtr<DeviceHandleImpl>(handle);
  }
  catch (RtMidiError& error)
  {
    std::string strError(error.getMessage());
    M_LOG("[DriverMIDI] RtMidiError: " << strError);
    return nullptr;
  }
}
/*
 * Command-line tool: replays NMEA lines from stdin through the named
 * driver and writes the resulting fixes to an IGC file.
 *
 * Usage: PROGRAM DRIVER FILE.IGC
 * The first 10 lines are parsed before the IGC header is written so the
 * blackboard has some state; afterwards one point is logged per second.
 */
int main(int argc, char **argv)
{
  if (argc != 3) {
    fprintf(stderr, "Usage: %s DRIVER FILE.IGC\n"
            "Where DRIVER is one of:\n", argv[0]);
    const TCHAR *name;
    for (unsigned i = 0; (name = devRegisterGetName(i)) != NULL; ++i)
      _ftprintf(stderr, _T("\t%s\n"), name);
    return 1;
  }

  PathName driver_name(argv[1]);
  device.Driver = devGetDriver(driver_name);
  if (device.Driver == NULL) {
    fprintf(stderr, "No such driver: %s\n", argv[1]);
    return 1;
  }

  /* NOTE(review): binding a reference to a dereferenced NULL
     Port::Handler pointer is undefined behaviour; presumably tolerated
     because NullPort never calls the handler — confirm */
  NullPort port(*(Port::Handler *)NULL);
  device.Com = &port;
  device.enable_baro = true;

  if (!device.Open()) {
    fprintf(stderr, "Failed to open driver: %s\n", argv[1]);
    return 1;
  }

  /* prime the blackboard with up to 10 lines before writing the header */
  char buffer[1024];
  for (unsigned i = 0; i < 10 &&
         fgets(buffer, sizeof(buffer), stdin) != NULL; ++i)
    device.LineReceived(buffer);

  PathName igc_path(argv[2]);
  IGCWriter writer(igc_path, device_blackboard.Basic());
  writer.header(device_blackboard.Basic().DateTime,
                _T("Manfred Mustermann"), _T("Ventus"),
                _T("D-1234"), _T("Foo"), driver_name);

  /* log at most one point per second */
  GPSClock log_clock(fixed(1));
  while (fgets(buffer, sizeof(buffer), stdin) != NULL) {
    device.LineReceived(buffer);

    if (log_clock.check_advance(device_blackboard.Basic().Time))
      writer.LogPoint(device_blackboard.Basic());
  }
}
void Set(const DeviceDescriptor &device, const NMEAInfo &basic) { switch (device.GetState()) { case PortState::READY: open = true; error = false; break; case PortState::FAILED: open = false; error = true; break; case PortState::LIMBO: open = false; error = false; break; } alive = basic.alive; location = basic.location_available; gps = basic.gps.fix_quality_available; baro = basic.baro_altitude_available || basic.pressure_altitude_available || basic.static_pressure_available; airspeed = basic.airspeed_available; vario = basic.total_energy_vario_available; traffic = basic.flarm.IsDetected(); }
/// <summary> /// Инициализирует канал связи с указанным устройством. /// До вызова этой функции никакие настроики порта или операции ввода-вывода проводиться не могут. /// </summary> /// <param name="d"> Описатель устройства. Экземпляр класса, позволяющего идентефицировать устройство, к которому надо подключиться. /// Как получить список доступных устройств, см: <see cref="DeviceDescriptor::GetDevicesList()">Класс перечисления совместимых устройств</see></param> /// <returns> Результат попытки открытия канала</returns> bool COM_CommCh::Init(DeviceDescriptor d) { this->setDeviceName(d.GetCOMname()); std::cout<<"Device COM name = "<<this->deviceName().toStdString()<<std::endl; if (this->AbstractSerial::open(this->ReadWrite | AbstractSerial::Unbuffered)) { // при небуферизованном режиме, необходимо выставить не нулевое время таймаутов !!! // ИНАЧЕ НИЧЕГО ЧИТАТЬСЯ НЕ БУДЕТ!!! const int CharIntervalTimeout__ = 10; const int TotalReadConstantTimeout__ = 10; this->setCharIntervalTimeout(CharIntervalTimeout__); this->setTotalReadConstantTimeout(TotalReadConstantTimeout__); std::cout<<"Device opened."<<std::endl; // устанавливаем состояние opened = true; return true; } else { std::cout<<"Failed to open Device"<<std::endl; // устанавливаем состояние opened = false; return false; } };
/**
 * Show a modal port-monitor dialog (terminal view plus control buttons)
 * for the given device.
 */
void
ShowPortMonitor(SingleWindow &parent, const DialogLook &dialog_look,
                const TerminalLook &terminal_look,
                DeviceDescriptor &device)
{
  /* build the caption: "Port monitor: <port name>" */
  TCHAR port_name[64];
  StaticString<128> caption;
  caption.Format(_T("%s: %s"), _("Port monitor"),
                 device.GetConfig().GetPortName(port_name,
                                                ARRAY_SIZE(port_name)));

  /* create the dialog */
  WindowStyle dialog_style;
  dialog_style.Hide();
  dialog_style.ControlParent();

  WndForm dialog(dialog_look);
  dialog.Create(parent, caption, dialog_style);
  ContainerWindow &client_area = dialog.GetClientAreaWindow();

  PortMonitorGlue glue(device, terminal_look);

  ButtonPanel buttons(client_area, dialog_look);
  buttons.Add(_("Close"), dialog, mrOK);
  glue.CreateButtons(buttons);
  glue.CreateTerminal(client_area, buttons.UpdateLayout());

  /* run it */
  dialog.ShowModal();
}
// Find the descriptor of a capture device by (partial) name.
// A NULL name, or the name "default", selects the first device in the
// list; otherwise the first device whose name contains `name` as a
// substring is returned.  Returns NULL (after logging) when nothing
// matches or the list is empty.
DeviceDescriptor* get_device_descriptor(char *name)
{
    const DeviceCollector::DeviceList& dl = DEVICE_COLLECTOR::instance().getDeviceList();
    // FIX: removed unused 'index' counter and the unnecessary C-style
    // cast on c_str() — strstr() accepts a const haystack.
    for (DeviceCollector::DeviceList::const_iterator i = dl.begin(); i != dl.end(); i++) {
        DeviceDescriptor* dd = *i;
        if (!name || !stricmp(name, "default"))
            return dd;
        if (strstr(dd->getName().c_str(), name) != NULL)
            return dd;
    }
    GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[VideoCapture] Cannot find capture driver %s\n", name));
    return NULL;
}
/**
 * (Asynchronously) open a single device.
 */
static void
devInitOne(DeviceDescriptor &device)
{
  /* this OperationEnvironment instance must be persistent, because
     DeviceDescriptor::Open() is asynchronous */
  static PopupOperationEnvironment env;
  device.Open(env);
}
void Set(const DeviceDescriptor &device, const NMEAInfo &basic) { open = device.IsOpen(); alive = basic.alive; location = basic.location_available; gps = basic.gps.fix_quality_available; baro = basic.baro_altitude_available || basic.pressure_altitude_available || basic.static_pressure_available; airspeed = basic.airspeed_available; vario = basic.total_energy_vario_available; traffic = basic.flarm.IsDetected(); }
/**
 * Ask the user for confirmation, then declare the task to the given
 * device, reporting success or failure in a message box.
 * Returns true only if the declaration succeeded.
 */
static bool
DeviceDeclare(DeviceDescriptor &dev, const Declaration &declaration,
              const Waypoint *home)
{
  /* device busy with another operation? */
  if (dev.IsOccupied())
    return false;

  if (ShowMessageBox(_("Declare task?"), dev.GetDisplayName(),
                     MB_YESNO | MB_ICONQUESTION) != IDYES)
    return false;

  if (!dev.Borrow())
    return false;

  const TCHAR *caption = dev.GetDisplayName();
  if (caption == NULL)
    caption = _("Declare task");

  const auto result = DoDeviceDeclare(dev, declaration, home);
  dev.Return();

  /* a cancelled job shows no message at all */
  if (result == TriStateJobResult::CANCELLED)
    return false;

  if (result == TriStateJobResult::SUCCESS) {
    ShowMessageBox(_("Task declared!"), caption,
                   MB_OK | MB_ICONINFORMATION);
    return true;
  }

  if (result == TriStateJobResult::ERROR) {
    ShowMessageBox(_("Error occured,\nTask NOT declared!"), caption,
                   MB_OK | MB_ICONERROR);
    return false;
  }

  gcc_unreachable();
}
/// <summary> /// Shows how to use Clone() to share function parameters among multi evaluation threads. /// </summary> /// <description> /// It first creates a new function with parameters, then spawns multi threads. Each thread uses Clone() to create a new /// instance of function and then use this instance to do evaluation. /// All cloned functions share the same parameters. /// </description> void MultiThreadsEvaluationWithClone(const DeviceDescriptor& device, const int threadCount) { using namespace std::placeholders; const size_t inputDim = 937; const size_t numOutputClasses = 9304; const size_t numHiddenLayers = 6; const size_t hiddenLayersDim = 2048; auto inputVar = InputVariable({inputDim}, DataType::Float, L"features"); assert(numHiddenLayers >= 1); auto classifierRoot = SetupFullyConnectedDNNLayer(inputVar, hiddenLayersDim, device, std::bind(Sigmoid, _1, L"")); for (size_t i = 1; i < numHiddenLayers; ++i) { classifierRoot = SetupFullyConnectedDNNLayer(classifierRoot, hiddenLayersDim, device, std::bind(Sigmoid, _1, L"")); } auto outputTimesParam = Parameter(NDArrayView::RandomUniform<float>({numOutputClasses, hiddenLayersDim}, -0.5, 0.5, 1, device)); auto classifierFunc = Times(outputTimesParam, classifierRoot, L"classifierOutput"); // Now test the structure if (classifierFunc->Parameters().size() != ((numHiddenLayers * 2) + 1)) { throw std::runtime_error("MultiThreadsEvaluationWithClone: Function does not have expected Parameter count"); } OutputFunctionInfo(classifierFunc); fprintf(stderr, "MultiThreadsEvaluationWithClone on device=%d\n", device.Id()); // Run evaluation in parallel std::vector<std::thread> threadList(threadCount); for (int th = 0; th < threadCount; ++th) { threadList[th] = std::thread(RunEvaluationClassifier, classifierFunc->Clone(), device); } for (int th = 0; th < threadCount; ++th) { threadList[th].join(); fprintf(stderr, "thread %d joined.\n", th); fflush(stderr); } }
/**
 * Show a modal port-monitor dialog for the given device, using the
 * globally configured look.
 */
void
ShowPortMonitor(DeviceDescriptor &device)
{
  const Look &look = UIGlobals::GetLook();

  /* build the caption: "Port monitor: <port name>" */
  TCHAR port_name[64];
  StaticString<128> caption;
  caption.Format(_T("%s: %s"), _("Port monitor"),
                 device.GetConfig().GetPortName(port_name,
                                                ARRAY_SIZE(port_name)));

  PortMonitorWidget widget(device, look.terminal);

  WidgetDialog dialog(look.dialog);
  dialog.CreateFull(UIGlobals::GetMainWindow(), caption, &widget);
  dialog.AddButton(_("Close"), mrOK);
  widget.CreateButtons(dialog);

  dialog.ShowModal();
  /* the widget lives on this stack frame; keep the dialog from
     deleting it */
  dialog.StealWidget();
}
void TestFunctionsForEquality(const DeviceDescriptor& device) { // TODO: add GPU version (need to reset cuda random generator each time a new function is created). assert(device.Type() == DeviceKind::CPU); auto inputVar = InputVariable({ 2 }, false, DataType::Float, L"features"); auto f1 = BuildFFClassifierNet(inputVar, 3, device, /*seed*/ 1); auto f2 = BuildFFClassifierNet(inputVar, 3, device, /*seed*/ 1); if (!AreEqual(f1, f2)) { throw std::runtime_error("TestFunctionsForEquality: two functions built with the same seed values are not identical."); } auto f3 = BuildFFClassifierNet(inputVar, 3, device, /*seed*/ 2); auto f4 = BuildFFClassifierNet(inputVar, 3, device, /*seed*/ 3); if (AreEqual(f3, f4)) { throw std::runtime_error("TestFunctionsForEquality: two functions built with different seed values are identical."); } }
/// <summary> /// Shows how to use LoadLegacyModel() and Clone() to share function parameters among multi evaluation threads. /// </summary> /// <description> /// It first loads a model, then spawns multi threads. Each thread uses Clone() to create a new /// instance of function and then use this instance to do evaluation. /// All cloned functions share the same parameters. /// Note: It uses the model trained by Examples\Image\GettingStarted\01_OneHidden.cntk as example. Instructions /// to train the model is described in Examples\Image\GettingStarted\README.md. /// The pre-trained model file 01_OneHidden needs to be in the current directory. /// </description> void MultiThreadsEvaluationWithLoadModel(const DeviceDescriptor& device, const int threadCount) { // The model file will be trained and copied to the current runtime directory first. auto modelFuncPtr = CNTK::Function::LoadModel(DataType::Float, L"01_OneHidden", device); OutputFunctionInfo(modelFuncPtr); fprintf(stderr, "MultiThreadsEvaluationWithLoadModel on device=%d\n", device.Id()); // Run evaluation in parallel. std::vector<std::thread> threadList(threadCount); for (int th = 0; th < threadCount; ++th) { threadList[th] = std::thread(RunEvaluationOneHidden, modelFuncPtr->Clone(), device); } for (int th = 0; th < threadCount; ++th) { threadList[th].join(); fprintf(stderr, "thread %d joined.\n", th); fflush(stderr); } }
/// Exercises NDArrayView construction, device placement, DeepClone,
/// Alias and read-only enforcement for a random shape of `numAxes` axes.
/// (ElementType is supplied by the enclosing template declaration.)
void TestNDArrayView(size_t numAxes, const DeviceDescriptor& device)
{
    srand(1);   // fixed seed: deterministic shapes/data for the test

    size_t maxDimSize = 15;
    NDShape viewShape(numAxes);
    for (size_t i = 0; i < numAxes; ++i)
        viewShape[i] = (rand() % maxDimSize) + 1;

    // Create a NDArrayView over a std::array
    std::array<ElementType, 1> arrayData = { 3 };
    auto arrayDataView = MakeSharedObject<NDArrayView>(NDShape({}), arrayData);
    if (arrayDataView->template DataBuffer<ElementType>() != arrayData.data())
        throw std::runtime_error("The DataBuffer of the NDArrayView does not match the original buffer it was created over");

    /* fill a CPU buffer with random values in [offset, offset + scale) */
    std::vector<ElementType> data(viewShape.TotalSize());
    ElementType scale = 19.0;
    ElementType offset = -4.0;
    for (size_t i = 0; i < viewShape.TotalSize(); ++i)
        data[i] = offset + ((((ElementType)rand()) / RAND_MAX) * scale);

    auto cpuDataView = MakeSharedObject<NDArrayView>(viewShape, data);
    if (cpuDataView->template DataBuffer<ElementType>() != data.data())
        throw std::runtime_error("The DataBuffer of the NDArrayView does not match the original buffer it was created over");

    /* put the data on the requested device (copy when not CPU) */
    NDArrayViewPtr dataView;
    if ((device.Type() == DeviceKind::CPU))
        dataView = cpuDataView;
    else
    {
        dataView = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), viewShape, device);
        dataView->CopyFrom(*cpuDataView);
    }

    if (dataView->Device() != device)
        throw std::runtime_error("Device of NDArrayView does not match 'device' it was created on");

    // Test clone
    auto clonedView = dataView->DeepClone(false);
    ElementType* first = nullptr;
    const ElementType* second = cpuDataView->template DataBuffer<ElementType>();

    NDArrayViewPtr temp1CpuDataView, temp2CpuDataView;
    if ((device.Type() == DeviceKind::CPU))
    {
        if (dataView->DataBuffer<ElementType>() != data.data())
            throw std::runtime_error("The DataBuffer of the NDArrayView does not match the original buffer it was created over");

        first = clonedView->WritableDataBuffer<ElementType>();
    }
    else
    {
        /* GPU path: stage the clone through a CPU view to inspect it */
        temp1CpuDataView = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
        temp1CpuDataView->CopyFrom(*clonedView);
        first = temp1CpuDataView->WritableDataBuffer<ElementType>();
    }

    for (size_t i = 0; i < viewShape.TotalSize(); ++i)
    {
        if (first[i] != second[i])
            throw std::runtime_error("The contents of the clone do not match expected");
    }

    /* mutate the clone: must NOT affect the original (deep copy) */
    first[0] += 1;
    if ((device.Type() != DeviceKind::CPU))
        clonedView->CopyFrom(*temp1CpuDataView);

    if ((device.Type() == DeviceKind::CPU))
    {
        first = clonedView->WritableDataBuffer<ElementType>();
        second = dataView->DataBuffer<ElementType>();
    }
    else
    {
        temp1CpuDataView = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
        temp1CpuDataView->CopyFrom(*clonedView);
        first = temp1CpuDataView->WritableDataBuffer<ElementType>();

        temp2CpuDataView = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), viewShape, DeviceDescriptor::CPUDevice());
        temp2CpuDataView->CopyFrom(*dataView);
        second = temp2CpuDataView->DataBuffer<ElementType>();
    }

    if (first[0] != (second[0] + 1))
        throw std::runtime_error("The clonedView's contents do not match expected");

    // Test alias
    auto aliasView = clonedView->Alias(true);
    const ElementType* aliasViewBuffer = aliasView->DataBuffer<ElementType>();
    const ElementType* clonedDataBuffer = clonedView->DataBuffer<ElementType>();
    if (aliasViewBuffer != clonedDataBuffer)
        throw std::runtime_error("The buffers underlying the alias view and the view it is an alias of are different!");

    clonedView->CopyFrom(*dataView);
    if (aliasViewBuffer != clonedDataBuffer)
        throw std::runtime_error("The buffers underlying the alias view and the view it is an alias of are different!");

    // Test readonliness
    auto errorMsg = "Was incorrectly able to get a writable buffer pointer from a readonly view";

    // Should not be able to get the WritableDataBuffer for a read-only view
    VerifyException([&aliasView]() {
        ElementType* aliasViewBuffer = aliasView->WritableDataBuffer<ElementType>();
        aliasViewBuffer;
    }, errorMsg);

    // Should not be able to copy into a read-only view
    VerifyException([&aliasView, &dataView]() {
        aliasView->CopyFrom(*dataView);
    }, errorMsg);
}
/**
 * Does the (global) device provide barometric data?
 */
bool
devHasBaroSource()
{
  return device.IsBaroSource();
}
/**
 * Interactive flight download: fetch the flight list from the logger
 * device, let the user pick flights one by one, download each into a
 * temporary IGC file, and rename it to its proper IGC file name derived
 * from the IGC header.  NMEA mode is re-enabled on the device before
 * every return path.
 */
void
ExternalLogger::DownloadFlightFrom(DeviceDescriptor &device)
{
  MessageOperationEnvironment env;

  // Download the list of flights that the logger contains
  RecordedFlightList flight_list;
  switch (DoReadFlightList(device, flight_list)) {
  case TriStateJobResult::SUCCESS:
    break;

  case TriStateJobResult::ERROR:
    device.EnableNMEA(env);
    ShowMessageBox(_("Failed to download flight list."),
                   _("Download flight"), MB_OK | MB_ICONERROR);
    return;

  case TriStateJobResult::CANCELLED:
    return;
  }

  // The logger seems to be empty -> cancel
  if (flight_list.empty()) {
    device.EnableNMEA(env);
    ShowMessageBox(_("Logger is empty."),
                   _("Download flight"), MB_OK | MB_ICONINFORMATION);
    return;
  }

  while (true) {
    // Show list of the flights
    const RecordedFlightInfo *flight = ShowFlightList(flight_list);
    if (!flight)
      break;

    // Download chosen IGC file into temporary file
    TCHAR path[MAX_PATH];
    LocalPath(path, _T("logs"), _T("temp.igc"));
    switch (DoDownloadFlight(device, *flight, path)) {
    case TriStateJobResult::SUCCESS:
      break;

    case TriStateJobResult::ERROR:
      // Delete temporary file
      File::Delete(path);
      ShowMessageBox(_("Failed to download flight."),
                     _("Download flight"), MB_OK | MB_ICONERROR);
      continue;

    case TriStateJobResult::CANCELLED:
      // Delete temporary file
      File::Delete(path);
      continue;
    }

    /* read the IGC header and build the final IGC file name with it */
    IGCHeader header;
    BrokenDate date;
    ReadIGCMetaData(path, header, date);
    if (header.flight == 0)
      header.flight = GetFlightNumber(flight_list, *flight);

    TCHAR name[64];
    FormatIGCFilenameLong(name, date, header.manufacturer, header.id,
                          header.flight);

    TCHAR final_path[MAX_PATH];
    LocalPath(final_path, _T("logs"), name);

    // Remove a file with the same name if it exists
    if (File::Exists(final_path))
      File::Delete(final_path);

    // Rename the temporary file to the actual filename
    File::Rename(path, final_path);

    if (ShowMessageBox(_("Do you want to download another flight?"),
                       _("Download flight"),
                       MB_YESNO | MB_ICONQUESTION) != IDYES)
      break;
  }

  device.EnableNMEA(env);
}
void TestTimesAndPlus(size_t inputDim, size_t outputDim, size_t numSamples, const DeviceDescriptor& device, size_t numIterations, bool usePreAllocatedOutputs, bool outputOnSpecifiedDevice, bool testSaveAndReLoad, unsigned int seed = 1) { Parameter timesParam(MakeSharedObject<NDArrayView>((ElementType)0.5, NDShape({ outputDim, inputDim }), device), L"timesParameters"); Parameter plusParam(MakeSharedObject<NDArrayView>((ElementType)1.2, std::initializer_list<size_t>({ outputDim }), device), L"plusParameters"); Variable inputVar({ inputDim }, AsDataType<ElementType>(), L"input"); auto timesAndPlusFunc = Plus(plusParam, Times(timesParam, inputVar)); if (testSaveAndReLoad) SaveAndReloadModel<ElementType>(timesAndPlusFunc, { &inputVar, ×Param, &plusParam }, device); srand(seed); for (size_t iterIdx = 0; iterIdx < numIterations; ++iterIdx) { std::vector<ElementType> inputData(inputDim * numSamples); for (size_t i = 0; i < inputData.size(); ++i) inputData[i] = ((ElementType)rand()) / RAND_MAX; NDShape inputShape = inputVar.Shape().AppendShape({ 1, numSamples }); ValuePtr inputValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(inputShape, inputData.data(), inputData.size(), DeviceDescriptor::CPUDevice(), true)); NDShape outputShape = timesAndPlusFunc->Output().Shape().AppendShape({ 1, numSamples }); std::vector<ElementType> outputData(outputShape.TotalSize()); ValuePtr outputValue; if (usePreAllocatedOutputs) { auto outputAllocationDevice = outputOnSpecifiedDevice ? 
device : DeviceDescriptor::CPUDevice(); if (outputAllocationDevice.Type() == DeviceKind::CPU) outputValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(outputShape, outputData.data(), outputData.size(), outputAllocationDevice, false)); else outputValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), outputShape, outputAllocationDevice)); } std::unordered_map<Variable, ValuePtr> outputs = { { timesAndPlusFunc->Output(), outputValue } }; auto backpropState = timesAndPlusFunc->Forward({ { inputVar, inputValue } }, outputs, device, { timesAndPlusFunc->Output() }); if (!usePreAllocatedOutputs) outputValue = outputs[timesAndPlusFunc->Output()]; // Perform backprop std::vector<ElementType> rootGradientsData(outputShape.TotalSize(), 1); ValuePtr rootGradientValue; if (device.Type() == DeviceKind::CPU) rootGradientValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(outputShape, rootGradientsData.data(), rootGradientsData.size(), device, true)); else { NDArrayViewPtr cpuArrayView = MakeSharedObject<NDArrayView>(outputShape, rootGradientsData.data(), rootGradientsData.size(), DeviceDescriptor::CPUDevice(), true); NDArrayViewPtr gpuArrayView = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), outputShape, device); gpuArrayView->CopyFrom(*cpuArrayView); rootGradientValue = MakeSharedObject<Value>(gpuArrayView); } std::vector<ElementType> plusParameterGradientData(plusParam.Shape().TotalSize()); std::vector<ElementType> timesParameterGradientData(timesParam.Shape().TotalSize()); ValuePtr plusParameterGradientValue, timesParameterGradientValue; if (usePreAllocatedOutputs) { auto outputAllocationDevice = outputOnSpecifiedDevice ? 
device : DeviceDescriptor::CPUDevice(); if (outputAllocationDevice.Type() == DeviceKind::CPU) { plusParameterGradientValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(plusParam.Shape(), plusParameterGradientData.data(), plusParameterGradientData.size(), outputAllocationDevice, false)); timesParameterGradientValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(timesParam.Shape(), timesParameterGradientData.data(), timesParameterGradientData.size(), outputAllocationDevice, false)); } else { plusParameterGradientValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), plusParam.Shape(), outputAllocationDevice)); timesParameterGradientValue = MakeSharedObject<Value>(MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), timesParam.Shape(), outputAllocationDevice)); } } std::unordered_map<Variable, ValuePtr> paramGradients = { { plusParam, plusParameterGradientValue }, { timesParam, timesParameterGradientValue } }; timesAndPlusFunc->Backward(backpropState, { { timesAndPlusFunc->Output(), rootGradientValue } }, paramGradients); if (!usePreAllocatedOutputs) { plusParameterGradientValue = paramGradients[plusParam]; timesParameterGradientValue = paramGradients[timesParam]; } // Verify forward prop results if (!usePreAllocatedOutputs || (outputOnSpecifiedDevice && (device.Type() != DeviceKind::CPU))) { NDArrayViewPtr cpuArrayView = MakeSharedObject<NDArrayView>(outputShape, outputData.data(), outputData.size(), DeviceDescriptor::CPUDevice(), false); cpuArrayView->CopyFrom(*outputValue->Data()); } std::vector<ElementType> expectedOutputValues(outputShape.TotalSize()); for (size_t i = 0; i < numSamples; ++i) { ElementType expectedVal = (ElementType)1.2; for (size_t j = 0; j < inputDim; ++j) expectedVal += (ElementType)(inputData[i * inputDim + j] * 0.5); for (size_t j = 0; j < outputDim; ++j) expectedOutputValues[i * outputDim + j] = expectedVal; } FloatingPointVectorCompare(outputData, expectedOutputValues, 
"TestTimesAndPlus: Forward prop results do not match expected results"); // Verify backward prop results if (device.Type() != DeviceKind::CPU) { NDArrayViewPtr cpuArrayView = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), plusParam.Shape(), DeviceDescriptor::CPUDevice()); cpuArrayView->CopyFrom(*plusParameterGradientValue->Data()); const ElementType* cpuArrayViewBuffer = cpuArrayView->DataBuffer<ElementType>(); memcpy(plusParameterGradientData.data(), cpuArrayViewBuffer, plusParam.Shape().TotalSize() * sizeof(ElementType)); cpuArrayView = MakeSharedObject<NDArrayView>(AsDataType<ElementType>(), timesParam.Shape(), DeviceDescriptor::CPUDevice()); cpuArrayView->CopyFrom(*timesParameterGradientValue->Data()); cpuArrayViewBuffer = cpuArrayView->DataBuffer<ElementType>(); memcpy(timesParameterGradientData.data(), cpuArrayViewBuffer, timesParam.Shape().TotalSize() * sizeof(ElementType)); } for (size_t i = 0; i < outputDim; ++i) if (plusParameterGradientData[i] != numSamples) throw std::runtime_error("TestTimesAndPlus: Backprop prop results do not match expected results for Plus params gradients"); std::vector<ElementType> expectedTimesParamsGradientValues(timesParam.Shape().TotalSize()); for (size_t i = 0; i < inputDim; ++i) { ElementType expectedVal = 0; for (size_t j = 0; j < numSamples; ++j) expectedVal += inputData[j * inputDim + i]; for (size_t j = 0; j < outputDim; ++j) expectedTimesParamsGradientValues[i * outputDim + j] = expectedVal; } FloatingPointVectorCompare(timesParameterGradientData, expectedTimesParamsGradientValues, "TestTimesAndPlus: Backprop prop results do not match expected results for Times params gradients"); } }
/**
 * Is the (global) device the Condor simulator driver?
 */
bool
HaveCondorDevice()
{
  return device.IsCondor();
}