// Render one slice of audio: fire any pending scheduled events, silence all
// output buses, then let each synth group mix its notes into the outputs.
// Returns the first group's error code on failure, noErr otherwise.
ComponentResult AUInstrumentBase::Render(AudioUnitRenderActionFlags& ioActionFlags,
                                         const AudioTimeStamp& inTimeStamp,
                                         UInt32 inNumberFrames)
{
	PerformEvents(inTimeStamp);

	// Zero every buffer of every output element so groups can mix into silence.
	const UInt32 outputCount = Outputs().GetNumberOfElements();
	for (UInt32 busIdx = 0; busIdx < outputCount; ++busIdx)
	{
		AudioBufferList& buffers = GetOutput(busIdx)->GetBufferList();
		for (UInt32 bufIdx = 0; bufIdx < buffers.mNumberBuffers; ++bufIdx)
			memset(buffers.mBuffers[bufIdx].mData, 0, buffers.mBuffers[bufIdx].mDataByteSize);
	}

	// Render each synth group; bail out on the first error.
	const UInt32 groupCount = Groups().GetNumberOfElements();
	for (UInt32 groupIdx = 0; groupIdx < groupCount; ++groupIdx)
	{
		SynthGroupElement* group = (SynthGroupElement*)Groups().GetElement(groupIdx);
		OSStatus result = group->Render(inNumberFrames);
		if (result)
			return result;
	}

	// Advance the running sample-frame counter by the frames just rendered.
	mAbsoluteSampleFrame += inNumberFrames;
	return noErr;
}
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // karoke::karoke //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ karoke::karoke(AudioUnit component) : AUEffectBase(component, false) { CreateElements(); CAStreamBasicDescription streamDescIn; streamDescIn.SetCanonical(NUM_INPUTS, false); // number of input channels streamDescIn.mSampleRate = GetSampleRate(); CAStreamBasicDescription streamDescOut; streamDescOut.SetCanonical(NUM_OUTPUTS, false); // number of output channels streamDescOut.mSampleRate = GetSampleRate(); Inputs().GetIOElement(0)->SetStreamFormat(streamDescIn); Outputs().GetIOElement(0)->SetStreamFormat(streamDescOut); Globals()->UseIndexedParameters(kNumberOfParameters); SetParameter(kParam_One, kDefaultValue_ParamOne ); #if AU_DEBUG_DISPATCHER mDebugDispatcher = new AUDebugDispatcher (this); #endif mLeftFilter = new FirFilter(200); mLeftFilter->setCoeffecients(lp_200, 200); mRightFilter = new FirFilter(200); mRightFilter->setCoeffecients(lp_200, 200); }
// Render one slice of audio: fire any pending scheduled events, prepare and
// silence every output bus, then let each synth group mix its active notes
// into the outputs at the given sample time.
OSStatus AUInstrumentBase::Render(AudioUnitRenderActionFlags& ioActionFlags,
                                  const AudioTimeStamp& inTimeStamp,
                                  UInt32 inNumberFrames)
{
	PerformEvents(inTimeStamp);

	AUScope& outputs = Outputs();
	const UInt32 outputCount = outputs.GetNumberOfElements();
	for (UInt32 busIdx = 0; busIdx < outputCount; ++busIdx)
	{
		// AUBase::DoRenderBus() only prepares the first output element, so
		// prepare each one here before zeroing its buffers.
		GetOutput(busIdx)->PrepareBuffer(inNumberFrames);
		AudioBufferList& buffers = GetOutput(busIdx)->GetBufferList();
		for (UInt32 bufIdx = 0; bufIdx < buffers.mNumberBuffers; ++bufIdx)
			memset(buffers.mBuffers[bufIdx].mData, 0, buffers.mBuffers[bufIdx].mDataByteSize);
	}

	// Render each synth group into the output scope; bail on the first error.
	const UInt32 groupCount = Groups().GetNumberOfElements();
	for (UInt32 groupIdx = 0; groupIdx < groupCount; ++groupIdx)
	{
		SynthGroupElement* group = (SynthGroupElement*)Groups().GetElement(groupIdx);
		OSStatus result = group->Render((SInt64)inTimeStamp.mSampleTime, inNumberFrames, outputs);
		if (result)
			return result;
	}

	mAbsoluteSampleFrame += inNumberFrames;
	return noErr;
}
double MultiLayerNN::TestAll() { double* e = new double[mOutputsNumber]; double* aux = new double[mOutputsNumber];//to calculate output error with learning data int errors = 0; std::vector<double> Outputs(mOutputsNumber); //Test each patron for(int sample=0; sample < mLearnBook.size(); ++sample) { LearnInfo& info = mLearnBook[sample]; std::copy(info.mInputs.begin(), info.mInputs.end(), mInputs); Outputs = Process(); std::fill(aux,aux+mOutputsNumber, -1.0); aux[(int)info.mOutput] = 1.0; for(int j=0; j < mOutputsNumber; ++j) { e[j] = mErrorFunction(mSignFunction(Outputs[j]), aux[j]); if( e[j] != 0 ) e[j] = e[j]/e[j]; } if(std::find(e,e+mOutputsNumber,1) != e+mOutputsNumber) ++errors; } delete[] aux; return ( errors/(double)mLearnBook.size() ); }
std::vector<double> MultiLayerNN::Process() { std::vector<double> Outputs(this->mOutputsNumber, 0.0); int maxInputs = max(mInputCount, mNeuronPerLayer); double* CurrentInputs = new double[maxInputs]; double* NextInputs = new double[maxInputs]; std::copy(mInputs, mInputs+mInputCount, CurrentInputs); std::fill(CurrentInputs+mInputCount, CurrentInputs+maxInputs, 0.0); // Hiden Layers for(int i=0; i < (mLayerCount-1); ++i) { for(int j=0; j < mNeuronPerLayer ; ++j) NextInputs[j] = mNeuronMatrix[i][j].Process(CurrentInputs, mPartitionFunction); std::swap(CurrentInputs,NextInputs); } // Front Layer: Apply Sigmoid for(int j=0; j < mOutputsNumber; ++j) Outputs[j] = mNeuronMatrix[mLayerCount-1][j].Process(CurrentInputs, mPartitionFunction); delete[] NextInputs; delete[] CurrentInputs; return Outputs; }
// Run the two-port's transfer function, then transmit every output state
// variable that changed along its bound connector on the given path.
void TF_2Port::Evaluate (Path* path) {
    Transfer();
    for (int idx = 0; idx < Outputs(); ++idx) {
        if (!ChangedOutput(idx)) {
            continue;  // unchanged outputs are not retransmitted
        }
        StateVar* changed = GetOutput(idx);
        GetBinding(changed)->Transmit(path);
    }
}
// Force-feedback worker thread: each time the update event fires, push every
// force's pending state to the device; on shutdown, release all effects.
void ForceFeedbackDevice::ThreadFunc()
{
  Common::SetCurrentThreadName("ForceFeedback update thread");

  while (m_run_thread.IsSet())
  {
    // Block until someone signals that a force value changed.
    m_update_event.Wait();
    for (auto* output : Outputs())
      static_cast<Force*>(output)->UpdateOutput();
  }

  // The run flag was cleared: release every effect before exiting.
  for (auto* output : Outputs())
    static_cast<Force*>(output)->Release();
}
// Rebuild the option list with the names of the selected device's inputs or
// outputs, depending on whether an input or an output is being mapped.
void IOWindow::UpdateOptionList()
{
  m_option_list->clear();

  const auto device = g_controller_interface.FindDevice(m_devq);
  if (device == nullptr)
    return;  // device no longer available - leave the list empty

  const bool wants_inputs = m_reference->IsInput();
  if (wants_inputs)
  {
    for (const auto* control : device->Inputs())
      m_option_list->addItem(QString::fromStdString(control->GetName()));
  }
  else
  {
    for (const auto* control : device->Outputs())
      m_option_list->addItem(QString::fromStdString(control->GetName()));
  }
}
// Creates one DirectInput effect per entry in force_type_names (constant,
// ramp, or periodic force), wrapping each successfully created effect in an
// output object. If at least one effect was created, disables autocentering
// and starts the update thread. Returns false only when the device has no
// axes; returns true otherwise (even if no effect could be created).
bool ForceFeedbackDevice::InitForceFeedback(const LPDIRECTINPUTDEVICE8 device, int axis_count)
{
  if (axis_count == 0)
    return false;

  // We just use the X axis (for wheel left/right).
  // Gamepads seem to not care which axis you use.
  // These are temporary for creating the effect:
  std::array<DWORD, 1> rgdwAxes = {DIJOFS_X};
  std::array<LONG, 1> rglDirection = {-200};

  DIEFFECT eff{};
  eff.dwSize = sizeof(eff);
  eff.dwFlags = DIEFF_CARTESIAN | DIEFF_OBJECTOFFSETS;
  // DI_SECONDS / 1000 * ms converts a millisecond count into DirectInput
  // time units for the effect duration.
  eff.dwDuration = DI_SECONDS / 1000 * RUMBLE_LENGTH_MS;
  eff.dwSamplePeriod = 0;
  eff.dwGain = DI_FFNOMINALMAX;
  eff.dwTriggerButton = DIEB_NOTRIGGER;
  eff.dwTriggerRepeatInterval = 0;
  eff.cAxes = DWORD(rgdwAxes.size());
  eff.rgdwAxes = rgdwAxes.data();
  eff.rglDirection = rglDirection.data();
  eff.dwStartDelay = 0;

  // Initialize parameters with zero force (their current state).
  DICONSTANTFORCE diCF{};
  diCF.lMagnitude = 0;
  DIRAMPFORCE diRF{};
  diRF.lStart = diRF.lEnd = 0;
  DIPERIODIC diPE{};
  diPE.dwMagnitude = 0;
  diPE.lOffset = 0;
  diPE.dwPhase = 0;
  diPE.dwPeriod = DI_SECONDS / 1000 * RUMBLE_PERIOD_MS;

  // The type-specific parameter block handed to CreateEffect depends on the
  // force GUID being created.
  for (auto& f : force_type_names)
  {
    if (f.guid == GUID_ConstantForce)
    {
      eff.cbTypeSpecificParams = sizeof(DICONSTANTFORCE);
      eff.lpvTypeSpecificParams = &diCF;
    }
    else if (f.guid == GUID_RampForce)
    {
      eff.cbTypeSpecificParams = sizeof(DIRAMPFORCE);
      eff.lpvTypeSpecificParams = &diRF;
    }
    else
    {
      // All other forces need periodic parameters:
      eff.cbTypeSpecificParams = sizeof(DIPERIODIC);
      eff.lpvTypeSpecificParams = &diPE;
    }

    LPDIRECTINPUTEFFECT pEffect;
    if (SUCCEEDED(device->CreateEffect(f.guid, &eff, &pEffect, nullptr)))
    {
      // Each wrapper receives a copy of the zeroed parameter struct that it
      // will mutate when the force value changes.
      if (f.guid == GUID_ConstantForce)
        AddOutput(new ForceConstant(this, f.name, pEffect, diCF));
      else if (f.guid == GUID_RampForce)
        AddOutput(new ForceRamp(this, f.name, pEffect, diRF));
      else
        AddOutput(new ForcePeriodic(this, f.name, pEffect, diPE));
    }
  }

  // Disable autocentering:
  if (Outputs().size())
  {
    DIPROPDWORD dipdw;
    dipdw.diph.dwSize = sizeof(DIPROPDWORD);
    dipdw.diph.dwHeaderSize = sizeof(DIPROPHEADER);
    dipdw.diph.dwObj = 0;
    dipdw.diph.dwHow = DIPH_DEVICE;
    dipdw.dwData = DIPROPAUTOCENTER_OFF;
    device->SetProperty(DIPROP_AUTOCENTER, &dipdw.diph);

    // At least one effect exists: start the thread that pushes force
    // updates to the device.
    m_run_thread.Set();
    m_update_thread = std::thread(&ForceFeedbackDevice::ThreadFunc, this);
  }

  return true;
}
// Leave-k-out cross-validation training.
// For each leave-k-out partition: reload the initial (saved) network, train it
// for maxIteration epochs on the learn split, then count misclassified samples
// on the held-out test split. Aggregate results are reported through the
// out-parameters; the function itself always returns 1.0.
//
//   maxIteration           - training epochs per partition
//   k                      - number of samples held out per partition
//   ErrorAverage           - [out] mean misclassification count per partition
//   ErrorStandarDesviation - [out] sqrt(sum(err_i^2)) / PartitionsNumber.
//                            NOTE(review): this does not subtract the mean, so
//                            it is not a true standard deviation - confirm
//                            whether that is intentional.
//   doFeedBack             - when true, writes per-partition progress to feedbackStream
//
// NOTE(review): throws a *pointer* (`new std::exception(...)`), so callers must
// catch `std::exception*` and delete it; it also relies on the non-standard
// MSVC std::exception(const char*) constructor.
double MultiLayerNN::LearnCrossValidation(int maxIteration, int k, double& ErrorAverage, double& ErrorStandarDesviation, bool doFeedBack, std::ostream& feedbackStream) throw(std::exception)
{
	// k must leave at least one sample to train on.
	if( k >= mLearnBook.size() )
		throw (new std::exception("No se puede hacer leave-k-out con k mas grande o igual a la particion universal"));

	std::stringstream firstNetwork;//Save the initial network so every partition trains from the same weights
	Serialize(firstNetwork);

	// Scratch buffers owned by _LearnInicialization/_LearnFinish:
	double** Y = NULL;   // per-layer neuron outputs from the forward pass
	double** ro = NULL;  // per-layer backpropagation terms (consumed by _Retropropagate)
	double* e = NULL;    // per-output error for the current sample
	std::vector< std::vector< std::vector<double> > > deltaWeights(mLayerCount);
	std::vector< std::vector< std::vector<double> > > deltaWeightsPrevious(mLayerCount);
	_LearnInicialization(Y,ro,e,deltaWeights,deltaWeightsPrevious);//reserve and initialize variables

	int PartitionsNumber = ceil(mLearnBook.size()/(double)k);
	double* aux = new double[mOutputsNumber];//target pattern used to compute output error
	int* errors = new int[PartitionsNumber]; // misclassified-sample count per partition
	std::fill(errors,errors+PartitionsNumber, 0);
	double errorsThreshold = 0; // accumulated raw error mass (NOTE(review): computed but never reported)

	Partition validationPartition(mLearnBook);
	validationPartition.InitLeaveKOut(k);
	for(int curPart=0;;++curPart)//until no more leave-k-out partitions
	{
		std::vector<int> shuffle(validationPartition.mLearnPartition.size());//creates shuffle here; could be optimized
		validationPartition.GetShuffle(shuffle);
		std::vector<LearnInfo*>& LearnData = validationPartition.mLearnPartition;
		Load(firstNetwork);//each partition starts again from the saved network
		if( doFeedBack )
			feedbackStream<<"Particion Numero: "<<curPart<<std::endl;

		// NOTE(review): this loop variable shadows the parameter `k`. Harmless
		// here (the parameter is not used inside), but worth renaming.
		for(int k=0; k < maxIteration; ++k)//iterates the learning of the patterns
		{
			//Learn each pattern, in shuffled order
			for(int sample=0; sample < LearnData.size(); ++sample)
			{
				LearnInfo& info = *LearnData[shuffle[sample]];
				std::copy(info.mInputs.begin(), info.mInputs.end(), mInputs);
				_ProcessAll(Y);
				// Desired output: -1 everywhere, +1 at the sample's class index.
				std::fill(aux,aux+mOutputsNumber, -1.0);
				aux[(int)info.mOutput] = 1.0;
				for(int j=0; j < mOutputsNumber; ++j)
					e[j] = mErrorFunction(Y[mLayerCount-1][j], aux[j]);
				_Retropropagate(Y,ro,e,deltaWeights,deltaWeightsPrevious);
			}
		}

		std::vector<double> Outputs(mOutputsNumber);
		std::vector<LearnInfo*>& TestData = validationPartition.mTestPartition;
		//Test each held-out pattern
		for(int sample=0; sample < TestData.size(); ++sample)
		{
			LearnInfo& info = *TestData[sample];
			std::copy(info.mInputs.begin(), info.mInputs.end(), mInputs);
			Outputs = Process();
			std::fill(aux,aux+mOutputsNumber, -1.0);
			aux[(int)info.mOutput] = 1.0;
			for(int j=0; j < mOutputsNumber; ++j)
			{
				e[j] = mErrorFunction(mSignFunction(Outputs[j]), aux[j]);
				if( e[j] != 0 )
					e[j] = e[j]/e[j]; // normalize any non-zero error to exactly 1
			}
			//Errors to check with tolerance
			double newErrors = 0.0;
			// NOTE(review): `abs` on a double may resolve to the int overload
			// (truncating) unless <cmath>'s overloads are in scope - confirm.
			for(int j=0; j < mOutputsNumber; ++j)
				aux[j] = abs(e[j]);
			Math::Sum(aux,mOutputsNumber,newErrors);
			errorsThreshold += newErrors;
			// A sample counts as one error if any output was wrong.
			if(std::find(e,e+mOutputsNumber,1) != e+mOutputsNumber)
				++errors[curPart];
		}
		//Next Partitions
		if( !validationPartition.NextLeaveKOut() )
			break;
	}
	//errors / (double)PartitionsNumber;
	//errorsThreshold / (double)PartitionsNumber;
	ErrorAverage = 0.0;
	Math::Sum(errors, PartitionsNumber,ErrorAverage);
	ErrorAverage /= (double)PartitionsNumber;
	ErrorStandarDesviation = 0;
	for(int i=0; i < PartitionsNumber; ++i)
		ErrorStandarDesviation += errors[i]*errors[i];
	ErrorStandarDesviation = sqrt(ErrorStandarDesviation)/(double)PartitionsNumber;
	_LearnFinish(Y,ro,e,deltaWeights,deltaWeightsPrevious);
	Load(firstNetwork);//Restore the initial network
	// NOTE(review): stringstream::clear() only resets the stream's state
	// flags; it does not release the buffered contents.
	firstNetwork.clear();
	delete[] aux;
	delete[] errors;
	return 1.0;
	//return ( errors/(double)mLearnBook.size() );
}