/// Builds the cdrecord(1) argument list for an audio burn from the
/// recorder's current option state.
///
/// @return the full argv (options first, then dev=, -audio, and the
///         track files) ready to hand to QProcess.
QStringList AudioCdRecord::arguments() const
{
    QStringList args;
    /*! --------------------- Normal Arguments --------------------!*/
    args << "-v";                               // verbose, so progress output can be parsed
    if( publicRead() )
        args << "-r";
    if( dummy() )
        args << "-dummy";                       // simulate the burn with laser off
    if( force() )
        args << "-force";
    if( sessionAtOnce() )
        args << "-sao";
    if( trackAtOnce() )
        args << "-tao";
    if( swab() )
        args << "-swab";                        // byte-swap audio data
    if( multiSession() )
        args << "-multi";
    if( pad() )
        args << "-pad";
    // BUG FIX: cdrecord expects the pad amount as "padsize=<n>".  The
    // previous code appended the bare number, which cdrecord would have
    // misparsed as a track file name.
    if( pad() && padSize() > 0 )
        args << "padsize=" + QString::number( padSize() );
    if( eject() )
        args << "-eject";
    if( ringBuffer() > 0 )
        args << "fs=" + QString::number( ringBuffer() );
    if( minimumBuffer() > 0 )
        args << "minbuf=" + QString::number( minimumBuffer() );
    if( speed() > 0 )
        args << "speed=" + QString::number( speed() );
    args << "dev=" + currentDevice().toQString();
    args << "-audio";                           // all subsequent files are audio tracks
    args << files();
    /*! -----------------------------------------------------------!*/
    return args;
}
int TestRingBuffer() { PrepareData(); RingBuffer<int> ringBuffer(100); // test 1, test overwrite size_t numWritten = ringBuffer.Write(testBuffer, 1024); assert(ringBuffer.Capacity() == 100); assert(ringBuffer.Size() == 100); for (int i = 0; i < 100; i++) { assert(ringBuffer[i] == testBuffer[numWritten - 1 - i]); } // test 2, test partial write ringBuffer.Reset(); assert(ringBuffer.Capacity() == 100); assert(ringBuffer.Size() == 0); numWritten = ringBuffer.Write(testBuffer, 10); numWritten = ringBuffer.Write(testBuffer + 10, 10); numWritten = ringBuffer.Write(testBuffer + 20, 80); for (int i = 0; i < 100; i++) { assert(ringBuffer[i] == 99 - i); } assert(ringBuffer.WriteIndex() == 0); ringBuffer.Write(testBuffer, 10); assert(ringBuffer.WriteIndex() == 10); return 0; };
// Verifies that a publisher cannot claim a slot that would overwrite an
// event the (gating) event processor has not yet consumed: the publisher
// thread tries to publish ringBufferSize + 1 events into a buffer of
// ringBufferSize, and must block inside ringBuffer.next() on the last one
// until the processor advances its sequence.
TEST_F(RingBufferTest, shouldPreventPublishersOvertakingEventProcessorWrapPoint)
{
    const int ringBufferSize = 4;
    // Latch is sized to the buffer capacity: it opens once the buffer is
    // full, while the publisher is still blocked on the final next().
    CountDownLatch latch(ringBufferSize);
    std::atomic_bool publisherComplete(false);
    MultiThreadedClaimStrategy claimStrategy(ringBufferSize);
    BlockingWaitStrategy waitStrategy;
    RingBuffer<StubEvent> ringBuffer(claimStrategy, waitStrategy);
    std::unique_ptr<SequenceBarrier> sequenceBarrier = ringBuffer.newBarrier({});
    TestEventProcessor processor(*sequenceBarrier);
    // The processor's sequence gates the ring buffer: publishers may not
    // wrap past it.
    ringBuffer.setGatingSequences({ &processor.getSequence() });
    std::thread thread([&] {
        // Publishes one more event than the buffer can hold; the final
        // iteration's next() must block until the processor catches up.
        for (int i = 0; i <= ringBufferSize; i++) {
            long sequence = ringBuffer.next();
            StubEvent& event = ringBuffer.get(sequence);
            event.setValue(i);
            ringBuffer.publish(sequence);
            latch.countDown();
        }
        publisherComplete.store(true);
    });
    // Wait until exactly ringBufferSize events have been published.
    latch.await();
    // Cursor stops at the last claimable slot...
    EXPECT_EQ((long)ringBuffer.getCursor(), (long)(ringBufferSize - 1));
    // ...and the publisher must still be blocked at this point.
    ASSERT_FALSE(publisherComplete.load());
    // Running the processor advances the gating sequence, unblocking the
    // publisher so it can finish its final publish.
    processor();
    thread.join();
    ASSERT_TRUE(publisherComplete.load());
}
// Drains captured PCM audio from the ASIHPI input stream into the
// device's ring buffer(s).  Compiled to a no-op unless built with
// ASIHPI support.
void AsiHpiDevice::readData()
{
#ifdef ASIHPI
  uint16_t state=0;
  uint32_t buffer_size=0;
  uint32_t data_recorded=0;        // bytes currently available to read
  uint32_t samples_recorded=0;
  uint32_t aux_data_recorded=0;
  // Query the input stream; HpiLog() logs any HPI error code.
  HpiLog(HPI_InStreamGetInfoEx(NULL,asihpi_input_stream,&state,&buffer_size,
			       &data_recorded,&samples_recorded,
			       &aux_data_recorded));
  if(state==HPI_STATE_RECORDING) {
    // Read the available bytes into asihpi_pcm_buffer; a return of 0
    // appears to indicate success here — confirm against the HPI docs.
    if(HpiLog(HPI_InStreamReadBuf(NULL,asihpi_input_stream,asihpi_pcm_buffer,
				  data_recorded))==0) {
      // Fan the same data out to every ring buffer.  The write length is
      // converted from bytes to frames — assumes float samples,
      // interleaved by channel (TODO confirm buffer format).
      for(unsigned i=0; i<ringBufferQuantity(); i++) {
	ringBuffer(i)->write((float *)asihpi_pcm_buffer,
			     data_recorded/(sizeof(float)*channels()));
      }
    }
  }
  else {
    // Stream exists but is not capturing; surface the raw HPI state for
    // debugging.
    Log(LOG_WARNING,"not in recording state"+
	QString().sprintf(" [state: %u]",state));
  }
#endif  // ASIHPI
}
int main(int argc, const char * argv[]) { TFPerformanceTimer performaceTimer; { performaceTimer.start(); for (int i = 0; i < OBJECT_COUNT; i++) { new TestObject(); } std::cout << "Took " << (double)performaceTimer.stop()/1000000 << "ms to new " << OBJECT_COUNT << " objects" << std::endl; } { TFObjectPool<TestObject> objectPool(OBJECT_COUNT); performaceTimer.start(); for (int i = 0; i < OBJECT_COUNT; i++) { objectPool.allocateObject(); } std::cout << "Took " << (double)performaceTimer.stop()/1000000 << "ms to allocate " << OBJECT_COUNT << " objects from pool" << std::endl; } { TFObjectPool<TestObject> objectPool(OBJECT_COUNT); std::vector<TestObject *> m_objects(OBJECT_COUNT); for (int i = 0; i < OBJECT_COUNT; i++) { m_objects.push_back(objectPool.allocateObject()); } std::vector<TestObject *>::const_iterator it = m_objects.begin(); performaceTimer.start(); for (;it != m_objects.end(); it++) { objectPool.release(*it); } std::cout << "Took " << (double)performaceTimer.stop()/1000000 << "ms to release " << OBJECT_COUNT << " objects from pool" << std::endl; } { TFObjectPool<TestObject> objectPool(1); performaceTimer.start(); for (int i = 0; i < OBJECT_COUNT; i++) { objectPool.allocateObject(); } std::cout << "Took " << (double)performaceTimer.stop()/1000000 << "ms to allocate " << OBJECT_COUNT << " objects from pool" << std::endl; } performaceTimer.start(); TFRingBuffer<TestObject> ringBuffer(20); std::cout << "Took " << (double)performaceTimer.stop()/1000000 << "ms to allocate " << OBJECT_COUNT << " objects from pool" << std::endl; return 0; }
// Constructs the main window and runs a small RingBuffer<double> demo:
// two writes, two reads, three more writes (one past capacity), then a
// dump of the buffer contents via qDebug().
MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent),
      ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    // Earlier CircularBuffer experiment, kept disabled for reference:
    // CircularBuffer circularBuffer(4);
    // circularBuffer.readFromBuffer(6);

    RingBuffer<double> demoBuffer(4);

    demoBuffer.write(1.0);
    demoBuffer.write(2.0);

    // Consume the two queued values.
    qDebug() << demoBuffer.read();
    qDebug() << demoBuffer.read();

    // Refill with three more values.
    demoBuffer.write(3.0);
    demoBuffer.write(4.0);
    demoBuffer.write(5.0);

    demoBuffer.printValues();
}
// Reconstructs an audio sample from the cochlear layer's per-frequency
// activity: records the (scale-normalized) input into a ring-buffered
// history, then forms the imaginary part of an inverse-transform sum
// over the history window and writes it to this layer's single output
// neuron.
//
// NOTE(review): this block was recovered from a collapsed/one-line
// extraction; which of the `sumreal`/`sumimag` statements were commented
// out in the original is ambiguous.  The `sumimag` statements are kept
// active here because `sumimag` feeds `outV[0]` below — confirm against
// the original file.
int inverseCochlearLayer::updateState(double time, double dt){
   update_timer->start();
//   if (time >= nextDisplayTime) {
//      nextDisplayTime += cochlearLayer->getDisplayPeriod();
   const PVLayerLoc * loc = getLayerLoc();
   int nx = loc->nx;
   int ny = loc->ny;
   int nf = loc->nf;
   //This layer must be 1X1X(INVERSECOCHLEARLAYER_NF)
   assert(nx == 1 && ny == 1 && nf == INVERSECOCHLEARLAYER_NF);
   int num_input_neurons = inputLayer->getNumNeurons();
   int num_output_neurons = getNumNeurons();
   //num_output_neurons should be only INVERSECOCHLEARLAYER_NF
   assert(num_output_neurons == INVERSECOCHLEARLAYER_NF);

   // Record this timestep into the ring buffer at the current level:
   // timestamp plus the input activity divided by each channel's
   // cochlear scale (undoing the forward layer's scaling).
   timehistory[ringBufferLevel] = time;
   for (int k=0; k<cochlearLayer->getLayerLoc()->nx; k++) {
      xhistory[ringBufferLevel][k] = (inputLayer->getLayerData()[k]) / cochlearLayer->getCochlearScales()[k];
      // //std::cout << "xvalues " << inputLayer->getLayerData()[k] << "\n" ;
      //std::cout << " cochlearscales" << cochlearLayer->getCochlearScales()[k] << "\n" ;
      //std::cout << "radianfreqs" << radianFreqs[k] << "\n" ;
   }
   // memcpy?

   // Accumulate the inverse-transform sum over the whole history window
   // (bufferLength timesteps x numFrequencies channels).  ringBuffer(j)
   // presumably maps window offset j to a physical history slot —
   // confirm against its definition.  The real part is disabled.
   double sumreal = 0.0;
   double sumimag = 0.0;
   for (int j=0; j<bufferLength; j++) {
      for (int k=0; k<numFrequencies; k++) {
         //sumreal += Mreal[j][k]*xhistory[ringBuffer(j)][k];
         sumimag += Mimag[j][k]*xhistory[ringBuffer(j)][k];
      }
   }
   //sumreal /= (2*PI);
   sumimag /= (2*PI);

   //Reset pointer of gSynHead to point to the excitatory channel
//   pvdata_t * inA = inputLayer->getCLayer()->activity->data;
   pvdata_t * outV = getV();
   // Single output neuron; the /10 is an empirical amplitude correction.
   outV[0] = sumimag / 10;
   //outV[1] = sumreal;
   //*outV is where the output data should go

   //Copy V to A buffer
   PV::HyPerLayer::setActivity();
//   clayer->activity->data[0] *= 0.25; // With bufferLength 1, sound is reproduced well but at a higher amplitude
//   clayer->activity->data[1] *= 0.25; // This corrects the amplitude to approximately its original value
                                        // But I think the correction factor depends on frequency. --pfs Jun 23, 2014

   // Advance the ring buffer level, wrapping at bufferLength.
   ringBufferLevel++;
   if (ringBufferLevel == bufferLength) {
      ringBufferLevel = 0;
   }
   //} // end nextdisplaytime
   update_timer->stop();
   return PV_SUCCESS;
}