int main(int argc, char *argv[]) { PABLIO_Stream *outStream; double theta = 0.0; double theta1 = 0.0; double delta, delta1; double frequency1 = FREQUENCY1; double attenuation = 0.1; if (argc > 1) frequency1 = atof(argv[1]); if (argc > 2) attenuation = atof(argv[2]); delta = FREQUENCY * 2.0 * 3.1415 / (double) SAMPLE_RATE; delta1 = frequency1 * 2.0 * 3.1415 / (double) SAMPLE_RATE; OpenAudioStream(&outStream, SAMPLE_RATE, paFloat32, PABLIO_WRITE|PABLIO_MONO); int x; while(1) { // synthesize and output samples forever for(x= 0; x<NUM_FRAMES; x++) { // synthesize a buffer full of samples buffer[x] = sin(theta) * 0.9 + sin(theta1) * attenuation; theta+= delta; theta1+= delta1; } // blocking write provides flow control WriteAudioStream(outStream, buffer, NUM_FRAMES); } CloseAudioStream(outStream); }
int main(void) { int i, j, k, dlength, bufferIndex=0; float scale[8] = // E scale frequencies {164.81,184.99,207.65,220.00, 246.94,277.18,311.12,329.62}; short wg_l[DMAX], wg_r[DMAX]; // Waveguide delay lines short output; long temp; PABLIO_Stream *outStream; short buffer[NUM_FRAMES]; OpenAudioStream(&outStream, SAMPLE_RATE, paInt16, PABLIO_WRITE|PABLIO_MONO); for (j=0;j<8;j++) { // Play up the scale dlength = SAMPLE_RATE / scale[j] / 2; // Setup waveguide length for (k=0;k<=dlength/2;k++) { // Setup pluck temp = 20000 * (long) k * 2; temp /= dlength; wg_l[k] = temp; wg_l[dlength-k] = temp; wg_r[k] = temp; wg_r[dlength-k] = temp; } for (i=0;i<SAMPLE_RATE;i++) { // Play each note for 1 second temp = wg_l[0]; // stash delay outputs output = wg_r[0]; for (k=0;k<dlength-1;k++) { // Do delay lines wg_l[k] = wg_l[k+1]; wg_r[k] = wg_r[k+1]; } wg_l[dlength-1] = output*-0.99; // Do reflections wg_r[dlength-1] = -temp; buffer[bufferIndex++] = output; if (bufferIndex >= NUM_FRAMES){ WriteAudioStream(outStream, buffer, NUM_FRAMES); bufferIndex = 0; } } } if (bufferIndex > 0) WriteAudioStream(outStream, buffer, bufferIndex+1); CloseAudioStream(outStream); }
int main(void) { int i; PaError err; PABLIO_Stream *aInStream; PABLIO_Stream *aOutStream; int index; printf("Full duplex sound test using PABLIO\n"); fflush(stdout); /* Open simplified blocking I/O layer on top of PortAudio. */ /* Open input first so it can start to fill buffers. */ err = OpenAudioStream( &aInStream, SAMPLE_RATE, SAMPLE_TYPE, (PABLIO_READ | PABLIO_STEREO) ); if( err != paNoError ) goto error; /* printf("opened input\n"); fflush(stdout); /**/ err = OpenAudioStream( &aOutStream, SAMPLE_RATE, SAMPLE_TYPE, (PABLIO_WRITE | PABLIO_STEREO) ); if( err != paNoError ) goto error; /* printf("opened output\n"); fflush(stdout); /**/ /* Process samples in the foreground. */ index = 0; for( i=0; i<(NUM_SECONDS * SAMPLE_RATE); i++ ) { /* Write old frame of data to output. */ /* samples[index][1] = (i&256) * (1.0f/256.0f); /* sawtooth */ WriteAudioStream( aOutStream, &samples[index][0], 1 ); /* Read one frame of data into sample array for later output. */ ReadAudioStream( aInStream, &samples[index][0], 1 ); index += 1; if( index >= NUM_ECHO_FRAMES ) index = 0; if( (i & 0xFFFF) == 0 ) printf("i = %d\n", i ); fflush(stdout); /**/ } CloseAudioStream( aOutStream ); CloseAudioStream( aInStream ); printf("R/W echo sound test complete.\n" ); fflush(stdout); return 0; error: fprintf( stderr, "An error occured while using PortAudio\n" ); fprintf( stderr, "Error number: %d\n", err ); fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); return -1; }
int main(void) { int i,j; PaError err; PABLIO_Stream *aOutStream; printf("Generate sawtooth waves using PABLIO.\n"); fflush(stdout); /* Open simplified blocking I/O layer on top of PortAudio. */ err = OpenAudioStream( &aOutStream, SAMPLE_RATE, paFloat32, (PABLIO_WRITE | PABLIO_STEREO) ); if( err != paNoError ) goto error; /* Initialize oscillator phases. */ phases[0] = 0.0; phases[1] = 0.0; for( i=0; i<(NUM_SECONDS * SAMPLE_RATE); i += FRAMES_PER_BLOCK ) { /* Generate sawtooth waveforms in a block for efficiency. */ for( j=0; j<FRAMES_PER_BLOCK; j++ ) { /* Generate a sawtooth wave by incrementing a variable. */ phases[0] += PHASE_INCREMENT; /* The signal range is -1.0 to +1.0 so wrap around if we go over. */ if( phases[0] > 1.0f ) phases[0] -= 2.0f; samples[j][0] = phases[0]; /* On the second channel, generate a sawtooth wave a fifth higher. */ phases[1] += PHASE_INCREMENT * (3.0f / 2.0f); if( phases[1] > 1.0f ) phases[1] -= 2.0f; samples[j][1] = phases[1]; } /* Write samples to output. */ WriteAudioStream( aOutStream, samples, FRAMES_PER_BLOCK ); } CloseAudioStream( aOutStream ); printf("Sawtooth sound test complete.\n" ); fflush(stdout); return 0; error: fprintf( stderr, "An error occured while using PABLIO\n" ); fprintf( stderr, "Error number: %d\n", err ); fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); return -1; }
int main(void) { int i,j; PaError err; PABLIO_Stream *aOutStream; printf("Generate unsigned 8 bit sawtooth waves using PABLIO.\n"); fflush(stdout); /* Open simplified blocking I/O layer on top of PortAudio. */ err = OpenAudioStream( &aOutStream, SAMPLE_RATE, paUInt8, (PABLIO_WRITE | PABLIO_STEREO) ); if( err != paNoError ) goto error; /* Initialize oscillator phases to "ground" level for paUInt8. */ phases[0] = 128; phases[1] = 128; for( i=0; i<(NUM_SECONDS * SAMPLE_RATE); i += FRAMES_PER_BLOCK ) { /* Generate sawtooth waveforms in a block for efficiency. */ for( j=0; j<FRAMES_PER_BLOCK; j++ ) { /* Generate a sawtooth wave by incrementing a variable. */ phases[0] += 1; /* We don't have to do anything special to wrap when using paUint8 because * 8 bit arithmetic automatically wraps. */ samples[j][0] = phases[0]; /* On the second channel, generate a higher sawtooth wave. */ phases[1] += 3; samples[j][1] = phases[1]; } /* Write samples to output. */ WriteAudioStream( aOutStream, samples, FRAMES_PER_BLOCK ); } CloseAudioStream( aOutStream ); printf("Sawtooth sound test complete.\n" ); fflush(stdout); return 0; error: fprintf( stderr, "An error occured while using PABLIO\n" ); fprintf( stderr, "Error number: %d\n", err ); fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); return -1; }
int main(void) { PABLIO_Stream *outStream; OpenAudioStream(&outStream, SAMPLE_RATE, paFloat32, PABLIO_WRITE|PABLIO_MONO); while(1) { // synthesize and output samples forever before = GetAudioStreamWriteable(outStream); WriteAudioStream(outStream, &buffer, NUM_FRAMES); after = GetAudioStreamWriteable(outStream); printf("%ld, %ld\n", before, after); sleep(2); } }
int main(void) { PABLIO_Stream *inStream, *outStream; OpenAudioStream(&inStream, 44100, paInt16, PABLIO_MONO|PABLIO_READ); OpenAudioStream(&outStream, 44100, paInt16, PABLIO_MONO|PABLIO_WRITE); while(1) { // pass audio in to audio out one buffer at a time forever // using blocking read and write calls which provide flow control ReadAudioStream( inStream, buffer, NUM_FRAMES); // process samples in buffer here as desired WriteAudioStream(outStream, buffer, NUM_FRAMES); } }
int main(void) { PABLIO_Stream *outStream; double theta = 0.0, delta = FREQUENCY * 2.0 * 3.1415 / (double)SAMPLE_RATE; int x; OpenAudioStream(&outStream, SAMPLE_RATE, paFloat32, PABLIO_WRITE|PABLIO_MONO); while(1) { // synthesize and output samples forever for(x= 0; x<NUM_FRAMES; x++) { // synthesize a buffer full of samples buffer[x] = sin(theta); // ugly, I know... theta+= delta; } // blocking write provides flow control WriteAudioStream(outStream, buffer, NUM_FRAMES); } }
void outputAudio( double sampleRate, double duration, Source& source, Sequencer& sequencer ){ static const int length = 64; float output[length]; double secondsPerVector = (double)length / sampleRate; sequencer.beginPlayback(); PABLIO_Stream *stream; OpenAudioStream( &stream, sampleRate, paFloat32, PABLIO_WRITE | PABLIO_MONO ); unsigned long count = (sampleRate / length) * duration; for( unsigned long i=0; i<count; ++i ){ std::fill_n( output, length, 0.f ); // zero output source.synthesize( output, length ); sequencer.update( secondsPerVector ); WriteAudioStream( stream, output, length ); } CloseAudioStream( stream ); }
/*
 * Repeatedly play a pre-computed chirp (swept sine) buffer, sleeping
 * one second between repetitions.
 */
int main(void)
{
    PABLIO_Stream *outStream;
    int bufferSamples;
    float *buffer = createChirp(SAMPLE_RATE, 440.0, PERIOD, &bufferSamples);

    /* createChirp allocates the sample buffer; fail cleanly instead of
       dereferencing NULL if the allocation failed. */
    if (buffer == NULL) {
        fprintf(stderr, "createChirp failed\n");
        return 1;
    }

    printf("buffernumSamples = %d\n", bufferSamples);

    OpenAudioStream(&outStream, SAMPLE_RATE, paFloat32, PABLIO_WRITE|PABLIO_MONO);

    while (1) { /* synthesize and output samples forever */
        WriteAudioStream(outStream, buffer, bufferSamples);
        sleep(1);
    }

    /* Unreachable while the loop runs forever; kept so cleanup is
       correct if a termination condition is ever added. */
    free(buffer);
}
int main(void) { int i; SAMPLE samples[SAMPLES_PER_FRAME * FRAMES_PER_BLOCK]; PaError err; PABLIO_Stream *aStream; printf("Full duplex sound test using PortAudio and RingBuffers\n"); fflush(stdout); /* Open simplified blocking I/O layer on top of PortAudio. */ err = OpenAudioStream( &aStream, SAMPLE_RATE, SAMPLE_TYPE, (PABLIO_READ_WRITE | PABLIO_STEREO) ); if( err != paNoError ) goto error; /* Process samples in the foreground. */ for( i=0; i<(NUM_SECONDS * SAMPLE_RATE); i += FRAMES_PER_BLOCK ) { /* Read one block of data into sample array from audio input. */ ReadAudioStream( aStream, samples, FRAMES_PER_BLOCK ); /* Write that same block of data to output. */ WriteAudioStream( aStream, samples, FRAMES_PER_BLOCK ); } CloseAudioStream( aStream ); printf("Full duplex sound test complete.\n" ); fflush(stdout); return 0; error: Pa_Terminate(); fprintf( stderr, "An error occured while using the portaudio stream\n" ); fprintf( stderr, "Error number: %d\n", err ); fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) ); return -1; }
int main(int argc, char *argv[]) { PABLIO_Stream *outStream; double theta = 0.0; double delta = 2.0 * 3.1415 / (double)SAMPLE_RATE; double frequency, attenuation = 1.0; char scale = '\0'; int x,y; frequency = 440.0; // default frequency double power = pow(2., 1./12.); // 12th root of 2 double pythag = 0.0; double cents, oldcents = 0.0; switch(argc) { case 1: break; case 2: if (argv[1][0]=='-') scale = argv[1][1]; break; default: printf("usage: %s [-h(armonic),p(ythagorean)]\n", argv[1]); exit(-1); } OpenAudioStream(&outStream, SAMPLE_RATE, paFloat32, PABLIO_WRITE|PABLIO_MONO); while(1) { // synthesize and output samples forever for(y= 0; y<26; y++) { for(x= 0; x<NUM_FRAMES; x++) { // synthesize a buffer full of samples buffer[x] = attenuation * sin(theta); theta += frequency * delta; } // blocking write provides flow control WriteAudioStream(outStream, buffer, NUM_FRAMES); cents = 1200. * log(frequency/440.) / log(2.); printf("note: %-2s frequency: %7.3f cents: %8.3f step: %7.3f\n",strings[y<13?y:25-y],frequency,cents,cents-oldcents); oldcents = cents; if (y < 12) if (scale == 'h') frequency = 440. * harmonics[y+1]; else if (scale == 'p') { pythag = (pythag <= 5 ? pythag + 7 : pythag - 5); frequency = 440. * pow(3./2., pythag); // 3/2 to the 7th while (frequency > 893.0) frequency /= 2.; } else frequency *= power; else if (y > 12 && y < 25) if (scale == 'h') frequency = 440.0 * harmonics[24-y]; else if (scale == 'p') { pythag = (pythag < 7 ? pythag + 5 : pythag - 7); frequency = 440. * pow(3./2., pythag); // 3/2 to the 7th while (frequency > 893.0) frequency /= 2.; } else frequency /= power; } } CloseAudioStream(outStream); }
int main(int argc, char* argv[]) { unsigned char TuneSelection; unsigned char CurrentOctave; unsigned char ScriptIndex; unsigned char BassDrumIsOn; unsigned short BassDrumPointerIncrement; unsigned char BassDrumDurationCount; unsigned short BassDrumOutput; unsigned char CurrentNoteEvent; unsigned char NoteDurationCount; unsigned char NoteIndex; unsigned short NotePointerIncrement; unsigned short NoteWavePointer; unsigned short NoteAndBDOutput; unsigned char NoteSynthOutput; unsigned short TempoCounter; unsigned short OverFlow; unsigned char *WaveRAM; unsigned char WaveINDEX; unsigned char temp1 = 255; signed char temp2; short buffer[NUM_FRAMES]; PABLIO_Stream *outStream; if (argc != 2){ //printf("RockGuitar: Needs number 0-5\n"); exit; } temp2 = temp1; //wave table data for note synthesizer has the routine variables as its origin WaveRAM = &TuneSelection; OpenAudioStream(&outStream, SAMPLE_RATE, paInt16, PABLIO_WRITE|PABLIO_MONO); printf("Starting Synthesis!\n"); //client input, tune selector Jukebox TuneSelection = (unsigned char) atoi (argv[1]); printf("Selected Tune Script is %d\n",TuneSelection); //first time initialization CurrentOctave = 3; //octave state ScriptIndex = 0; //index to Tune script elements BassDrumIsOn = 0xff; //0xff = True, 0x00 = False BassDrumPointerIncrement = 0x07ff; //pitch for first bass drum event BassDrumDurationCount = 0x30; //duration for Bass Drum event CurrentNoteEvent = TuneScripts[TuneSelection][ScriptIndex++]; //prime pump, Sequencer gets first script element while(CurrentNoteEvent != 0xff){ //0xff is the end of script element if( ((CurrentNoteEvent & 0xf0)) == 0xf0){ // control element only octave is currently defined if( (CurrentNoteEvent & 0x0f) < 8){ // if note event CurrentOctave = CurrentNoteEvent & 0x0f; CurrentOctave = 7 - CurrentOctave; //invert it no division for top octave } } else { // note event, set it up and synthesize //number of ticks for this note event NoteDurationCount = Durations[ (CurrentNoteEvent & 0x0f)]; 
NoteIndex = (CurrentNoteEvent&0xf0)>>4; NotePointerIncrement = PitchIncrements[NoteIndex] >> CurrentOctave; while(NoteDurationCount){ //synthesis loop|PABLIO_MONO //synthesize bass drum BassDrumSynthesizer(&BassDrumPointerIncrement,&BassDrumOutput, &BassDrumIsOn); NoteAndBDOutput = BassDrumOutput; //FuzzGuitar tone generator NoteWavePointer += NotePointerIncrement; WaveINDEX = NoteWavePointer >> 8; NoteSynthOutput = *(WaveRAM+(WaveINDEX)); //power of two mixing NoteAndBDOutput += NoteSynthOutput >> 1; //write 8 bit unsigned raw Output to file for 6000 kHz playback sampling rate buffer[0] = (short)((NoteAndBDOutput - 0x7F)<<8); WriteAudioStream(outStream, buffer, 1); //scale tick to provide tempo control OverFlow = TempoCounter; TempoCounter += 0x0301; if(TempoCounter < OverFlow){ //OverFlow overflowed so a tick has occurred if(--BassDrumDurationCount == 0){ //time for new bass drum BassDrumDurationCount = 0x30; BassDrumIsOn = 0xFF; BassDrumPointerIncrement = 0x07FF; } //2 tick separation of each note event if(NoteDurationCount == 2) NotePointerIncrement = 0; NoteDurationCount--; } } }// note event, set it up and synthesize //get next script element CurrentNoteEvent = TuneScripts[TuneSelection][ScriptIndex++]; } //end of CurrentNoteEvent != 0xff CloseAudioStream(outStream); return 0; }
/*
 * Ogg Theora/Vorbis player main loop: parse the stream headers, set up
 * the decoders, then interleave audio decode (written to the PABLIO
 * output stream) with video decode (displayed via SDL), keeping A/V in
 * sync against wall-clock time.  Decoder state, the file handle, and
 * the audio stream (`aOutStream`, `samples`, `audiobuf_ready`, etc.)
 * are file-scope — see the rest of the file.
 */
int main( int argc, char* argv[] ){
  int i,j;
  ogg_packet op;
  SDL_Event event;
  int hasdatatobuffer = 1;
  int playbackdone = 0;
  double now, delay, last_frame_time = 0;
  int frameNum=0;
  int skipNum=0;

  /* takes first argument as file to play */
  /* this works better on Windows and is more convenient for drag and
     drop ogg files over the .exe */
  if( argc != 2 ) {
    usage();
    exit(0);
  }

  /* NOTE(review): fopen result is not checked — a bad path will crash
     downstream; confirm against parseHeaders()/buffer_data(). */
  infile = fopen( argv[1], "rb" );

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  theora_comment_init(&tc);
  theora_info_init(&ti);

  /* sets theora_p / vorbis_p according to which streams were found */
  parseHeaders();

  /* force audio off */
  /* vorbis_p = 0; */

  /* initialize decoders */
  if(theora_p){
    theora_decode_init(&td,&ti);
    printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n" " Frame content is %dx%d with offset (%d,%d).\n", to.serialno,ti.width,ti.height, (double)ti.fps_numerator/ti.fps_denominator, ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
    report_colorspace(&ti);
    dump_comments(&tc);
  }else{
    /* tear down the partial theora setup */
    theora_info_clear(&ti);
    theora_comment_clear(&tc);
  }
  if(vorbis_p){
    vorbis_synthesis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);
    printf("Ogg logical stream %x is Vorbis %d channel %d Hz audio.\n", vo.serialno,vi.channels,vi.rate);
  }else{
    /* tear down the partial vorbis setup */
    vorbis_info_clear(&vi);
    vorbis_comment_clear(&vc);
  }

  /* open audio */
  if(vorbis_p)open_audio();
  /* open video */
  if(theora_p)open_video();

  /* our main loop */
  while(!playbackdone){

    /* break out on SDL quit event */
    if ( SDL_PollEvent ( &event ) ) {
      if ( event.type == SDL_QUIT )
        break ;
    }

    /* get some audio data */
    while(vorbis_p && !audiobuf_ready){
      int ret;
      float **pcm;
      int count = 0;
      int maxBytesToWrite;

      /* is there pending audio? does it fit our circular buffer
         without blocking? */
      ret=vorbis_synthesis_pcmout(&vd,&pcm);
      maxBytesToWrite = GetAudioStreamWriteable(aOutStream);

      if (maxBytesToWrite<=FRAMES_PER_BUFFER){
        /* break out until there is a significant amount of data to
           avoid a series of small write operations. */
        break;
      }
      /* if there's pending, decoded audio, grab it */
      if((ret>0)&&(maxBytesToWrite>0)){

        /* clip each float sample to 16-bit and interleave channels
           into `samples` */
        for(i=0;i<ret && i<(maxBytesToWrite/vi.channels);i++)
          for(j=0;j<vi.channels;j++){
            int val=(int)(pcm[j][i]*32767.f);
            if(val>32767)val=32767;
            if(val<-32768)val=-32768;
            samples[count]=val;
            count++;
          }
        if(WriteAudioStream( aOutStream, samples, i )) {
          if(count==maxBytesToWrite){
            audiobuf_ready=1;
          }
        }
        /* tell vorbis how many frames we consumed */
        vorbis_synthesis_read(&vd,i);

        if(vd.granulepos>=0)
          audiobuf_granulepos=vd.granulepos-ret+i;
        else
          audiobuf_granulepos+=i;

      }else{
        /* no pending audio; is there a pending packet to decode? */
        if(ogg_stream_packetout(&vo,&op)>0){
          if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
            vorbis_synthesis_blockin(&vd,&vb);
        }else /* we need more data; break out to suck in another page */
          break;
      }
    } /* end audio cycle */

    while(theora_p && !videobuf_ready){
      /* get one video packet... */
      if(ogg_stream_packetout(&to,&op)>0){
        theora_decode_packetin(&td,&op);
        videobuf_granulepos=td.granulepos;
        videobuf_time=theora_granule_time(&td,videobuf_granulepos);
        /* update the frame counter */
        frameNum++;

        /* check if this frame time has not passed yet. If the frame is
           late we need to decode additonal ones and keep looping,
           since theora at this stage needs to decode all frames */
        now=get_time();
        delay=videobuf_time-now;
        if(delay>=0.0){
          /* got a good frame, not late, ready to break out */
          videobuf_ready=1;
        }else if(now-last_frame_time>=1.0){
          /* display at least one frame per second, regardless */
          videobuf_ready=1;
        }else{
          fprintf(stderr, "dropping frame %d (%.3fs behind)\n", frameNum, -delay);
        }
      }else{
        /* need more data */
        break;
      }
    }

    /* nothing buffered, nothing decoded, nothing left to read: done */
    if(!hasdatatobuffer && !videobuf_ready && !audiobuf_ready){
      isPlaying = 0;
      playbackdone = 1;
    }

    /* if we're set for the next frame, sleep until its display time */
    if((!theora_p || videobuf_ready) && (!vorbis_p || audiobuf_ready)){
      int ticks = 1.0e3*(videobuf_time-get_time());
      if(ticks>0)
        SDL_Delay(ticks);
    }

    if(videobuf_ready){
      /* time to write our cached frame */
      video_write();
      videobuf_ready=0;
      last_frame_time=get_time();

      /* if audio has not started (first frame) then start it */
      if ((!isPlaying)&&(vorbis_p)){
        start_audio();
        isPlaying = 1;
      }
    }

    /* HACK: always look for more audio data */
    audiobuf_ready=0;

    /* buffer compressed data every loop */
    if(hasdatatobuffer){
      hasdatatobuffer=buffer_data(&oy);
      if(hasdatatobuffer==0){
        printf("Ogg buffering stopped, end of file reached.\n");
      }
    }

    /* hand any newly read page to the right logical stream */
    if (ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og);
    }

  } /* playbackdone */

  /* show number of video frames decoded */
  printf( "\n");
  printf( "Frames decoded: %d", frameNum );
  if(skipNum)
    printf( " (only %d shown)", frameNum-skipNum);
  printf( "\n" );

  /* tear it all down */
  fclose( infile );

  if(vorbis_p){
    audio_close();
    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);
  }
  if(theora_p){
    ogg_stream_clear(&to);
    theora_clear(&td);
    theora_comment_clear(&tc);
    theora_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  printf("\r " "\nDone.\n");

  SDL_Quit();

  return(0);
}