void Sound::play() { if (!m_buffer || m_buffer->getSampleCount() == 0) return; if (getStatus() == Playing) stop(); m_channel = 0; while (m_channel < 24 && ndspChnIsPlaying(m_channel)) m_channel++; if (m_channel == 24) { err() << "Sound::play() failed because all channels are in use." << std::endl; m_channel = -1; return; } setPlayingOffset(m_pauseOffset); if (m_pauseOffset != Time::Zero) m_pauseOffset = Time::Zero; u32 size = sizeof(Int16) * m_buffer->getSampleCount(); ndspChnReset(m_channel); ndspChnSetInterp(m_channel, NDSP_INTERP_POLYPHASE); ndspChnSetRate(m_channel, float(m_buffer->getSampleRate())); ndspChnSetFormat(m_channel, (m_buffer->getChannelCount() == 1) ? NDSP_FORMAT_MONO_PCM16 : NDSP_FORMAT_STEREO_PCM16); DSP_FlushDataCache((u8*)m_buffer->getSamples(), size); ndspChnWaveBufAdd(m_channel, &m_ndspWaveBuf); }
static void audioThreadFunc(void *arg) { Audio::MixerImpl *mixer = (Audio::MixerImpl *)arg; OSystem_3DS *osys = (OSystem_3DS *)g_system; int i; const int channel = 0; int bufferIndex = 0; const int bufferCount = 3; const int bufferSize = 80000; // Can't be too small, based on delayMillis duration const int sampleRate = mixer->getOutputRate(); int sampleLen = 0; uint32 lastTime = osys->getMillis(true); uint32 time = lastTime; ndspWaveBuf buffers[bufferCount]; for (i = 0; i < bufferCount; ++i) { memset(&buffers[i], 0, sizeof(ndspWaveBuf)); buffers[i].data_vaddr = linearAlloc(bufferSize); buffers[i].looping = false; buffers[i].status = NDSP_WBUF_FREE; } ndspChnReset(channel); ndspChnSetInterp(channel, NDSP_INTERP_LINEAR); ndspChnSetRate(channel, sampleRate); ndspChnSetFormat(channel, NDSP_FORMAT_STEREO_PCM16); while (!osys->exiting) { osys->delayMillis(100); // Note: Increasing the delay requires a bigger buffer time = osys->getMillis(true); sampleLen = (time - lastTime) * 22 * 4; // sampleRate / 1000 * channelCount * sizeof(int16); lastTime = time; if (!osys->sleeping && sampleLen > 0) { bufferIndex++; bufferIndex %= bufferCount; ndspWaveBuf *buf = &buffers[bufferIndex]; buf->nsamples = mixer->mixCallback(buf->data_adpcm, sampleLen); if (buf->nsamples > 0) { DSP_FlushDataCache(buf->data_vaddr, bufferSize); ndspChnWaveBufAdd(channel, buf); } } } for (i = 0; i < bufferCount; ++i) linearFree(buffers[i].data_pcm8); }
// startMusic: Plays a song with network streaming feature void startMusic(Socket* sock, Music* src) { closeStream = false; src->streamLoop = false; // TODO: Add looping feature songPointer = 0; netSize = 0; u32 ch = 0x08; u32 ch2 = 0x09; bool non_native_encode = false; ThreadFunc streamFunction = streamWAV; u8 tmp_encode; /*if (src->encoding == CSND_ENCODING_VORBIS){ streamFunction = streamOGG; tmp_encode = src->encoding; src->encoding = CSND_ENCODING_PCM16; non_native_encode = true; }*/ int raw_format; if (src->audiotype == 1) raw_format = NDSP_FORMAT_MONO_PCM16; else raw_format = NDSP_FORMAT_STEREO_PCM16; ndspChnReset(ch); ndspChnWaveBufClear(ch); ndspChnSetInterp(ch, NDSP_INTERP_LINEAR); ndspChnSetRate(ch, float(src->samplerate)); ndspChnSetFormat(ch, raw_format); ndspWaveBuf* waveBuf = (ndspWaveBuf*)calloc(1, sizeof(ndspWaveBuf)); createDspBlock(waveBuf, src->bytepersample, src->mem_size, 0, (u32*)src->audiobuf); src->blocks = NULL; populatePurgeTable(src, waveBuf); ndspChnWaveBufAdd(ch, waveBuf); src->tick = osGetTime(); src->wavebuf = waveBuf; src->ch = ch; src->isPlaying = true; src->lastCheck = ndspChnGetSamplePos(ch); src->streamLoop = false; svcCreateEvent(&updateStream,0); cachePackage* pkg = (cachePackage*)malloc(sizeof(cachePackage)); pkg->client = sock; pkg->song = src; svcSignalEvent(updateStream); threadCreate(streamFunction, pkg, 8192, 0x18, 0, true); src->isPlaying = true; }
// streamMusic: playback half of a double-buffered vgmstream decode pipeline.
// A separate producer thread decodes into playBuffer1/playBuffer2; the two
// threads hand buffers back and forth via the bufferReadyProduceRequest /
// bufferReadyConsumeRequest kernel events. Runs until runThreads goes false.
// @param arg  a stream_filename* whose ->stream is the open VGMSTREAM.
void streamMusic(void* arg) {
	clearBottomScreen();
	debug("play_buffer start\n");
	stream_filename* strm_file = static_cast<stream_filename*>(arg);
	VGMSTREAM* vgmstream = strm_file->stream;
	if (!vgmstream) return;

	// One NDSP channel per vgmstream channel, starting at channel 0.
	int channel = 0;
	ndspSetOutputMode(NDSP_OUTPUT_STEREO);
	for (int i = 0; i < vgmstream->channels; i++) {
		ndspChnReset(channel + i);
		ndspChnSetInterp(channel + i, NDSP_INTERP_LINEAR);
		// NOTE(review): the rate is divided by the channel count, and every
		// per-channel voice is set to the STEREO format — confirm this matches
		// how playSoundChannels() interleaves/splits the sample data.
		ndspChnSetRate(channel + i, vgmstream->sample_rate / vgmstream->channels);
		ndspChnSetFormat(channel + i, NDSP_FORMAT_STEREO_PCM16);
	}

	// One set of wave buffers per play buffer (double buffering).
	std::vector<ndspWaveBuf> waveBufs1(vgmstream->channels);
	std::vector<ndspWaveBuf> waveBufs2(vgmstream->channels);
	for (auto& waveBuf : waveBufs1) memset(&waveBuf, 0, sizeof(ndspWaveBuf));
	for (auto& waveBuf : waveBufs2) memset(&waveBuf, 0, sizeof(ndspWaveBuf));

	debug("play_buffer signal produce\n");
	svcSignalEvent(bufferReadyProduceRequest);

	// Wait for 2 buffers to play: request/await each fill from the producer
	// before starting playback, so both halves of the pipeline are primed.
	debug("play_buffer wait data 1\n");
	svcWaitSynchronization(bufferReadyConsumeRequest, U64_MAX);
	svcClearEvent(bufferReadyConsumeRequest);
	debug("play_buffer signal produce\n");
	svcSignalEvent(bufferReadyProduceRequest);
	debug("play_buffer wait data 2\n");
	svcWaitSynchronization(bufferReadyConsumeRequest, U64_MAX);
	svcClearEvent(bufferReadyConsumeRequest);

	// Play it: queue both primed buffers back to back.
	debug("play_buffer play\n");
	playSoundChannels(channel, playBuffer1.samples, false, playBuffer1.channels, waveBufs1);
	playSoundChannels(channel, playBuffer2.samples, false, playBuffer2.channels, waveBufs2);

	// "playing*" tracks the buffer currently audible; "buffer"/"waveBuf" is the
	// one that will be refilled and queued next.
	stream_buffer* buffer = &playBuffer2;
	stream_buffer* playingBuf = &playBuffer1;
	std::vector<ndspWaveBuf>* waveBuf = &waveBufs2;
	std::vector<ndspWaveBuf>* playingWaveBuf = &waveBufs1;

	debug("play_buffer signal produce\n");
	svcSignalEvent(bufferReadyProduceRequest);

	while (runThreads) {
		// When the audible buffer finishes, swap roles and queue the other one.
		if (playingWaveBuf->at(0).status == NDSP_WBUF_DONE) {
			debug("play_buffer wait data\n");
			// Wait for sound data here
			svcWaitSynchronization(bufferReadyConsumeRequest, U64_MAX);
			svcClearEvent(bufferReadyConsumeRequest);

			// Flip buffers
			if (buffer == &playBuffer1) {
				buffer = &playBuffer2;
				playingBuf = &playBuffer1;
				waveBuf = &waveBufs2;
				playingWaveBuf = &waveBufs1;
			} else {
				buffer = &playBuffer1;
				playingBuf = &playBuffer2;
				waveBuf = &waveBufs1;
				playingWaveBuf = &waveBufs2;
			}

			debug("play_buffer play\n");
			playSoundChannels(channel, buffer->samples, false, buffer->channels, *waveBuf);
			debug("play_buffer signal produce\n");
			svcSignalEvent(bufferReadyProduceRequest);
		}
	}

	// Stop all voices we started before the thread exits.
	for (int i = 0; i < vgmstream->channels; i++) {
		ndspChnWaveBufClear(channel + i);
	}
	debug("play_buffer done\n");
}
//---------------------------------------------------------------------------- int main(int argc, char **argv) { //---------------------------------------------------------------------------- PrintConsole topScreen; ndspWaveBuf waveBuf[2]; gfxInitDefault(); consoleInit(GFX_TOP, &topScreen); consoleSelect(&topScreen); printf("libctru streaming audio\n"); stb_vorbis_info info; int error; vorbisFile = stb_vorbis_open_filename("/mau5.ogg", &error, NULL); info = stb_vorbis_get_info(vorbisFile); Samples = info.sample_rate; u32 *audioBuffer = (u32*)linearAlloc(Samples*sizeof(s16)*2); bool fillBlock = false; ndspInit(); ndspSetOutputMode(NDSP_OUTPUT_STEREO); ndspChnSetInterp(0, NDSP_INTERP_LINEAR); ndspChnSetRate(0, Samples); ndspChnSetFormat(0, NDSP_FORMAT_STEREO_PCM16); float mix[12]; memset(mix, 0, sizeof(mix)); mix[0] = 1.0; mix[1] = 1.0; ndspChnSetMix(0, mix); int note = 4; memset(waveBuf,0,sizeof(waveBuf)); waveBuf[0].data_vaddr = &audioBuffer[0]; waveBuf[0].nsamples = Samples; waveBuf[1].data_vaddr = &audioBuffer[Samples]; waveBuf[1].nsamples = Samples; ndspChnWaveBufAdd(0, &waveBuf[0]); ndspChnWaveBufAdd(0, &waveBuf[1]); printf("Press up/down to change tone\n"); while(aptMainLoop()) { gfxSwapBuffers(); gfxFlushBuffers(); gspWaitForVBlank(); hidScanInput(); u32 kDown = hidKeysDown(); if (kDown & KEY_START) break; // break in order to return to hbmenu if (waveBuf[fillBlock].status == NDSP_WBUF_DONE) { fill_buffer(waveBuf[fillBlock].data_pcm16, waveBuf[fillBlock].nsamples); ndspChnWaveBufAdd(0, &waveBuf[fillBlock]); fillBlock = !fillBlock; } } ndspExit(); linearFree(audioBuffer); gfxExit(); return 0; }