Example #1
File: Sound.cpp Project: krruzic/cpp3ds
void Sound::play()
{
	if (!m_buffer || m_buffer->getSampleCount() == 0)
		return;
	if (getStatus() == Playing)
		stop();

	m_channel = 0;
	while (m_channel < 24 && ndspChnIsPlaying(m_channel))
		m_channel++;

	if (m_channel == 24) {
		err() << "Sound::play() failed because all channels are in use." << std::endl;
		m_channel = -1;
		return;
	}

	setPlayingOffset(m_pauseOffset);

	if (m_pauseOffset != Time::Zero)
		m_pauseOffset = Time::Zero;

	u32 size = sizeof(Int16) * m_buffer->getSampleCount();

	ndspChnReset(m_channel);
	ndspChnSetInterp(m_channel, NDSP_INTERP_POLYPHASE);
	ndspChnSetRate(m_channel, float(m_buffer->getSampleRate()));
	ndspChnSetFormat(m_channel, (m_buffer->getChannelCount() == 1) ? NDSP_FORMAT_MONO_PCM16 : NDSP_FORMAT_STEREO_PCM16);

	DSP_FlushDataCache((u8*)m_buffer->getSamples(), size);

	ndspChnWaveBufAdd(m_channel, &m_ndspWaveBuf);
}
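
Note that play() above relies on setPlayingOffset() (Example #2) to fill in m_ndspWaveBuf's data_vaddr and nsamples before the buffer is queued. Reduced to a minimal sketch using only libctru calls, the common pattern is: configure the channel, describe the sample data in an ndspWaveBuf, flush the CPU data cache, then queue it with ndspChnWaveBufAdd. The sketch below assumes ndspInit() has already succeeded, that the samples live in linear memory (linearAlloc), and mono PCM16 data; all names are illustrative, not part of cpp3ds.

#include <string.h>
#include <3ds.h>

static ndspWaveBuf s_waveBuf;

void queueOneShot(int channel, const s16* samples, u32 sampleCount, float rate)
{
	// Configure the channel for the data we are about to queue.
	ndspChnReset(channel);
	ndspChnSetInterp(channel, NDSP_INTERP_LINEAR);
	ndspChnSetRate(channel, rate);
	ndspChnSetFormat(channel, NDSP_FORMAT_MONO_PCM16);

	// Describe the sample data; `samples` must point into linear memory.
	memset(&s_waveBuf, 0, sizeof(s_waveBuf));
	s_waveBuf.data_vaddr = samples;
	s_waveBuf.nsamples   = sampleCount;
	s_waveBuf.looping    = false;

	// The DSP reads the buffer directly, so flush the CPU data cache first.
	DSP_FlushDataCache(samples, sampleCount * sizeof(s16));

	ndspChnWaveBufAdd(channel, &s_waveBuf);
}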
Example #2
File: Sound.cpp Project: krruzic/cpp3ds
void Sound::setPlayingOffset(Time timeOffset)
{
	Status status = getStatus();
	stop();
	m_playOffset = timeOffset;
	int offset = m_buffer->getSampleRate() * m_buffer->getChannelCount() * timeOffset.asSeconds();
	m_ndspWaveBuf.data_vaddr = m_buffer->getSamples() + offset;
	m_ndspWaveBuf.nsamples = m_buffer->getSampleCount() - offset;
	if (status == Playing)
		ndspChnWaveBufAdd(m_channel, &m_ndspWaveBuf);
}
Example #3
void playSoundChannels(int startchn, int samples, bool loop, std::vector<sample*>& data, std::vector<ndspWaveBuf>& waveBufs)
{
    for (unsigned int i = 0; i < data.size(); i++)
    {
        int channel = startchn + i;
        waveBufs[i].data_vaddr = data[i];
        waveBufs[i].nsamples = samples / data.size();
        waveBufs[i].looping = loop;
        DSP_FlushDataCache(data[i], samples * sizeof(sample));
        ndspChnWaveBufAdd(channel, &waveBufs[i]);
    }
}
Example #4
static void audioThreadFunc(void *arg) {
	Audio::MixerImpl *mixer = (Audio::MixerImpl *)arg;
	OSystem_3DS *osys = (OSystem_3DS *)g_system;

	int i;
	const int channel = 0;
	int bufferIndex = 0;
	const int bufferCount = 3;
	const int bufferSize = 80000; // Can't be too small, based on delayMillis duration
	const int sampleRate = mixer->getOutputRate();
	int sampleLen = 0;
	uint32 lastTime = osys->getMillis(true);
	uint32 time = lastTime;
	ndspWaveBuf buffers[bufferCount];

	for (i = 0; i < bufferCount; ++i) {
		memset(&buffers[i], 0, sizeof(ndspWaveBuf));
		buffers[i].data_vaddr = linearAlloc(bufferSize);
		buffers[i].looping = false;
		buffers[i].status = NDSP_WBUF_FREE;
	}

	ndspChnReset(channel);
	ndspChnSetInterp(channel, NDSP_INTERP_LINEAR);
	ndspChnSetRate(channel, sampleRate);
	ndspChnSetFormat(channel, NDSP_FORMAT_STEREO_PCM16);

	while (!osys->exiting) {
		osys->delayMillis(100); // Note: Increasing the delay requires a bigger buffer

		time = osys->getMillis(true);
		sampleLen = (time - lastTime) * 22 * 4; // sampleRate / 1000 * channelCount * sizeof(int16);
		lastTime = time;

		if (!osys->sleeping && sampleLen > 0) {
			bufferIndex++;
			bufferIndex %= bufferCount;
			ndspWaveBuf *buf = &buffers[bufferIndex];

			buf->nsamples = mixer->mixCallback(buf->data_adpcm, sampleLen);
			if (buf->nsamples > 0) {
				DSP_FlushDataCache(buf->data_vaddr, bufferSize);
				ndspChnWaveBufAdd(channel, buf);
			}
		}
	}

	for (i = 0; i < bufferCount; ++i)
		linearFree(buffers[i].data_pcm8);
}
Example #5
// startMusic: Plays a song with network streaming feature
void startMusic(Socket* sock, Music* src)
{
	closeStream = false;
	src->streamLoop = false; // TODO: Add looping feature
	songPointer = 0;
	netSize = 0;
	u32 ch = 0x08;
	u32 ch2 = 0x09;
	bool non_native_encode = false;
	ThreadFunc streamFunction = streamWAV;
	u8 tmp_encode;
	/*if (src->encoding == CSND_ENCODING_VORBIS){
		streamFunction = streamOGG;
		tmp_encode = src->encoding;
		src->encoding = CSND_ENCODING_PCM16;
		non_native_encode = true;
	}*/
	int raw_format;
	if (src->audiotype == 1) raw_format = NDSP_FORMAT_MONO_PCM16;
	else raw_format = NDSP_FORMAT_STEREO_PCM16;
	ndspChnReset(ch);
	ndspChnWaveBufClear(ch);
	ndspChnSetInterp(ch, NDSP_INTERP_LINEAR);
	ndspChnSetRate(ch, float(src->samplerate));
	ndspChnSetFormat(ch, raw_format);
	ndspWaveBuf* waveBuf = (ndspWaveBuf*)calloc(1, sizeof(ndspWaveBuf));
	createDspBlock(waveBuf, src->bytepersample, src->mem_size, 0, (u32*)src->audiobuf);
	src->blocks = NULL;
	populatePurgeTable(src, waveBuf);
	ndspChnWaveBufAdd(ch, waveBuf);
	src->tick = osGetTime();
	src->wavebuf = waveBuf;
	src->ch = ch;
	src->isPlaying = true;
	src->lastCheck = ndspChnGetSamplePos(ch);
	src->streamLoop = false;
	svcCreateEvent(&updateStream,0);
	cachePackage* pkg = (cachePackage*)malloc(sizeof(cachePackage));
	pkg->client = sock;
	pkg->song = src;
	svcSignalEvent(updateStream);
	threadCreate(streamFunction, pkg, 8192, 0x18, 0, true);
	src->isPlaying = true;
}
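
Examples #5 and #7 both call a project helper, createDspBlock(), that is not shown on this page. As a rough guide only, a stand-in for such a helper (an assumption about what it does, not the lpp-3ds implementation) would prepare the ndspWaveBuf over an existing linear-memory buffer and flush the CPU cache:

#include <string.h>
#include <3ds.h>

// Hypothetical stand-in for createDspBlock(); not the lpp-3ds source.
static void prepareDspBlock(ndspWaveBuf* waveBuf, u32 bytePerSample, u32 size, bool loop, u32* buffer)
{
	memset(waveBuf, 0, sizeof(ndspWaveBuf));
	waveBuf->data_vaddr = buffer;                // sample data already in linear memory
	waveBuf->nsamples   = size / bytePerSample;  // convert byte size to sample count
	waveBuf->looping    = loop;
	DSP_FlushDataCache(buffer, size);            // make the data visible to the DSP
}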
Example #6
void N3DS_SoundCallback(void* dud)
{
	if (N3DS_audioBuf[N3DS_soundFillBlock].status == NDSP_WBUF_DONE) {
		u32 flen = (N3DS_bufferSize * N3DS_sampleSize);
		u32 ilen = flen >> 2;
		u32* invbuf = (u32*) N3DS_audioBuf[N3DS_soundFillBlock].data_vaddr;

		if (N3DS_sound->sample_size > 1)
		{
			Sound_Callback((u8*) N3DS_audioBuf[N3DS_soundFillBlock].data_pcm8, flen);
		} else
		{
			Sound_Callback((u8*) N3DS_audioBuf[N3DS_soundFillBlock].data_pcm8, flen);
			for(int i = 0; i < ilen; i++)
				invbuf[i] ^= 0x80808080;
		}

		DSP_FlushDataCache(N3DS_audioBuf[N3DS_soundFillBlock].data_pcm8, flen);

		ndspChnWaveBufAdd(0, &N3DS_audioBuf[N3DS_soundFillBlock]);
		N3DS_soundFillBlock = !N3DS_soundFillBlock;
	}
}
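
Callbacks like N3DS_SoundCallback are normally registered with ndspSetCallback() so they run once per audio frame. A minimal double-buffer refill along the same lines might look like the sketch below; fillSamples() is a hypothetical application-side generator, and mono PCM16 on channel 0 is assumed.

#include <3ds.h>

extern void fillSamples(s16* dst, u32 nsamples);  // hypothetical generator, assumed to exist elsewhere

static ndspWaveBuf g_waveBuf[2];  // assumed to be filled and queued on channel 0 during setup
static int g_fillIndex = 0;

static void audioFrameCallback(void* unused)
{
	(void)unused;

	ndspWaveBuf* buf = &g_waveBuf[g_fillIndex];
	if (buf->status != NDSP_WBUF_DONE)
		return;  // this half is still queued or playing

	fillSamples(buf->data_pcm16, buf->nsamples);  // refill the finished half
	DSP_FlushDataCache(buf->data_vaddr, buf->nsamples * sizeof(s16));
	ndspChnWaveBufAdd(0, buf);                    // hand it back to the DSP
	g_fillIndex ^= 1;
}

// Registered once after channel setup, e.g.:
//   ndspSetCallback(audioFrameCallback, NULL);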
Example #7
void streamWAV(void* arg){

	// Fetching cachePackage struct from main thread
	cachePackage* pack = (cachePackage*)arg;
	while(1) {
	
		// Waiting for updateStream event
		svcWaitSynchronization(updateStream, U64_MAX);
		svcClearEvent(updateStream);
		
		// Close the thread if closeStream event received
		if(closeStream){
			closeStream = false;
			svcExitThread();
		}
		
		// Check if the current stream is paused or not
		Music* src = pack->song;
		Socket* Client = pack->client;
		if (src->isPlaying){
		
			// Check if a free buffer is available
			if (src->wavebuf2 == NULL){
			
				// Check if file reached EOF
				if (src->audio_pointer >= src->size){
				
					// Check if playback ended
					if (!ndspChnIsPlaying(src->ch)){
						src->isPlaying = false;
						src->tick = (osGetTime()-src->tick);
					}
					
					continue;
				}
				
				// Swap audiobuffers
				u8* tmp = src->audiobuf;
				src->audiobuf = src->audiobuf2;
				src->audiobuf2 = tmp;
				
				// Create a new block for DSP service
				src->wavebuf2 = (ndspWaveBuf*)calloc(1,sizeof(ndspWaveBuf));
				createDspBlock(src->wavebuf2, src->bytepersample, src->mem_size, 0, (u32*)src->audiobuf);
				populatePurgeTable(src, src->wavebuf2);
				ndspChnWaveBufAdd(src->ch, src->wavebuf2);
				socketSend(Client, "exec2:0000");
				u32 processedBytes = 0;
				netSize = 0;
				while (netSize <= 0) heapRecv(Client, 2048);
				while (processedBytes < (src->mem_size / 2)){
					if (netSize <= 0){
							heapRecv(Client, 2048);
							continue;
					}
					if (strncmp((char*)netBuffer, "EOF", 3) == 0) break;
					memcpy(&streamCache[songPointer + processedBytes], netBuffer, netSize);
					processedBytes = processedBytes + netSize;
					heapRecv(Client, 2048);
				}
				memcpy(src->audiobuf, &streamCache[songPointer], src->mem_size);
				if (songPointer == 0) songPointer = src->mem_size / 2;
				else songPointer = 0;
				src->audio_pointer = src->audio_pointer + src->mem_size;
				
				// Changing endianess if Big Endian
				if (src->big_endian){
					u64 i = 0;
					while (i < src->mem_size){
						u8 tmp = src->audiobuf[i];
						src->audiobuf[i] = src->audiobuf[i+1];
						src->audiobuf[i+1] = tmp;
						i=i+2;	
					}
				}
			
			}
			
			// Check if a block playback is finished
			u32 curSample = ndspChnGetSamplePos(src->ch);
			if (src->lastCheck > curSample){
			
				// Prepare next block
				src->wavebuf = src->wavebuf2;
				src->wavebuf2 = NULL;
			
			}
			
			// Update sample position tick
			src->lastCheck = curSample;
		
		}
	}		
}
Example #8
//----------------------------------------------------------------------------
int main(int argc, char **argv) {
//----------------------------------------------------------------------------

	PrintConsole topScreen;
	ndspWaveBuf waveBuf[2];

	gfxInitDefault();

	consoleInit(GFX_TOP, &topScreen);

	consoleSelect(&topScreen);

	printf("libctru streaming audio\n");
	
	
	
	stb_vorbis_info info;
	int error;


      vorbisFile = stb_vorbis_open_filename("/mau5.ogg", &error, NULL);
      info = stb_vorbis_get_info(vorbisFile);
      Samples = info.sample_rate;
   
	

	u32 *audioBuffer = (u32*)linearAlloc(Samples*sizeof(s16)*2*2); // two buffers of Samples stereo PCM16 frames each

	bool fillBlock = false;

	ndspInit();

	ndspSetOutputMode(NDSP_OUTPUT_STEREO);

	ndspChnSetInterp(0, NDSP_INTERP_LINEAR);
	ndspChnSetRate(0, Samples);
	ndspChnSetFormat(0, NDSP_FORMAT_STEREO_PCM16);

	float mix[12];
	memset(mix, 0, sizeof(mix));
	mix[0] = 1.0;
	mix[1] = 1.0;
	ndspChnSetMix(0, mix);

	memset(waveBuf,0,sizeof(waveBuf));
	waveBuf[0].data_vaddr = &audioBuffer[0];
	waveBuf[0].nsamples = Samples;
	waveBuf[1].data_vaddr = &audioBuffer[Samples];
	waveBuf[1].nsamples = Samples;

	ndspChnWaveBufAdd(0, &waveBuf[0]);
	ndspChnWaveBufAdd(0, &waveBuf[1]);

	printf("Press up/down to change tone\n");

	
	
	
	
	
	
	while(aptMainLoop()) {

		gfxSwapBuffers();
		gfxFlushBuffers();
		gspWaitForVBlank();

		hidScanInput();
		u32 kDown = hidKeysDown();

		if (kDown & KEY_START)
			break; // break in order to return to hbmenu


		if (waveBuf[fillBlock].status == NDSP_WBUF_DONE) {

			fill_buffer(waveBuf[fillBlock].data_pcm16, waveBuf[fillBlock].nsamples);

			ndspChnWaveBufAdd(0, &waveBuf[fillBlock]);

			fillBlock = !fillBlock;
		}
	}

	ndspExit();

	linearFree(audioBuffer);

	gfxExit();
	return 0;
}