Пример #1
0
int RtInOut( void* outputBuffer, void* inputBuffer, unsigned int framesPerBuffer, 
			double streamTime, RtAudioStreamStatus status, void *userdata )
{
	if(!UNTZ::System::get()->getData()->isActive())
	{
		memset(outputBuffer, 0, sizeof(float) * framesPerBuffer * UNTZ::System::get()->getData()->getNumOutputChannels());
		return 0;
	}

	if(status)
		std::cout << "Stream underflow detected!" << std::endl;	
	AudioMixer *mixer = (AudioMixer*)userdata;
	mixer->process(0, NULL, UNTZ::System::get()->getData()->getNumOutputChannels(), (float*)outputBuffer, framesPerBuffer);
	
    // volume & clipping
    // HBS
    UInt32 samples = UNTZ::System::get()->getData()->getNumOutputChannels() * framesPerBuffer;
	float volume = mixer->getVolume();
    // TODO: doing an extra read/write here is painful...
    float *outB = (float*)outputBuffer;
	for (UInt32 k = 0; k < samples; ++k)
    {
        float val = *outB * volume;
        val = val > 1.0 ? 1.0 : val;
        val = val < -1.0 ? -1.0 : val;
        *(outB)++ = val;
    }

	return 0;
}
Пример #2
0
    static void EmptyAudioCallback(void * userdata, unsigned char * stream, int length)
    {
        SDL_memset(stream, 0, length);

        AudioMixer * mixer = (class AudioMixer *)userdata;
        if (mixer)
        {
            mixer->Mix(stream, length);
        }
    }
// NM_RELEASEDCAPTURE handler for the audio input level slider: pushes the
// new slider position to the grabber's mixer volume.  The slider is
// inverted (0 at the top), so the applied volume is 100 minus the position.
// Fix: removed the dead local 'nCode' (read from pNMHDR but never used).
void CSgSelectionPreparationBar::OnReleasedcaptureAudioSlider(NMHDR* pNMHDR, LRESULT* pResult) 
{
    if (m_pAvGrabber) {
        AudioMixer *pAudioMixer = m_pAvGrabber->getAudioMixer();
        if (pAudioMixer) {
            // Slider runs top-to-bottom, so invert before applying.
            pAudioMixer->setVolume(100 - m_sldAudioInputLevel.GetPos());
        }
    }

    *pResult = 0;
}
Пример #4
0
/***********************************
* startMixingAudio
*	Crea el thread de mezclado de audio
************************************/
void *AudioMixer::startMixingAudio(void *par)
{
	Log("-MixAudioThread [%d]\n",getpid());

	//Obtenemos el parametro
	AudioMixer *am = (AudioMixer *)par;

	//Bloqueamos las se�ales
	blocksignals();
	
	//Ejecutamos
	pthread_exit((void *)am->MixAudio());
}
Пример #5
0
//DWORD WINAPI AudioMixer::mixerEventThread(LPVOID param)
// Worker thread: waits on mixerEvent_ and forwards each signal to the
// registered mixer callback until a quit is requested.
// NOTE(review): eventThreadQuitRequest_ and mixerEventThreadExited_ are
// plain flags shared across threads here -- confirm they are declared
// volatile/atomic at their definition.
void __cdecl AudioMixer::mixerEventThread(LPVOID param)
{
   CLanguageSupport::SetThreadLanguage();

   // 'param' carries the owning AudioMixer instance.
   AudioMixer* mixer = (AudioMixer*) param;
   mixer->mixerEventThreadExited_ = false;

   while (!mixer->eventThreadQuitRequest_)
   {
      // Block until the mixer signals an event, then re-arm it.
      WaitForSingleObject(mixer->mixerEvent_, INFINITE);
      ResetEvent(mixer->mixerEvent_);

      // Re-check the quit flag: the event may have been set just to wake
      // this thread for shutdown.
      if (!mixer->eventThreadQuitRequest_ && mixer->isEnabled())
         (*(mixer->lpMixerCallback_)) (mixer->mixerHwnd_, 0, 0, 0);
   }

   // Tell the owner this thread has finished before exiting.
   mixer->mixerEventThreadExited_ = true;

   cout << "+-+-+-+-+-+-+-+-+-+ priscilla has followed" << endl;
   return; // 0;
}
Пример #6
0
int RtInOut( void* outputBuffer, void* inputBuffer, unsigned int framesPerBuffer, 
			double streamTime, RtAudioStreamStatus status, void *userdata )
{
    LinuxSystemData *sysData = (LinuxSystemData *)userdata;
    UInt32 numOutputChannels = UNTZ::System::get()->getData()->getNumOutputChannels();
    UInt32 samples = numOutputChannels * framesPerBuffer;

	if(!UNTZ::System::get()->getData()->isActive())
	{
		memset(outputBuffer, 0, sizeof(float) * samples);
		return 0;
	}

    if(sysData->mOutputBuffer.size() < samples)
    {
        sysData->mOutputBuffer.resize(samples);
    }
    float *mixerOutputBuffer = (float*)&sysData->mOutputBuffer[0];
    
	if(status)
		std::cout << "Stream underflow detected!" << std::endl;	
	AudioMixer *mixer = &sysData->mMixer;
	mixer->process(0, NULL, numOutputChannels, mixerOutputBuffer, framesPerBuffer);
	
    // volume & clipping & interleaving
	float volume = mixer->getVolume();
    float *outB = (float*)outputBuffer;
    for(UInt32 i=0; i<framesPerBuffer; i++)
    {
        for(UInt32 j=0; j<numOutputChannels; j++)
        {
			float val = volume * mixerOutputBuffer[j*framesPerBuffer+i];
            val = val > 1.0 ? 1.0 : val;
            val = val < -1.0 ? -1.0 : val;
            *(outB)++ = val;
        }
    }    

	return 0;
}
// CCmdUI update handler for the audio input level slider.  Enables the
// slider only when the grabber exposes a mixer with at least one source,
// and -- while the left mouse button is up, i.e. the user is not dragging
// the thumb -- syncs the (inverted) slider position with the mixer volume.
// Fix: nMixerVolume was UINT, which made the '< 0' clamp dead code
// (an unsigned value is never negative); use a signed int instead.
void CSgSelectionPreparationBar::OnUpdateAudioSlider(CCmdUI *pCmdUI)
{
    if (m_pAvGrabber && 
        m_pAvGrabber->getAudioMixer() && 
        m_pAvGrabber->getAudioMixer()->getSourceCount() > 0)
        pCmdUI->Enable(true);
    else
        pCmdUI->Enable(false);

    // GetKeyState's high bit is set while the button is down, making the
    // return negative; >= 0 therefore means "left button not pressed".
    if (m_pAvGrabber && GetKeyState(VK_LBUTTON) >= 0)
    {
        AudioMixer *pAudioMixer = m_pAvGrabber->getAudioMixer();
        if (pAudioMixer) {
            int nMixerVolume = (int)pAudioMixer->getVolume();
            // Clamp to the slider's valid [0, 100] range.
            if (nMixerVolume < 0)
                nMixerVolume = 0;
            if (nMixerVolume > 100)
                nMixerVolume = 100;

            m_sldAudioInputLevel.SetPos(100 - nMixerVolume);
        }
    }
}
Пример #8
0
// Test harness for AudioMixer: builds one signal provider per command-line
// track spec ("chirp:", "sine:", or a filename), mixes them at the requested
// sample rate / channel count, and writes the result (plus an optional aux
// bus) to output files.
// Fix: the mixer pump loop previously computed `outputFrames -
// mixerFrameCount` with size_t operands, which underflows to a huge value
// when outputFrames < mixerFrameCount (very short inputs) and loops almost
// forever; the limit is now computed with an explicit guard.
int main(int argc, char* argv[]) {
    const char* const progname = argv[0];
    bool useInputFloat = false;
    bool useMixerFloat = false;
    bool useRamp = true;
    uint32_t outputSampleRate = 48000;
    uint32_t outputChannels = 2; // stereo for now
    std::vector<int> Pvalues;
    const char* outputFilename = NULL;
    const char* auxFilename = NULL;
    std::vector<int32_t> names;
    std::vector<SignalProvider> providers;
    std::vector<audio_format_t> formats;

    // Parse options; remaining arguments are track specifications.
    for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
        switch (ch) {
        case 'f':
            useInputFloat = true;
            break;
        case 'm':
            useMixerFloat = true;
            break;
        case 'c':
            outputChannels = atoi(optarg);
            break;
        case 's':
            outputSampleRate = atoi(optarg);
            break;
        case 'o':
            outputFilename = optarg;
            break;
        case 'a':
            auxFilename = optarg;
            break;
        case 'P':
            if (parseCSV(optarg, Pvalues) < 0) {
                fprintf(stderr, "incorrect syntax for -P option\n");
                return EXIT_FAILURE;
            }
            break;
        case '?':
        default:
            usage(progname);
            return EXIT_FAILURE;
        }
    }
    argc -= optind;
    argv += optind;

    if (argc == 0) {
        usage(progname);
        return EXIT_FAILURE;
    }

    size_t outputFrames = 0;

    // create providers for each track
    names.resize(argc);
    providers.resize(argc);
    formats.resize(argc);
    for (int i = 0; i < argc; ++i) {
        static const char chirp[] = "chirp:";
        static const char sine[] = "sine:";
        static const double kSeconds = 1;
        bool useFloat = useInputFloat;

        if (!strncmp(argv[i], chirp, strlen(chirp))) {
            // "chirp:[f,]rate,max" style spec — sweep from 0 to Nyquist.
            std::vector<int> v;
            const char *s = parseFormat(argv[i] + strlen(chirp), &useFloat);

            parseCSV(s, v);
            if (v.size() == 2) {
                printf("creating chirp(%d %d)\n", v[0], v[1]);
                if (useFloat) {
                    providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
                    formats[i] = AUDIO_FORMAT_PCM_FLOAT;
                } else {
                    providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
                    formats[i] = AUDIO_FORMAT_PCM_16_BIT;
                }
                providers[i].setIncr(Pvalues);
            } else {
                fprintf(stderr, "malformed input '%s'\n", argv[i]);
            }
        } else if (!strncmp(argv[i], sine, strlen(sine))) {
            std::vector<int> v;
            const char *s = parseFormat(argv[i] + strlen(sine), &useFloat);

            parseCSV(s, v);
            if (v.size() == 3) {
                printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
                if (useFloat) {
                    providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
                    formats[i] = AUDIO_FORMAT_PCM_FLOAT;
                } else {
                    providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
                    formats[i] = AUDIO_FORMAT_PCM_16_BIT;
                }
                providers[i].setIncr(Pvalues);
            } else {
                fprintf(stderr, "malformed input '%s'\n", argv[i]);
            }
        } else {
            printf("creating filename(%s)\n", argv[i]);
            if (useInputFloat) {
                providers[i].setFile<float>(argv[i]);
                formats[i] = AUDIO_FORMAT_PCM_FLOAT;
            } else {
                providers[i].setFile<short>(argv[i]);
                formats[i] = AUDIO_FORMAT_PCM_16_BIT;
            }
            providers[i].setIncr(Pvalues);
        }
        // calculate the number of output frames
        size_t nframes = (int64_t) providers[i].getNumFrames() * outputSampleRate
                / providers[i].getSampleRate();
        if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames
            outputFrames = nframes;
        }
    }

    // create the output buffer.
    const size_t outputFrameSize = outputChannels
            * (useMixerFloat ? sizeof(float) : sizeof(int16_t));
    const size_t outputSize = outputFrames * outputFrameSize;
    const audio_channel_mask_t outputChannelMask =
            audio_channel_out_mask_from_count(outputChannels);
    void *outputAddr = NULL;
    (void) posix_memalign(&outputAddr, 32, outputSize);
    memset(outputAddr, 0, outputSize);

    // create the aux buffer, if needed.
    const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always
    const size_t auxSize = outputFrames * auxFrameSize;
    void *auxAddr = NULL;
    if (auxFilename) {
        (void) posix_memalign(&auxAddr, 32, auxSize);
        memset(auxAddr, 0, auxSize);
    }

    // create the mixer.
    const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960
    AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate);
    audio_format_t mixerFormat = useMixerFloat
            ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
    float f = AudioMixer::UNITY_GAIN_FLOAT / providers.size(); // normalize volume by # tracks
    static float f0; // zero

    // set up the tracks.
    for (size_t i = 0; i < providers.size(); ++i) {
        //printf("track %d out of %d\n", i, providers.size());
        uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
        const int name = i;
        const status_t status = mixer->create(
                name, channelMask, formats[i], AUDIO_SESSION_OUTPUT_MIX);
        LOG_ALWAYS_FATAL_IF(status != OK);
        names[i] = name;
        mixer->setBufferProvider(name, &providers[i]);
        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                (void *)outputAddr);
        mixer->setParameter(
                name,
                AudioMixer::TRACK,
                AudioMixer::MIXER_FORMAT,
                (void *)(uintptr_t)mixerFormat);
        mixer->setParameter(
                name,
                AudioMixer::TRACK,
                AudioMixer::FORMAT,
                (void *)(uintptr_t)formats[i]);
        mixer->setParameter(
                name,
                AudioMixer::TRACK,
                AudioMixer::MIXER_CHANNEL_MASK,
                (void *)(uintptr_t)outputChannelMask);
        mixer->setParameter(
                name,
                AudioMixer::TRACK,
                AudioMixer::CHANNEL_MASK,
                (void *)(uintptr_t)channelMask);
        mixer->setParameter(
                name,
                AudioMixer::RESAMPLE,
                AudioMixer::SAMPLE_RATE,
                (void *)(uintptr_t)providers[i].getSampleRate());
        if (useRamp) {
            // Start at zero and ramp up to the per-track unity share.
            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0);
            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0);
            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f);
            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f);
        } else {
            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
        }
        if (auxFilename) {
            mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
                    (void *) auxAddr);
            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, &f0);
            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f);
        }
        mixer->enable(name);
    }

    // pump the mixer to process data.
    // Guard the subtraction: size_t underflow would loop (nearly) forever
    // when the input is shorter than one mixer frame.
    size_t i;
    const size_t frameLimit =
            outputFrames > mixerFrameCount ? outputFrames - mixerFrameCount : 0;
    for (i = 0; i < frameLimit; i += mixerFrameCount) {
        for (size_t j = 0; j < names.size(); ++j) {
            mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                    (char *) outputAddr + i * outputFrameSize);
            if (auxFilename) {
                mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
                        (char *) auxAddr + i * auxFrameSize);
            }
        }
        mixer->process();
    }
    outputFrames = i; // reset output frames to the data actually produced.

    // write to files
    writeFile(outputFilename, outputAddr,
            outputSampleRate, outputChannels, outputFrames, useMixerFloat);
    if (auxFilename) {
        // Aux buffer is always in q4_27 format for O and earlier.
        // memcpy_to_i16_from_q4_27((int16_t*)auxAddr, (const int32_t*)auxAddr, outputFrames);
        // Aux buffer is always in float format for P.
        memcpy_to_i16_from_float((int16_t*)auxAddr, (const float*)auxAddr, outputFrames);
        writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false);
    }

    delete mixer;
    free(outputAddr);
    free(auxAddr);
    return EXIT_SUCCESS;
}
Пример #9
0
// Mixes two WAV files into one PCM stream, AAC-encodes the mix and pushes
// it to an RTMP server.  Throwaway integration test.
// Fixes: error messages said "decoder" for encoder failures; the wav2
// open message printed pwavfilename1; stray ';;' after fopen; the five
// malloc'd buffers and the encoder output buffer were leaked on the normal
// path; removed unused locals 'ret' and 'iAudioTS'.
void test_mixwave()
{
    InitMediaCommon(NULL, NULL, NULL, NULL);

    // Create and configure the FDK-AAC encoder for the mixed output.
    CAudioCodec* pAudioEncoder = CreateAudioCodec(kAudioCodecFDKAAC, kEncoder);
    if(NULL == pAudioEncoder)
    {
        printf("create audio encoder failed. codec type: kAudioCodecFDKAAC.\r\n");
        return;
    }
    AudioCodecParam audioFormat;
    audioFormat.iSampleRate = 44100;
    audioFormat.iBitsOfSample=16;
    audioFormat.iNumOfChannels = 2;
    audioFormat.iQuality = 5;
    audioFormat.iProfile = 29;
    audioFormat.iHaveAdts = 1;

    audioFormat.ExtParam.iUsevbr = true;
    audioFormat.ExtParam.iUsedtx = true;
    audioFormat.ExtParam.iCvbr = true;
    audioFormat.ExtParam.iUseInbandfec = true;
    audioFormat.ExtParam.iPacketlossperc = 25;
    audioFormat.ExtParam.iComplexity = 8;
    audioFormat.ExtParam.iFrameDuration = 20*10;

    if(NULL == pAudioEncoder->Init(&audioFormat))
    {
        printf("init audio encoder failed.\r\n"); 
        return;
    }
    int iEncOutSize=0;
    pAudioEncoder->CalcBufSize(&iEncOutSize, 0);
    unsigned char* pAudioEncBuf = (unsigned char*)malloc(iEncOutSize);

    char* pRtmpUrl = (char*)"rtmp://101.201.146.134/hulu/w_test.flv";
    int iAudioFramePos=0;
    u_int32_t timestamp=0;    

    IWriter* pRtmpWriter = CreateWriter(pRtmpUrl);
    pRtmpWriter->Open(pRtmpUrl, NULL);

    // 16-bit stereo, 2048 frames per read.
    int input_size = 2*2*2048;

    AudioStreamFormat asf;
    asf.flag = 0;
    asf.sampleRate = 44100;
    asf.sampleBits= 16;
    asf.channelNum = 2;
    // Bytes of PCM per millisecond, used to derive each frame's timestamp span.
    double iAudioFrameBufLen = (double)asf.sampleRate * asf.channelNum * asf.sampleBits /8/1000;
    double dAudioFrameTSLen = (double)input_size/iAudioFrameBufLen;

    AudioMixer* pAMix =  AudioMixer::CreateAudioMixer(asf, 2); 

    //open wav files
    int format, sample_rate, channels, bits_per_sample;
    
    uint8_t* input_buf_1   = (uint8_t*) malloc(input_size);
    int16_t* convert_buf_1 = (int16_t*) malloc(input_size);
    uint8_t* input_buf_2   = (uint8_t*) malloc(input_size);
    int16_t* convert_buf_2 = (int16_t*) malloc(input_size);
    uint8_t* output_buf    = (uint8_t*) malloc(input_size);

    AudioMixer::AudioDataInfo adInfo[2];

    const char* pwavfilename1="input1.wav";
    const char* pwavfilename2="input2.wav";
    
    void *wav1= wav_read_open(pwavfilename1);
    void *wav2= wav_read_open(pwavfilename2);
    FILE *pfOutWav = fopen("output.pcm", "wb");

    // NOTE(review): the early returns below still leak the buffers/handles
    // opened above; tolerable for a throwaway test, but a goto-style
    // cleanup would be safer.
    if (NULL != wav1) 
    {
        printf("open wav file %s ok\n", pwavfilename1);
        if (!wav_get_header(wav1, &format, &channels, &sample_rate, &bits_per_sample, NULL)) {
            return ;
        }
        if (format != 1 || bits_per_sample != 16) {
            printf("Unsupported WAV format %d\n", format);
            return ;
        }
    }    
    
    if (NULL != wav2) 
    {
        printf("open wav file %s ok\n", pwavfilename2);
        if (!wav_get_header(wav2, &format, &channels, &sample_rate, &bits_per_sample, NULL)) {
            return ;
        }
        if (format != 1 || bits_per_sample != 16) {
            printf("Unsupported WAV format %d\n", format);
            return ;
        }
    } 

    int read1 = 0;
    int read2 = 0;
    if(wav1)
    {
        // Skip ahead in wav1 so the two inputs are offset from each other.
        read1 = wav_read_data(wav1, input_buf_1, input_size);
        read1 = wav_read_data(wav1, input_buf_1, input_size);
        read1 = wav_read_data(wav1, input_buf_1, input_size);
        read1 = wav_read_data(wav1, input_buf_1, input_size);
        read1 = wav_read_data(wav1, input_buf_1, input_size);
    }

    struct timeval tv1;
    struct timeval tv2;

    // Read both wav files chunk by chunk and mix; a missing/exhausted
    // second source is substituted with silence.
    while (1) {
        if(wav1)
            read1 = wav_read_data(wav1, input_buf_1, input_size);
        if(wav1 && read1 <= 0)
        {
            break;
        }
        else if(read1 <=0 ){
            memset(input_buf_1,0,input_size);
        }

/*
        //not use is ok
        for (int i = 0; i < read1/2; i++) {
            const uint8_t* in = &input_buf_1[2*i];
            convert_buf_1[i] = in[0] | (in[1] << 8);
        }
*/

        if(wav2)
            read2 = wav_read_data(wav2, input_buf_2, input_size);
        if(wav2 && read2 <= 0)
            break;
        else if(read2 <=0 ){
            memset(input_buf_2,0,input_size);
        }
/*
        //not use is ok
        for (int i = 0; i < read2/2; i++) {
            const uint8_t* in = &input_buf_2[2*i];
            convert_buf_2[i] = in[0] | (in[1] << 8);
        }
*/

        adInfo[0]._bufferSize = adInfo[0]._leftLength = input_size;
        adInfo[0]._leftData = input_buf_1;
        adInfo[0]._enabled = true;

        adInfo[1]._bufferSize = adInfo[1]._leftLength = input_size;
        adInfo[1]._leftData = input_buf_2;
        adInfo[1]._enabled = true;
        int packetLen = channels*bits_per_sample/8;
        int packnum = input_size/packetLen;

        gettimeofday(&tv1, NULL); 
        pAMix->MixData(output_buf, packnum, packetLen, 0, adInfo, 2);
        gettimeofday(&tv2, NULL);
        
        printf("mix wav file len:%d used:%dus.\r\n", input_size, ((tv2.tv_sec*1000*1000+tv2.tv_usec)-(tv1.tv_sec*1000*1000+tv1.tv_usec))); 
        //printf("mix data read1:%d read2:%d input_size:%d\r\n", read1, read2, input_size);
    
        //write output wav file
   //   fwrite(output_buf, 1, input_size, pfOutWav);


        // Repack the mixed little-endian PCM bytes into int16 samples for
        // the AAC encoder.
        for (int i = 0; i < input_size/2; i++) {
            const uint8_t* in = &output_buf[2*i];
            convert_buf_1[i] = in[0] | (in[1] << 8);
        }

        int iAudioEncBufLen = iEncOutSize;
        pAudioEncoder->Process((unsigned char*)convert_buf_1, input_size, pAudioEncBuf, &iAudioEncBufLen);

        //send it to rtmp server
        // NOTE(review): this pushes the raw PCM (convert_buf_1) with the
        // encoded length -- pAudioEncBuf looks like the intended payload;
        // confirm before changing.
        timestamp = iAudioFramePos * dAudioFrameTSLen;
        pushaudio(pRtmpWriter, &timestamp, (char*)convert_buf_1, iAudioEncBufLen); 
        printf("push audio framePos:%d  wavlen:%d timestamp:%d enclen:%d  frametslen:%f\r\n", iAudioFramePos, input_size, timestamp, iAudioEncBufLen, dAudioFrameTSLen);
        iAudioFramePos++; 
        usleep(40*1000);    
    }

    if(wav1)
        wav_read_close(wav1); 
    if(wav2)
        wav_read_close(wav2); 
    if(pfOutWav)
        fclose(pfOutWav);

    // Release the scratch buffers (previously leaked).
    free(pAudioEncBuf);
    free(input_buf_1);
    free(convert_buf_1);
    free(input_buf_2);
    free(convert_buf_2);
    free(output_buf);
    // NOTE(review): pAudioEncoder / pRtmpWriter / pAMix have no visible
    // release API in this file -- confirm how they should be destroyed.
}
Пример #10
0
// Converts this segment's chunks into one interleaved buffer of
// aOutputChannels AudioDataValue samples (up- or down-mixing each chunk as
// needed, zero-filling silent chunks) and hands the result to aMixer.Mix()
// in a single call.  aID is only used for latency logging.
void
AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels, uint32_t aSampleRate)
{
  nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
  // Offset in the buffer that will end up sent to the AudioStream, in samples.
  uint32_t offset = 0;

  // Nothing to mix for an empty segment; a negative duration is a bug.
  if (GetDuration() <= 0) {
    MOZ_ASSERT(GetDuration() == 0);
    return;
  }

  // One sample per channel per frame over the whole segment.
  uint32_t outBufferLength = GetDuration() * aOutputChannels;
  buf.SetLength(outBufferLength);


  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    AudioChunk& c = *ci;
    uint32_t frames = c.mDuration;

    // If we have written data in the past, or we have real (non-silent) data
    // to write, we can proceed. Otherwise, it means we just started the
    // AudioStream, and we don't have real data to write to it (just silence).
    // To avoid overbuffering in the AudioStream, we simply drop the silence,
    // here. The stream will underrun and output silence anyways.
    if (c.mBuffer && c.mBufferFormat != AUDIO_FORMAT_SILENCE) {
      // Gather the chunk's per-channel pointers, then match the channel
      // count to aOutputChannels before interleaving.
      channelData.SetLength(c.mChannelData.Length());
      for (uint32_t i = 0; i < channelData.Length(); ++i) {
        channelData[i] = c.mChannelData[i];
      }
      if (channelData.Length() < aOutputChannels) {
        // Up-mix. Note that this might actually make channelData have more
        // than aOutputChannels temporarily.
        AudioChannelsUpMix(&channelData, aOutputChannels, gZeroChannel);
      }
      if (channelData.Length() > aOutputChannels) {
        // Down-mix.
        DownmixAndInterleave(channelData, c.mBufferFormat, frames,
                             c.mVolume, aOutputChannels, buf.Elements() + offset);
      } else {
        InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
                                   frames, c.mVolume,
                                   aOutputChannels,
                                   buf.Elements() + offset);
      }
    } else {
      // Assumes that a bit pattern of zeroes == 0.0f
      memset(buf.Elements() + offset, 0, aOutputChannels * frames * sizeof(AudioDataValue));
    }

    offset += frames * aOutputChannels;

#if !defined(MOZILLA_XPCOMRT_API)
    if (!c.mTimeStamp.IsNull()) {
      TimeStamp now = TimeStamp::Now();
      // would be more efficient to c.mTimeStamp to ms on create time then pass here
      LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
              (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
    }
#endif // !defined(MOZILLA_XPCOMRT_API)
  }

  // Only call the mixer if at least one chunk produced samples.
  if (offset) {
    aMixer.Mix(buf.Elements(), aOutputChannels, offset / aOutputChannels, aSampleRate);
  }
}
Пример #11
0
// One-time setup of the synth audio graph:
//   kick/bass step sequencers -> generators -> delay/filter effects
//   -> mixer -> format converter -> audio output (two buffers).
// Operates entirely on module-level g_* objects.  Statement order matters:
// e.g. the filter coefficients are recomputed only after frequency and Q
// are set, so do not reorder these calls casually.
void init_audio_synth()
{
    // Register two output buffers (double buffering) with the audio output.
    AudioOutput::Buffer buf;
    buf.dataSize = BUFFER_SIZE * CHANNEL_NUM * sizeof(int16_t);
    buf.data = (uint8_t *)&g_outBuf[0][0];
    g_audioOut.add_buffer(&buf);
    buf.data = (uint8_t *)&g_outBuf[1][0];
    g_audioOut.add_buffer(&buf);

    // Output pulls from the converter, which pulls from the mixer.
    g_audioOut.set_source(&g_audioOutConverter);
    AudioBuffer audioBuf(&g_audioBuf[0], BUFFER_SIZE);
    g_audioOutConverter.set_buffer(audioBuf);
    g_audioOutConverter.set_source(&g_mixer);

    // Kick: step sequence at 100 BPM ('x' = hit, '-' = rest).
    g_kickSeq.set_sample_rate(kSampleRate);
    g_kickSeq.set_tempo(100.0f);
//     g_kickSeq.set_sequence("x---x---x---x-x-x---x---x---x---xx--x--x--xxx-x-");
    g_kickSeq.set_sequence("x---------x---------"); //x-x----xx---");
    g_kickSeq.init();

    // Kick generator: 50 Hz, short percussive envelope, no sustain.
    g_kickGen.set_sample_rate(kSampleRate);
    g_kickGen.set_sequence(&g_kickSeq);
    g_kickGen.set_freq(50.0f);
    g_kickGen.enable_sustain(false);
    g_kickGen.init();
    g_kickGen.set_attack(0.01f);
    g_kickGen.set_release(0.6f);

    // Bass: sequence with sustain/release markers ('s', '>', 'p').
    g_bassSeq.set_sample_rate(kSampleRate);
    g_bassSeq.set_tempo(100.0f);
    g_bassSeq.set_sequence("--s>>>>p--------"); //"--s>>>p-----s>>>>>>p----");
    g_bassSeq.init();

    // Bass generator: 80 Hz, slow envelope with sustain enabled.
    g_bassGen.set_sample_rate(kSampleRate);
    g_bassGen.set_sequence(&g_bassSeq);
    g_bassGen.set_freq(80.0f);
    g_bassGen.enable_sustain(true);
    g_bassGen.init();
    g_bassGen.set_attack(0.3f);
    g_bassGen.set_release(3.0f);

    // Filter on the bass; coefficients are recomputed after freq/Q are set.
    g_filter.set_sample_rate(kSampleRate);
    g_filter.set_frequency(120.0f);
    g_filter.set_q(0.4f);
    g_filter.recompute_coefficients();
    g_filter.set_input(&g_bassGen);

    // Feedback delay on the kick, synced to one beat of the kick sequence.
    g_delay.set_sample_rate(kSampleRate);
    g_delay.set_maximum_delay_seconds(0.4f);
    g_delay.set_delay_samples(g_kickSeq.get_samples_per_beat());
    g_delay.set_feedback(0.7f);
    g_delay.set_wet_mix(0.5f);
    g_delay.set_dry_mix(0.8f);
    g_delay.set_input(&g_kickGen);

    // Mixer: delayed kick + bass generator with per-input gains.
    // NOTE(review): the bass feeds the mixer directly here, so g_filter's
    // output appears unused -- confirm whether input 1 should be &g_filter.
    AudioBuffer mixBuf(&g_mixBuf[0], BUFFER_SIZE);
    g_mixer.set_buffer(mixBuf);
    g_mixer.set_input_count(2);
    g_mixer.set_input(0, &g_delay, 0.5f);
    g_mixer.set_input(1, &g_bassGen, 0.34f);
//     g_mixer.set_input(2, &g_tickGen, 0.3f);
}