Example #1
	//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
	inline audio create_audio(audio_format aform) {
		audio a;
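		// Map each PCM format to its concrete backend type; NONE falls through
		// and returns a default-constructed (empty) audio handle.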
		switch(aform) {
		case audio_format::NONE:
			break;
		case audio_format::PCM8_MONO:
			a = audio(new audio_mno8);
			break;
		case audio_format::PCM8_STEREO:
			a = audio(new audio_sto8);
			break;
		case audio_format::PCM16_MONO:
			a = audio(new audio_mno16);
			break;
		case audio_format::PCM16_STEREO:
			a = audio(new audio_sto16);
			break;
		case audio_format::PCM24_MONO:
			a = audio(new audio_mno24);
			break;
		case audio_format::PCM24_STEREO:
			a = audio(new audio_sto24);
			break;
		case audio_format::PCM32_MONO:
			a = audio(new audio_mno32);
			break;
		case audio_format::PCM32_STEREO:
			a = audio(new audio_sto32);
			break;
		}
		return a;
	}
Example #2
int main(int argc, char **argv) {
    glutInit(&argc, argv);						// initialize GLUT
    glutInitWindowSize(screenWidth, screenHeight);				// startup window size 
    glutInitWindowPosition(100, 100);           // where to put window on screen
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);    // 8 bit R,G,B,A + double buffer + depth buffer
    
    glutCreateWindow("OpenGL teapots");				// application window is created and displayed
    
    glViewport(0, 0, screenWidth, screenHeight);
    
    glutDisplayFunc(onDisplay);					// register callback
    glutIdleFunc(onIdle);						// register callback
    glutKeyboardFunc(onKeyboard);
    glutKeyboardUpFunc(onKeyboardUp);
    for(int i=0; i<256; i++)
        keysPressed.push_back(false);
    glutMouseFunc(onMouse);
    glutMotionFunc(onMouseMotion);
    glutReshapeFunc(onReshape);
    
    glEnable(GL_LIGHTING);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_NORMALIZE);
    
    std::thread audio (playAudio);
    
    scene.initialize();
    
    glutMainLoop();								// launch event handling loop
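    // Note: under classic GLUT, glutMainLoop() never returns, so killAudio()
    // below is unreachable; freeglut can be configured to return instead.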
    
    killAudio();
    
    return 0;
}
Example #3
static int mainInternal(QApplication &a)
{
#ifdef USING_GLES2
	emugl = new MainUI();
	emugl->resize(pixel_xres, pixel_yres);
	emugl->showFullScreen();
#endif
#ifdef __SYMBIAN32__
	// Set RunFast hardware mode for VFPv2.
	User::SetFloatingPointMode(EFpModeRunFast);
	// Disable screensaver
	QScopedPointer<QSystemScreenSaver> ssObject(new QSystemScreenSaver(emugl));
	ssObject->setScreenSaverInhibit();
	QScopedPointer<SymbianMediaKeys> mediakeys(new SymbianMediaKeys());
#endif

	QScopedPointer<QThread> thread(new QThread);
	QScopedPointer<MainAudio> audio(new MainAudio());
	audio->moveToThread(thread.data());
	QObject::connect(thread.data(), SIGNAL(started()), audio.data(), SLOT(run()));
	thread->start();

#ifdef QT_HAS_SDL
	SDLJoystick joy(true);
	joy.startEventLoop();
#endif
	int ret = a.exec();
	thread->quit();
	return ret;
}
Example #4
static AudioDataValue*
CopyAndPackAudio(AVFrame* aFrame, uint32_t aNumChannels, uint32_t aNumSamples)
{
  // These are the only two valid AAC packet sizes.
  NS_ASSERTION(aNumSamples == 960 || aNumSamples == 1024,
               "Should have exactly one AAC audio packet.");
  MOZ_ASSERT(aNumChannels <= MAX_CHANNELS);

  nsAutoArrayPtr<AudioDataValue> audio(
    new AudioDataValue[aNumChannels * aNumSamples]);

  AudioDataValue** data = reinterpret_cast<AudioDataValue**>(aFrame->data);

  if (aFrame->format == AV_SAMPLE_FMT_FLT) {
    // Audio data already packed. No need to do anything other than copy it
    // into a buffer we own.
    memcpy(audio, data[0], aNumChannels * aNumSamples * sizeof(AudioDataValue));
  } else if (aFrame->format == AV_SAMPLE_FMT_FLTP) {
    // Planar audio data. Pack it into something we can understand.
    for (uint32_t channel = 0; channel < aNumChannels; channel++) {
      for (uint32_t sample = 0; sample < aNumSamples; sample++) {
        audio[sample * aNumChannels + channel] = data[channel][sample];
      }
    }
  }

  return audio.forget();
}
Example #5
Q_DECL_EXPORT
#endif
int main(int argc, char *argv[])
{
#if defined(Q_OS_LINUX) && !defined(MAEMO)
	QApplication::setAttribute(Qt::AA_X11InitThreads, true);
#endif
	QApplication a(argc, argv);
	QSize res = QApplication::desktop()->screenGeometry().size();
	if (res.width() < res.height())
		res.transpose();
	pixel_xres = res.width();
	pixel_yres = res.height();
	g_dpi_scale = CalculateDPIScale();
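	// Derive density-independent dimensions from the physical resolution and DPI scale.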
	dp_xres = (int)(pixel_xres * g_dpi_scale); dp_yres = (int)(pixel_yres * g_dpi_scale);
	net::Init();
#ifdef __SYMBIAN32__
	const char *savegame_dir = "E:/PPSSPP/";
	const char *assets_dir = "E:/PPSSPP/";
#elif defined(BLACKBERRY)
	const char *savegame_dir = "/accounts/1000/shared/misc/";
	const char *assets_dir = "app/native/assets/";
#elif defined(MEEGO_EDITION_HARMATTAN) || defined(MAEMO)
	const char *savegame_dir = "/home/user/MyDocs/PPSSPP/";
	const char *assets_dir = "/opt/PPSSPP/";
#else
	const char *savegame_dir = "./";
	const char *assets_dir = "./";
#endif
	NativeInit(argc, (const char **)argv, savegame_dir, assets_dir, "BADCOFFEE");
#ifdef USING_GLES2
	emugl = new MainUI();
	emugl->resize(pixel_xres, pixel_yres);
	emugl->showFullScreen();
#endif
#ifdef __SYMBIAN32__
	// Set RunFast hardware mode for VFPv2.
	User::SetFloatingPointMode(EFpModeRunFast);
	// Disable screensaver
	QScopedPointer<QSystemScreenSaver> ssObject(new QSystemScreenSaver(emugl));
	ssObject->setScreenSaverInhibit();
	QScopedPointer<SymbianMediaKeys> mediakeys(new SymbianMediaKeys());
#endif

	QScopedPointer<QThread> thread(new QThread);
	QScopedPointer<MainAudio> audio(new MainAudio());
	audio->moveToThread(thread.data());
	QObject::connect(thread.data(), SIGNAL(started()), audio.data(), SLOT(run()));
	thread->start();

#ifdef QT_HAS_SDL
	SDLJoystick joy(true);
	joy.startEventLoop();
#endif
	int ret = a.exec();
	thread->quit();
	NativeShutdown();
	net::Shutdown();
	return ret;
}
Example #6
void emitPacket(AudioEncoder* enc, AVPacket& opacket)
{
    auto sampleFmt = av_get_sample_fmt(enc->oparams.sampleFmt.c_str());
    assert(av_sample_fmt_is_planar(sampleFmt) == 0 &&
           "planar formats not supported");

    // enc->time = enc->frame->pkt_pts > 0 ?
    // static_cast<std::int64_t>(enc->frame->pkt_pts *
    // av_q2d(enc->stream->time_base) * 1000) : 0;
    // enc->pts = enc->frame->pkt_pts;

    // Compute stream time in milliseconds
    if (enc->stream && opacket.pts >= 0) {
        enc->time = static_cast<std::int64_t>(
            opacket.pts * av_q2d(enc->stream->time_base) * 1000);
    }
    enc->pts = opacket.pts;

    assert(opacket.data);
    assert(opacket.size);
    // assert(opacket.pts >= 0);
    // assert(opacket.dts >= 0);

    AudioPacket audio(opacket.data, opacket.size, enc->outputFrameSize,
                      enc->time);
    audio.source = &opacket;
    audio.opaque = enc;

    enc->emitter.emit(/*enc, */ audio);
}
Example #7
//-------------------------------------------------------------------------
//                                  MAIN
//-------------------------------------------------------------------------
int main(int argc, char* argv[])
{
    cout << "Libfaust version : " << getCLibFaustVersion() << endl;

    string error_msg;
    dsp_factory* factory = createDSPFactoryFromFile(argv[argc-1], 0, nullptr, "", error_msg, -1);
    if (factory == 0) {
        std::cerr << "Unable to create Faust DSP factory" << std::endl;
        exit(1);
    }
    
    dsp* DSP = factory->createDSPInstance();
    if (DSP == 0) {
        std::cerr << "Unable to allocate Faust DSP object" << std::endl;
        exit(1);
    }

    OssiaUI ossia{1234, 5678};
    DSP->buildUserInterface(&ossia);

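    // Faust portaudio driver; the constructor arguments are presumably the
    // sample rate (44100 Hz) and the buffer size (256 frames).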
    portaudio audio(44100, 256);
    audio.init("FaustDSP", DSP);
    audio.start();

    ossia.run(50);
    audio.stop();

    return 0;
}
Example #8
int RTPEndpoint::RunAudio()
{
	BYTE lost;
	AudioCodec::Type codec;
	DWORD timestamp;
	RTPPacket audio(MediaFrame::Audio, 0, 0);

	while (inited)
	{
		DWORD len = audio.GetMaxMediaLength();
		//Get the packet
		if (!RTPSession::GetAudioPacket(audio.GetMediaData(), &len, &lost, &codec, &timestamp))
			//Next
			continue;
		//Set length
		audio.SetMediaLength(len);
		//Set codec
		audio.SetCodec(codec);
		audio.SetType(codec);
		//Set timestamp
		audio.SetTimestamp(timestamp);
		//Multiplex
		Multiplex(audio);
	}

	return 1;
}
Example #9
static int mainInternal(QApplication &a)
{
#ifdef MOBILE_DEVICE
	emugl = new MainUI();
	emugl->resize(pixel_xres, pixel_yres);
	emugl->showFullScreen();
#endif
#ifdef __SYMBIAN32__
	// Set RunFast hardware mode for VFPv2.
	User::SetFloatingPointMode(EFpModeRunFast);
	// Disable screensaver
	QScopedPointer<QSystemScreenSaver> ssObject(new QSystemScreenSaver(emugl));
	ssObject->setScreenSaverInhibit();
	QScopedPointer<SymbianMediaKeys> mediakeys(new SymbianMediaKeys());
#endif

#ifdef QT_HAS_SDL
	SDLJoystick joy(true);
	joy.startEventLoop();
	SDL_Init(SDL_INIT_AUDIO);
	SDL_AudioSpec fmt, ret_fmt;
	memset(&fmt, 0, sizeof(fmt));
	fmt.freq = 44100;
	fmt.format = AUDIO_S16;
	fmt.channels = 2;
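	// SDL_AudioSpec::samples is the audio buffer size in sample frames.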
	fmt.samples = 2048;
	fmt.callback = &mixaudio;
	fmt.userdata = (void *)0;

	if (SDL_OpenAudio(&fmt, &ret_fmt) < 0) {
		ELOG("Failed to open audio: %s", SDL_GetError());
	} else {
		if (ret_fmt.samples != fmt.samples) // Notify, but still use it
			ELOG("Output audio samples: %d (requested: %d)", ret_fmt.samples, fmt.samples);
		if (ret_fmt.freq != fmt.freq || ret_fmt.format != fmt.format || ret_fmt.channels != fmt.channels) {
			ELOG("Sound buffer format does not match requested format.");
			ELOG("Output audio freq: %d (requested: %d)", ret_fmt.freq, fmt.freq);
			ELOG("Output audio format: %d (requested: %d)", ret_fmt.format, fmt.format);
			ELOG("Output audio channels: %d (requested: %d)", ret_fmt.channels, fmt.channels);
			ELOG("Provided output format does not match requirement, turning audio off");
			SDL_CloseAudio();
		}
	}

	// Audio must be unpaused _after_ NativeInit()
	SDL_PauseAudio(0);
#else
	QScopedPointer<QThread> thread(new QThread);
	QScopedPointer<MainAudio> audio(new MainAudio());
	audio->moveToThread(thread.data());
	QObject::connect(thread.data(), SIGNAL(started()), audio.data(), SLOT(run()));
	thread->start();
#endif
	int ret = a.exec();
#ifndef QT_HAS_SDL
	thread->quit();
#endif
	return ret;
}
Example #10
void MPlayer::vol_down()
{
    QtAV::AudioOutput *ao = audio();

    if (ao && ao->isAvailable()) {
        qreal v = ao->volume();
#ifdef QT_NO_DEBUG_OUTPUT

        if (v <= 0.0) {
            return;
        }

#endif
        v -= 0.02;
        ao->setVolume(v);
        qDebug("vol = %.3f", audio()->volume());
    }
}
Example #11
QVariant MusicModel::data(const QModelIndex &index, int role) const
{
    if (role == Qt::DisplayRole)
    {
        QString audio(files[index.row()].artist + QString(" ") + files[index.row()].title);
        return QVariant(audio);
    }
    return QVariant();
}
Example #12
void onKeyboard(unsigned char key, int x, int y) {
//    if (key == ']') {
//        playAudio();
//    }
    if (key == '[') {
        std::thread audio(killAudio);
        audio.detach();  // detach: destroying a joinable std::thread would call std::terminate()
    }
    
    keysPressed.at(key) = true;
}
Example #13
void test_profile_create() {
  HTTPClientSession s(HOST, PORT);
  HTTPRequest request(HTTPRequest::HTTP_POST, "/api/v1/profile/");
  //std::ostringstream os;

  
  JSONNode base(JSON_NODE);
  base.push_back(JSONNode("name","test profile"));
  base.push_back(JSONNode("description","test description"));

  JSONNode format(JSON_NODE);
  format.set_name("format");
  format.push_back(JSONNode("id","mp4"));
  base.push_back(format);

  
  JSONNode video(JSON_NODE);
  video.set_name("video");
  //video.push_back(JSONNode("id","mpeg4"));
  video.push_back(JSONNode("id","libx264"));
  video.push_back(JSONNode("qscale","4"));
  video.push_back(JSONNode("aubq","8"));
  video.push_back(JSONNode("me_range","16"));
  video.push_back(JSONNode("qmin","4"));
  video.push_back(JSONNode("qmax","51"));
  video.push_back(JSONNode("qcomp","0.6"));
  video.push_back(JSONNode("qdiff","4"));

  base.push_back(video);
  
  JSONNode audio(JSON_NODE);
  audio.set_name("audio");
  audio.push_back(JSONNode("id","mp2"));
  audio.push_back(JSONNode("ar","44100"));
  audio.push_back(JSONNode("ac","2"));

  base.push_back(audio);

  //request.write(os);
  std::ostream & os=s.sendRequest(request);
  os << base.write_formatted();
  HTTPResponse response;
  std::istream& rs = s.receiveResponse(response);


  std::string data;
  StreamCopier::copyToString(rs, data);
  LOGDEBUG("response:"<<data);
  JSONNode node = getJson(data);
  assert_response(node);
  assert(node.contains("uuid"));
  created_profile_uuid=node["uuid"].as_string();
}
Example #14
QStringList RGBAlgorithm::algorithms(const Doc * doc)
{
    QStringList list;
    RGBText text(doc);
    RGBImage image(doc);
    RGBAudio audio(doc);
    list << text.name();
    list << image.name();
    list << audio.name();
    list << RGBScript::scriptNames(doc);
    return list;
}
Example #15
void
SeekTask::OnAudioDecoded(MediaData* aAudioSample)
{
  AssertOwnerThread();
  RefPtr<MediaData> audio(aAudioSample);
  MOZ_ASSERT(audio);

  // The MDSM::mDecodedAudioEndTime will be updated once the whole SeekTask is
  // resolved.

  SAMPLE_LOG("OnAudioDecoded [%lld,%lld] disc=%d",
             (audio ? audio->mTime : -1),
             (audio ? audio->GetEndTime() : -1),
             (audio ? audio->mDiscontinuity : 0));

  if (!Exists()) {
    // We've received a sample from a previous decode. Discard it.
    return;
  }

  if (audio->mDiscontinuity) {
    mDropAudioUntilNextDiscontinuity = false;
  }

  if (!mDropAudioUntilNextDiscontinuity) {
    // We must be after the discontinuity; we're receiving samples
    // at or after the seek target.
    if (mSeekJob.mTarget.IsFast() &&
        mSeekJob.mTarget.GetTime().ToMicroseconds() > mCurrentTimeBeforeSeek &&
        audio->mTime < mCurrentTimeBeforeSeek) {
      // We are doing a fastSeek, but we ended up *before* the previous
      // playback position. This is surprising UX, so switch to an accurate
      // seek and decode to the seek target. This is not conformant to the
      // spec, fastSeek should always be fast, but until we get the time to
      // change all Readers to seek to the keyframe after the currentTime
      // in this case, we'll just decode forward. Bug 1026330.
      mSeekJob.mTarget.SetType(SeekTarget::Accurate);
    }
    if (mSeekJob.mTarget.IsFast()) {
      // Non-precise seek; we can stop the seek at the first sample.
      mSeekedAudioData = audio;
    } else {
      // We're doing an accurate seek. We must discard
      // MediaData up to the one containing exact seek target.
      if (NS_FAILED(DropAudioUpToSeekTarget(audio.get()))) {
        RejectIfExist(__func__);
        return;
      }
    }
  }
  CheckIfSeekComplete();
}
Example #16
static int mainInternal(QApplication &a)
{
#ifdef MOBILE_DEVICE
	emugl = new MainUI();
	emugl->resize(pixel_xres, pixel_yres);
	emugl->showFullScreen();
#endif
	EnableFZ();
	// Disable screensaver
#ifdef __SYMBIAN32__
	QSystemScreenSaver ssObject(emugl);
	ssObject.setScreenSaverInhibit();
	QScopedPointer<SymbianMediaKeys> mediakeys(new SymbianMediaKeys());
#elif defined(QT_HAS_SYSTEMINFO)
	QScreenSaver ssObject(emugl);
	ssObject.setScreenSaverEnabled(false);
#endif

#ifdef SDL
	SDLJoystick joy(true);
	joy.startEventLoop();
	SDL_Init(SDL_INIT_AUDIO);
	SDL_AudioSpec fmt, ret_fmt;
	memset(&fmt, 0, sizeof(fmt));
	fmt.freq = 44100;
	fmt.format = AUDIO_S16;
	fmt.channels = 2;
	fmt.samples = 2048;
	fmt.callback = &mixaudio;
	fmt.userdata = (void *)0;

	if (SDL_OpenAudio(&fmt, &ret_fmt) < 0) {
		ELOG("Failed to open audio: %s", SDL_GetError());
	} else {
		if (ret_fmt.samples != fmt.samples) // Notify, but still use it
			ELOG("Output audio samples: %d (requested: %d)", ret_fmt.samples, fmt.samples);
		if (ret_fmt.freq != fmt.freq || ret_fmt.format != fmt.format || ret_fmt.channels != fmt.channels) {
			ELOG("Sound buffer format does not match requested format.");
			ELOG("Output audio freq: %d (requested: %d)", ret_fmt.freq, fmt.freq);
			ELOG("Output audio format: %d (requested: %d)", ret_fmt.format, fmt.format);
			ELOG("Output audio channels: %d (requested: %d)", ret_fmt.channels, fmt.channels);
			ELOG("Provided output format does not match requirement, turning audio off");
			SDL_CloseAudio();
		}
	}
	SDL_PauseAudio(0);
#else
	QScopedPointer<MainAudio> audio(new MainAudio());
	audio->run();
#endif
	return a.exec();
}
Example #17
  nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
                  MediaFormat::Param aFormat, const TimeUnit& aDuration)
  {
    // The output on Android is always 16-bit signed
    nsresult rv;
    int32_t numChannels;
    NS_ENSURE_SUCCESS(rv =
        aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);
    AudioConfig::ChannelLayout layout(numChannels);
    if (!layout.IsValid()) {
      return NS_ERROR_FAILURE;
    }

    int32_t sampleRate;
    NS_ENSURE_SUCCESS(rv =
        aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);

    int32_t size;
    NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);

    int32_t offset;
    NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);

#ifdef MOZ_SAMPLE_TYPE_S16
    const int32_t numSamples = size / 2;
#else
#error We only support 16-bit integer PCM
#endif

    const int32_t numFrames = numSamples / numChannels;
    AlignedAudioBuffer audio(numSamples);
    if (!audio) {
      return NS_ERROR_OUT_OF_MEMORY;
    }

    const uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
    PodCopy(audio.get(), reinterpret_cast<const AudioDataValue*>(bufferStart),
            numSamples);

    int64_t presentationTimeUs;
    NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

    RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
                                           aDuration.ToMicroseconds(),
                                           numFrames,
                                           Move(audio),
                                           numChannels,
                                           sampleRate);
    INVOKE_CALLBACK(Output, data);
    return NS_OK;
  }
Example #18
int snd_Start(int frameCount, int samplesPerSec, int stereo, int semaIndex)
{
    Synchronized<es::Monitor*> method(monitorPlayState);

    if (!gSoundOutput)
    {
        return false;
    }

    int bytesPerFrame;
    int bufferBytes;

    bytesPerFrame = stereo ? 2 * BYTES_PER_SAMPLE : BYTES_PER_SAMPLE;
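    // Round the buffer size down to a whole multiple of 8 bytes.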
    bufferBytes   = ((frameCount * bytesPerFrame) / 8) * 8;

    if (playState.open)
    {
        // still open from last time; clean up before continuing
        snd_Stop();
    }

    int chan = (stereo ? 2 : 1);

    Handle<es::AudioFormat> audio(gSoundOutput);
    audio->setBitsPerSample(16);
    audio->setChannels(chan);
    audio->setSamplingRate(samplesPerSec);

    playState.bufSizeInBytes = bufferBytes * 2;
    playState.buffer = new u8[playState.bufSizeInBytes];
    if (!playState.buffer)
    {
        return false;
    }
    memset(playState.buffer, 0, playState.bufSizeInBytes);

    playState.open           = false;  // set to true if successful
    playState.stereo         = stereo;
    playState.frameCount     = bufferBytes / bytesPerFrame;
    playState.sampleRate     = samplesPerSec;
    playState.lastFlipTime   = ioMicroMSecs();
    playState.playSemaIndex  = semaIndex;
    playState.playing        = true;
    playState.head = playState.tail = playState.buffer;
    playState.done           = false;
    playState.open = true;
    monitorPlayState->notifyAll();

    return true;
}
Example #19
RGBAlgorithm* RGBAlgorithm::algorithm(const Doc * doc, const QString& name)
{
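    // Instantiate each built-in algorithm to compare display names;
    // fall back to a script lookup if none match.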
    RGBText text(doc);
    RGBImage image(doc);
    RGBAudio audio(doc);
    if (name == text.name())
        return text.clone();
    else if (name == image.name())
        return image.clone();
    else if (name == audio.name())
        return audio.clone();
    else
        return RGBScript::script(doc, name).clone();
}
Example #20
void configDialog::fillSamplerateBox()
{
    SamplerateBox->clear();

    RtAudio::Api api=ApiBox->itemData(ApiBox->currentIndex()).value<RtAudio::Api>();
    RtAudio audio(api);
    const int device=SoundcardBox->currentIndex();
    if (device < 0 )
        return;

    RtAudio::DeviceInfo info=audio.getDeviceInfo(device);
    for (std::vector<unsigned int>::iterator it=info.sampleRates.begin(); it!=info.sampleRates.end(); ++it)
    {
        SamplerateBox->addItem(QString::number(*it));
    }
}
Example #21
        void AudioOutput::setVolume(qreal newVolume)
        {
            for(int i = 0; i < FILTER_COUNT; ++i) {
                ComPointer<IBasicAudio> audio(m_filters[i], IID_IBasicAudio);
                if (audio) {
                    const qreal currentVolume = newVolume * (m_currentIndex == i ? m_crossfadeProgress : 1-m_crossfadeProgress);
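                    // IBasicAudio::put_Volume() expects hundredths of a decibel:
                    // 0 is full volume, -10000 is silence.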
                    const qreal newDbVolume = (qMax(0., 1.-::log(::pow(currentVolume, -log10over20)))-1.) * 10000;
                    audio->put_Volume(qRound(newDbVolume));
                }
            }

            if (m_volume != newVolume) {
                m_volume = newVolume;
                emit volumeChanged(newVolume);
            }
        }
Example #22
static AlignedAudioBuffer
CopyAndPackAudio(AVFrame* aFrame, uint32_t aNumChannels, uint32_t aNumAFrames)
{
  MOZ_ASSERT(aNumChannels <= MAX_CHANNELS);

  AlignedAudioBuffer audio(aNumChannels * aNumAFrames);
  if (!audio) {
    return audio;
  }

  if (aFrame->format == AV_SAMPLE_FMT_FLT) {
    // Audio data already packed. No need to do anything other than copy it
    // into a buffer we own.
    memcpy(audio.get(), aFrame->data[0],
           aNumChannels * aNumAFrames * sizeof(AudioDataValue));
  } else if (aFrame->format == AV_SAMPLE_FMT_FLTP) {
    // Planar audio data. Pack it into something we can understand.
    AudioDataValue* tmp = audio.get();
    AudioDataValue** data = reinterpret_cast<AudioDataValue**>(aFrame->data);
    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
        *tmp++ = data[channel][frame];
      }
    }
  } else if (aFrame->format == AV_SAMPLE_FMT_S16) {
    // Audio data already packed. Need to convert from S16 to 32 bits Float
    AudioDataValue* tmp = audio.get();
    int16_t* data = reinterpret_cast<int16_t**>(aFrame->data)[0];
    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
        *tmp++ = AudioSampleToFloat(*data++);
      }
    }
  } else if (aFrame->format == AV_SAMPLE_FMT_S16P) {
    // Planar audio data. Convert it from S16 to 32 bits float
    // and pack it into something we can understand.
    AudioDataValue* tmp = audio.get();
    int16_t** data = reinterpret_cast<int16_t**>(aFrame->data);
    for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
      for (uint32_t channel = 0; channel < aNumChannels; channel++) {
        *tmp++ = AudioSampleToFloat(data[channel][frame]);
      }
    }
  }

  return audio;
}
Example #23
//-------------------------------------------------------------------------
//                                  MAIN
//-------------------------------------------------------------------------
int main(int argc, char *argv[] )
{
	char* appname = basename(argv[0]);
	char  rcfilename[256];
	char* home = getenv("HOME");
	snprintf(rcfilename, 255, "%s/.%src", home, appname);

	DSP = new mydsp();
	if (DSP == 0) {
		std::cerr << "Unable to allocate Faust DSP object" << std::endl;
		exit(1);
	}

	GUI* interface = new QTGUI(argc, argv);
	FUI* finterface = new FUI();
	DSP->buildUserInterface(interface);
	DSP->buildUserInterface(finterface);

#ifdef HTTPCTRL
	httpdUI* httpdinterface = new httpdUI(appname, argc, argv);
	DSP->buildUserInterface(httpdinterface);
	std::cout << "HTTPD is on" << std::endl;
#endif

#ifdef OSCCTRL
	GUI* oscinterface = new OSCUI(appname, argc, argv);
	DSP->buildUserInterface(oscinterface);
#endif

	alsaaudio audio(argc, argv, DSP);
	audio.init(appname, DSP);
	finterface->recallState(rcfilename);
	audio.start();

#ifdef HTTPCTRL
	httpdinterface->run();
#endif

#ifdef OSCCTRL
	oscinterface->run();
#endif
	interface->run();

	audio.stop();
	finterface->saveState(rcfilename);
	return 0;
}
Example #24
void
AccurateSeekTask::OnAudioDecoded(MediaData* aAudioSample)
{
  AssertOwnerThread();
  MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished");

  RefPtr<MediaData> audio(aAudioSample);
  MOZ_ASSERT(audio);

  // The MDSM::mDecodedAudioEndTime will be updated once the whole SeekTask is
  // resolved.

  SAMPLE_LOG("OnAudioDecoded [%lld,%lld] disc=%d",
    audio->mTime, audio->GetEndTime(), audio->mDiscontinuity);

  // Video-only seek doesn't reset audio decoder. There might be pending audio
  // requests when AccurateSeekTask::Seek() begins. We will just store the data
  // without checking |mDiscontinuity| or calling DropAudioUpToSeekTarget().
  if (mTarget.IsVideoOnly()) {
    mSeekedAudioData = audio.forget();
    return;
  }

  if (mFirstAudioSample) {
    mFirstAudioSample = false;
    MOZ_ASSERT(audio->mDiscontinuity);
  }

  AdjustFastSeekIfNeeded(audio);

  if (mTarget.IsFast()) {
    // Non-precise seek; we can stop the seek at the first sample.
    mSeekedAudioData = audio;
    mDoneAudioSeeking = true;
  } else if (NS_FAILED(DropAudioUpToSeekTarget(audio))) {
    CancelCallbacks();
    RejectIfExist(__func__);
    return;
  }

  if (!mDoneAudioSeeking) {
    RequestAudioData();
    return;
  }
  MaybeFinishSeek();
}
Example #25
void
FFmpegAACDecoder<LIBAV_VER>::DecodePacket(MP4Sample* aSample)
{
  AVPacket packet;
  av_init_packet(&packet);

  aSample->Pad(FF_INPUT_BUFFER_PADDING_SIZE);
  packet.data = aSample->data;
  packet.size = aSample->size;
  packet.pos = aSample->byte_offset;

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg audio decoder failed to allocate frame.");
    mCallback->Error();
    return;
  }

  int decoded;
  int bytesConsumed =
    avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);

  if (bytesConsumed < 0 || !decoded) {
    NS_WARNING("FFmpeg audio decoder error.");
    mCallback->Error();
    return;
  }

  NS_ASSERTION(bytesConsumed == (int)aSample->size,
               "Only one audio packet should be received at a time.");

  uint32_t numChannels = mCodecContext->channels;

  nsAutoArrayPtr<AudioDataValue> audio(
    CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));

  nsAutoPtr<AudioData> data(
    new AudioData(packet.pos, aSample->composition_timestamp, aSample->duration,
                  mFrame->nb_samples, audio.forget(), numChannels));

  mCallback->Output(data.forget());

  if (mTaskQueue->IsEmpty()) {
    mCallback->InputExhausted();
  }
}
Example #26
void configDialog::fillSoundcardBox()
{
    SoundcardBox->clear();

    RtAudio::Api api=ApiBox->itemData(ApiBox->currentIndex()).value<RtAudio::Api>();
    RtAudio audio(api);
    const unsigned int devices=audio.getDeviceCount();
    for (unsigned int i=0; i<devices; i++)
    {
        RtAudio::DeviceInfo info=audio.getDeviceInfo(i);
        // Check if it supports float32 output
        if (info.probed
                && (info.nativeFormats & RTAUDIO_FLOAT32)
                && (info.outputChannels > 0))
            SoundcardBox->addItem(QString::fromStdString(info.name));
            // TODO: maybe we should cache supported samplerates here
    }
}
Example #27
uint32_t
AudioSink::PlayFromAudioQueue()
{
  AssertOnAudioThread();
  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
  nsRefPtr<AudioData> audio(AudioQueue().PopFront());

  SINK_LOG_V("playing %u frames of audio at time %lld",
             audio->mFrames, audio->mTime);
  mAudioStream->Write(audio->mAudioData, audio->mFrames);

  StartAudioStreamPlaybackIfNeeded();

  if (audio->mOffset != -1) {
    mStateMachine->DispatchOnPlaybackOffsetUpdate(audio->mOffset);
  }
  return audio->mFrames;
}
Example #28
QUrl saveAudioTrack(const QByteArray &result, const QUrl& source)
{
	if (result.isEmpty())
	{
		return QUrl("");
	}

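	// Name the cached track file after the last path segment of the source URL.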
	QString uri = QString(AUDIO_TRACK_PATH).arg(source.toString().section('/', -1));
	qDebug() << uri;

	QFile audio(uri);
	if (audio.open(QIODevice::WriteOnly))
	{
		audio.write(result);
		audio.close();
	}
	return QUrl(uri);
}
Example #29
void test_profile_update() {
  HTTPClientSession s(HOST, PORT);
  HTTPRequest request(HTTPRequest::HTTP_PUT, std::string("/api/v1/profile/").append(created_profile_uuid));
  JSONNode base(JSON_NODE);
  base.push_back(JSONNode("name","test profile"));
  base.push_back(JSONNode("description","test description"));

  JSONNode format(JSON_NODE);
  format.set_name("format");
  format.push_back(JSONNode("id","matroska"));
  base.push_back(format);

  
  JSONNode video(JSON_NODE);
  video.set_name("video");
  video.push_back(JSONNode("id","mpeg4"));

  base.push_back(video);
  
  JSONNode audio(JSON_NODE);
  audio.set_name("audio");
  audio.push_back(JSONNode("id","mp2"));
  audio.push_back(JSONNode("ar","44100"));
  audio.push_back(JSONNode("ac","2"));

  base.push_back(audio);

  //request.write(os);
  std::ostream & os=s.sendRequest(request);
  os << base.write_formatted();
  HTTPResponse response;
  std::istream& rs = s.receiveResponse(response);


  std::string data;
  StreamCopier::copyToString(rs, data);
  LOGDEBUG("response:"<<data);
  JSONNode node = getJson(data);
  assert_response(node);
  assert(node.contains("uuid"));
  assert(node["uuid"]==created_profile_uuid);
}
Example #30
RGBAlgorithm* RGBAlgorithm::loader(const Doc * doc, const QDomElement& root)
{
    RGBAlgorithm* algo = NULL;

    if (root.tagName() != KXMLQLCRGBAlgorithm)
    {
        qWarning() << Q_FUNC_INFO << "RGB Algorithm node not found";
        return NULL;
    }

    QString type = root.attribute(KXMLQLCRGBAlgorithmType);
    if (type == KXMLQLCRGBImage)
    {
        RGBImage image(doc);
        if (image.loadXML(root) == true)
            algo = image.clone();
    }
    else if (type == KXMLQLCRGBText)
    {
        RGBText text(doc);
        if (text.loadXML(root) == true)
            algo = text.clone();
    }
    else if (type == KXMLQLCRGBAudio)
    {
        RGBAudio audio(doc);
        if (audio.loadXML(root) == true)
            algo = audio.clone();
    }
    else if (type == KXMLQLCRGBScript)
    {
        RGBScript scr = RGBScript::script(doc, root.text());
        if (scr.apiVersion() > 0 && scr.name().isEmpty() == false)
            algo = scr.clone();
    }
    else
    {
        qWarning() << "Unrecognized RGB algorithm type:" << type;
    }

    return algo;
}