bool AudioOutputPortAudio::write()
{
    DPTR_D(AudioOutputPortAudio);
    QMutexLocker lock(&d.mutex);
    Q_UNUSED(lock);
    if (!d.available)
        return false;
    if (Pa_IsStreamStopped(d.stream))
        Pa_StartStream(d.stream);
#if KNOW_WHY
#ifndef Q_OS_MAC //?
    // Pre-fill the stream with silence when more than one latency's worth of space is writable.
    int diff = Pa_GetStreamWriteAvailable(d.stream) - d.outputLatency * d.sample_rate;
    if (diff > 0) {
        int newsize = diff * d.channels * sizeof(float);
        // NOTE: the static buffer is sized on the first call only; a later, larger diff would overflow it.
        static char *a = new char[newsize];
        memset(a, 0, newsize);
        Pa_WriteStream(d.stream, a, diff);
    }
#endif
#endif //KNOW_WHY
    // Pa_WriteStream takes a frame count: bytes / channels / bytesPerSample.
    PaError err = Pa_WriteStream(d.stream, d.data.constData(), d.data.size()/audioFormat().channels()/audioFormat().bytesPerSample());
    if (err == paUnanticipatedHostError) {
        qWarning("Write portaudio stream error: %s", Pa_GetErrorText(err));
        return false;
    }
    return true;
}
#include <cstdio>
#include <cstdint>
#include <unistd.h>
#include "AvionDecoder.h" // assumed header name for Avion::AvionDecoder

int main(int argc, char *argv[])
{
    printf("avion test flight\n");
    const int audioSize = 1024;
    Avion::AvionDecoder::AudioFormat audioFormat(true, Avion::AvionDecoder::PCM_32_FLOAT, 44100.0, 1024, true);
    Avion::AvionDecoder::VideoFormat videoFormat(true, Avion::AvionDecoder::RGBA, true);
    Avion::AvionDecoder* decoder = Avion::AvionDecoder::create("file:///Users/radar/Desktop/simian_mobile_disco-audacity_of_huge_(2009).mp4", audioFormat, videoFormat);
    //wrapper->seek(60);
    printf("decoder open: framerate=%f w=%d h=%d\n", decoder->getVideoFrameRate(), decoder->getVideoWidth(), decoder->getVideoHeight());
    size_t size = decoder->getVideoWidth() * decoder->getVideoHeight() * 4; // RGBA: 4 bytes per pixel
    uint8_t* image = new uint8_t[size];
    uint8_t samples[audioSize];
    int error = 0;
    double pts = 0;
    while (error != -1) {
        error = decoder->decodeVideo(image, pts);
        printf("got video frame %f %d\n", pts, error);
        error = decoder->decodeAudio(samples, pts);
        printf("got audio frame %f %d\n", pts, error);
        usleep(1000000);
    }
    delete[] image;
    delete decoder;
    return 0;
}
//TODO: call open after audio format changed?
bool AudioOutputPortAudio::open()
{
    DPTR_D(AudioOutputPortAudio);
    QMutexLocker lock(&d.mutex);
    Q_UNUSED(lock);
    d.outputParameters->sampleFormat = toPaSampleFormat(audioFormat().sampleFormat());
    d.outputParameters->channelCount = audioFormat().channels();
    // framesPerBuffer = 0 lets PortAudio choose the buffer size (paFramesPerBufferUnspecified)
    PaError err = Pa_OpenStream(&d.stream, NULL, d.outputParameters, audioFormat().sampleRate(), 0, paNoFlag, NULL, NULL);
    if (err == paNoError) {
        d.outputLatency = Pa_GetStreamInfo(d.stream)->outputLatency;
        d.available = true;
    } else {
        qWarning("Open portaudio stream error: %s", Pa_GetErrorText(err));
        d.available = false;
    }
    return err == paNoError;
}
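// open() above only updates sampleFormat and channelCount, so the remaining
// PaStreamParameters fields are presumably filled in elsewhere (e.g. in the
// private class constructor). A minimal sketch of that setup, assuming the
// default output device is used -- not necessarily how this project does it:
static void initOutputParameters(PaStreamParameters *params)
{
    memset(params, 0, sizeof(*params));
    params->device = Pa_GetDefaultOutputDevice();              // system default output device
    const PaDeviceInfo *info = Pa_GetDeviceInfo(params->device);
    params->suggestedLatency = info ? info->defaultHighOutputLatency : 0.0;
    params->hostApiSpecificStreamInfo = NULL;
}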
bool AudioOutputOpenSL::open()
{
    DPTR_D(AudioOutputOpenSL);
    d.available = false;
    resetStatus();
    SLDataLocator_BufferQueue bufferQueueLocator = { SL_DATALOCATOR_BUFFERQUEUE, (SLuint32)d.nb_buffers };
    SLDataFormat_PCM pcmFormat = audioFormatToSL(audioFormat());
    SLDataSource audioSrc = { &bufferQueueLocator, &pcmFormat };
    // OutputMix
    SL_RUN_CHECK_FALSE((*d.engine)->CreateOutputMix(d.engine, &d.m_outputMixObject, 0, NULL, NULL));
    SL_RUN_CHECK_FALSE((*d.m_outputMixObject)->Realize(d.m_outputMixObject, SL_BOOLEAN_FALSE));
    SLDataLocator_OutputMix outputMixLocator = { SL_DATALOCATOR_OUTPUTMIX, d.m_outputMixObject };
    SLDataSink audioSink = { &outputMixLocator, NULL };
    const SLInterfaceID ids[] = { SL_IID_BUFFERQUEUE };//, SL_IID_VOLUME };
    const SLboolean req[] = { SL_BOOLEAN_TRUE };//, SL_BOOLEAN_TRUE };
    // AudioPlayer
    SL_RUN_CHECK_FALSE((*d.engine)->CreateAudioPlayer(d.engine, &d.m_playerObject, &audioSrc, &audioSink, sizeof(ids)/sizeof(ids[0]), ids, req));
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->Realize(d.m_playerObject, SL_BOOLEAN_FALSE));
    // Buffer interface
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_BUFFERQUEUE, &d.m_bufferQueueItf));
    SL_RUN_CHECK_FALSE((*d.m_bufferQueueItf)->RegisterCallback(d.m_bufferQueueItf, AudioOutputOpenSLPrivate::bufferQueueCallback, &d));
    // Play interface
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_PLAY, &d.m_playItf));
    // called when SL_PLAYSTATE_STOPPED
    SL_RUN_CHECK_FALSE((*d.m_playItf)->RegisterCallback(d.m_playItf, AudioOutputOpenSLPrivate::playCallback, this));
#if 0
    SLuint32 mask = SL_PLAYEVENT_HEADATEND;
    // TODO: what does this do?
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetPositionUpdatePeriod(d.m_playItf, 100));
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetCallbackEventsMask(d.m_playItf, mask));
#endif
    // Volume interface
    //SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_VOLUME, &d.m_volumeItf));
    // Prime the buffer queue with silence so playback can start immediately.
    const int kBufferSize = 1024*4;
    static char init_data[kBufferSize];
    memset(init_data, 0, sizeof(init_data));
    for (quint32 i = 0; i < d.nb_buffers; ++i) {
        SL_RUN_CHECK_FALSE((*d.m_bufferQueueItf)->Enqueue(d.m_bufferQueueItf, init_data, sizeof(init_data)));
        d.nextEnqueueInfo().data_size = sizeof(init_data);
        d.nextEnqueueInfo().timestamp = 0;
        d.bufferAdded();
        d.buffers_queued++;
    }
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetPlayState(d.m_playItf, SL_PLAYSTATE_PLAYING));
    d.available = true;
    return true;
}
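// bufferQueueCallback() registered above is not part of this excerpt. For a
// buffer-queue player, OpenSL ES invokes it on its own thread each time one of
// the enqueued buffers finishes playing. A minimal sketch of the usual pattern,
// assuming the private class only needs to track how many buffers are still
// queued (the real project code may also wake a waiting writer):
void AudioOutputOpenSLPrivate::bufferQueueCallback(SLBufferQueueItf bufferQueue, void *context)
{
    Q_UNUSED(bufferQueue);
    AudioOutputOpenSLPrivate *priv = reinterpret_cast<AudioOutputOpenSLPrivate*>(context);
    // one queued buffer has been consumed; a slot is free for the next write
    priv->buffers_queued--;
}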
bool AudioOutputPortAudio::write(const QByteArray& data)
{
    DPTR_D(AudioOutputPortAudio);
    QMutexLocker lock(&d.mutex);
    Q_UNUSED(lock);
    if (!d.available)
        return false;
    if (Pa_IsStreamStopped(d.stream))
        Pa_StartStream(d.stream);
    PaError err = Pa_WriteStream(d.stream, data.constData(), data.size()/audioFormat().channels()/audioFormat().bytesPerSample());
    if (err == paUnanticipatedHostError) {
        qWarning("Write portaudio stream error: %s", Pa_GetErrorText(err));
        return false;
    }
    return true;
}
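// Hedged usage sketch for the write() overload above: the byte count handed in
// must be a whole number of frames, because write() divides by
// channels * bytesPerSample to get the frame count passed to Pa_WriteStream.
// "ao" and the ~20 ms buffer size are illustrative; the audio format is assumed
// to have been configured before open() is called.
AudioOutputPortAudio ao;
const AudioFormat &fmt = ao.audioFormat();
const int frames = fmt.sampleRate() / 50;                                  // ~20 ms of audio
QByteArray silence(frames * fmt.channels() * fmt.bytesPerSample(), '\0');  // zeroed PCM = silence
if (ao.open())
    ao.write(silence);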
void RtspStreamWorker::openCodecs(AVFormatContext *context, AVDictionary *options)
{
    for (unsigned int i = 0; i < context->nb_streams; i++) {
        qDebug() << "processing stream id " << i;
        AVStream *stream = context->streams[i];
        bool codecOpened = openCodec(stream, options);
        if (!codecOpened) {
            qDebug() << "RtspStream: cannot find decoder for stream" << i << "codec" << stream->codec->codec_id;
            continue;
        }
        if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            m_videoStreamIndex = i;
        if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            m_audioStreamIndex = i;
        char info[512];
        avcodec_string(info, sizeof(info), stream->codec, 0);
        qDebug() << "RtspStream: stream #" << i << ":" << info;
    }
    if (m_audioStreamIndex > -1) {
        qDebug() << "audio stream time base "
                 << context->streams[m_audioStreamIndex]->codec->time_base.num << "/"
                 << context->streams[m_audioStreamIndex]->codec->time_base.den;
        emit audioFormat(context->streams[m_audioStreamIndex]->codec->sample_fmt,
                         context->streams[m_audioStreamIndex]->codec->channels,
                         context->streams[m_audioStreamIndex]->codec->sample_rate);
    }
    qDebug() << "video stream index: " << m_videoStreamIndex;
    qDebug() << "audio stream index: " << m_audioStreamIndex;
}
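// openCodec() is called above but not shown in this excerpt. A minimal sketch of
// what such a helper typically does with the legacy FFmpeg API used here
// (AVStream::codec), assuming it simply finds and opens the decoder -- the real
// implementation may differ:
bool RtspStreamWorker::openCodec(AVStream *stream, AVDictionary *options)
{
    AVCodec *codec = avcodec_find_decoder(stream->codec->codec_id);
    if (!codec)
        return false;
    AVDictionary *opts = NULL;
    av_dict_copy(&opts, options, 0);        // copy: avcodec_open2 may consume entries
    int ret = avcodec_open2(stream->codec, codec, &opts);
    av_dict_free(&opts);
    return ret == 0;
}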
    int chn = d.channels;
    if (chn == 6 || chn == 8) {
        // Swap samples 2/3 with 4/5 in each interleaved frame, presumably to move
        // FFmpeg's center/LFE channels behind the surround pair for this backend's
        // expected 5.1/7.1 ordering.
        float *audio_buffer = (float *)d.data.data();
        int size_per_chn = d.data.size() >> 2; // actually the total float sample count (4 bytes per sample)
        for (int i = 0; i < size_per_chn; i += chn) {
            float tmp = audio_buffer[i+2];
            audio_buffer[i+2] = audio_buffer[i+4];
            audio_buffer[i+4] = tmp;
            tmp = audio_buffer[i+3];
            audio_buffer[i+3] = audio_buffer[i+5];
            audio_buffer[i+5] = tmp;
        }
    }
#endif
#endif //KNOW_WHY
    PaError err = Pa_WriteStream(d.stream, d.data.data(), d.data.size()/audioFormat().channels()/audioFormat().bytesPerSample());
    if (err == paUnanticipatedHostError) {
        qWarning("Write portaudio stream error: %s", Pa_GetErrorText(err));
        return false;
    }
    return true;
}

//TODO: what about planar, int8, int24 etc. that FFmpeg or PortAudio does not support?
static int toPaSampleFormat(AudioFormat::SampleFormat format)
{
    switch (format) {
    case AudioFormat::SampleFormat_Unsigned8:
        return paUInt8;
    case AudioFormat::SampleFormat_Signed16:
        return paInt16;
    case AudioFormat::SampleFormat_Signed32: