IAudioSource* cAudioManager::createFromMemory(const char* name, const char* data, size_t length, const char* extension)
{
    if(!Initialized) return NULL;
    cAudioMutexBasicLock lock(Mutex);
    cAudioString audioName = safeCStr(name);
    cAudioString ext = safeCStr(extension);
    IAudioDecoderFactory* factory = getAudioDecoderFactory(ext.c_str());
    if(!factory)
    {
        getLogger()->logError("AudioManager",
            "Failed to create Audio Source (%s): Codec (.%s) is not supported.",
            audioName.c_str(), ext.c_str());
        return NULL;
    }

    cMemorySource* source = CAUDIO_NEW cMemorySource(data, length, true);
    if(source && source->isValid())
    {
        IAudioDecoder* decoder = factory->CreateAudioDecoder(source);
        source->drop();
        IAudioSource* audio = createAudioSource(decoder, audioName, "cMemorySource");
        if(audio != NULL)
            return audio;

        if(source)
            source->drop();
    }
    return NULL;
}
IAudioSource* cAudioManager::createFromRaw(const char* name, const char* data, size_t length, unsigned int frequency, AudioFormats format)
{
    if(!Initialized) return NULL;
    cAudioMutexBasicLock lock(Mutex);
    cAudioString audioName = safeCStr(name);
    IAudioDecoderFactory* factory = getAudioDecoderFactory("raw");
    if(!factory)
    {
        getLogger()->logError("AudioManager",
            "Failed to create Audio Source (%s): Codec (.raw) is not supported.",
            audioName.c_str());
        return NULL;
    }

    cMemorySource* source = CAUDIO_NEW cMemorySource(data, length, true);
    if(source && source->isValid())
    {
        IAudioDecoder* decoder = ((cRawAudioDecoderFactory*)factory)->CreateAudioDecoder(source, frequency, format);
        source->drop();
        IAudioSource* audio = createAudioSource(decoder, audioName, "cMemorySource");
        if(audio != NULL)
            return audio;

        if(source)
            source->drop();
    }
    return NULL;
}
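// --- Usage sketch (not part of the library) ------------------------------
// A minimal caller-side sketch of how the in-memory path above is reached
// through cAudio's public IAudioManager interface. It assumes a
// default-initialized manager; "click.wav" and the file-read code are
// placeholders chosen purely for illustration.
#include <cAudio.h>
#include <fstream>
#include <vector>

int main()
{
    cAudio::IAudioManager* manager = cAudio::createAudioManager(true);
    if(!manager) return 1;

    // Read an encoded file into memory (any codec with a registered decoder).
    std::ifstream file("click.wav", std::ios::binary);
    std::vector<char> buffer((std::istreambuf_iterator<char>(file)),
                             std::istreambuf_iterator<char>());

    // The extension argument selects the decoder factory, exactly as in
    // createFromMemory() above.
    cAudio::IAudioSource* sound =
        manager->createFromMemory("click", buffer.data(), buffer.size(), "wav");
    if(sound)
        sound->play2d(false);  // play once, non-looped

    // A real application would keep the manager alive while playback runs.
    cAudio::destroyAudioManager(manager);
    return 0;
}
// --------------------------------------------------------------------------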
status_t StagefrightRecorder::startRTPRecording() {
    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_RTP_AVP);

    if ((mAudioSource != AUDIO_SOURCE_LIST_END
                && mVideoSource != VIDEO_SOURCE_LIST_END)
            || (mAudioSource == AUDIO_SOURCE_LIST_END
                && mVideoSource == VIDEO_SOURCE_LIST_END)) {
        // Must have exactly one source.
        return BAD_VALUE;
    }

    if (mOutputFd < 0) {
        return BAD_VALUE;
    }

    sp<MediaSource> source;

    if (mAudioSource != AUDIO_SOURCE_LIST_END) {
        source = createAudioSource();
    } else {
        status_t err = setupVideoEncoder(&source);
        if (err != OK) {
            return err;
        }
    }

    mWriter = new ARTPWriter(dup(mOutputFd));
    mWriter->addSource(source);
    mWriter->setListener(mListener);

    return mWriter->start();
}
status_t StagefrightRecorder::startAMRRecording() {
    CHECK(mOutputFormat == OUTPUT_FORMAT_AMR_NB
            || mOutputFormat == OUTPUT_FORMAT_AMR_WB);

    if (mOutputFormat == OUTPUT_FORMAT_AMR_NB) {
        if (mAudioEncoder != AUDIO_ENCODER_DEFAULT
                && mAudioEncoder != AUDIO_ENCODER_AMR_NB) {
            LOGE("Invalid encoder %d used for AMRNB recording",
                    mAudioEncoder);
            return BAD_VALUE;
        }
        if (mSampleRate != 8000) {
            LOGE("Invalid sampling rate %d used for AMRNB recording",
                    mSampleRate);
            return BAD_VALUE;
        }
    } else {  // mOutputFormat must be OUTPUT_FORMAT_AMR_WB
        if (mAudioEncoder != AUDIO_ENCODER_AMR_WB) {
            LOGE("Invalid encoder %d used for AMRWB recording",
                    mAudioEncoder);
            return BAD_VALUE;
        }
        if (mSampleRate != 16000) {
            LOGE("Invalid sample rate %d used for AMRWB recording",
                    mSampleRate);
            return BAD_VALUE;
        }
    }

    if (mAudioChannels != 1) {
        LOGE("Invalid number of audio channels %d used for amr recording",
                mAudioChannels);
        return BAD_VALUE;
    }

    if (mAudioSource >= AUDIO_SOURCE_LIST_END) {
        LOGE("Invalid audio source: %d", mAudioSource);
        return BAD_VALUE;
    }

    sp<MediaSource> audioEncoder = createAudioSource();
    if (audioEncoder == NULL) {
        return UNKNOWN_ERROR;
    }

    mWriter = new AMRWriter(dup(mOutputFd));
    mWriter->addSource(audioEncoder);

    if (mMaxFileDurationUs != 0) {
        mWriter->setMaxFileDuration(mMaxFileDurationUs);
    }
    if (mMaxFileSizeBytes != 0) {
        mWriter->setMaxFileSize(mMaxFileSizeBytes);
    }
    mWriter->setListener(mListener);
    mWriter->start();

    return OK;
}
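// --- Client-side sketch (not part of StagefrightRecorder) -----------------
// A hedged sketch of a native client whose settings satisfy the checks in
// startAMRRecording() above: AMR-NB container, AMR_NB encoder, one channel,
// and the default 8 kHz sample rate. It assumes Android's platform-internal
// android::MediaRecorder C++ client of this era; the output path and the
// 5-second duration are illustrative only.
#include <media/mediarecorder.h>
#include <fcntl.h>
#include <unistd.h>

using namespace android;

status_t recordAmrClip(const char* path) {
    int fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
    if (fd < 0) return UNKNOWN_ERROR;

    sp<MediaRecorder> recorder = new MediaRecorder();
    recorder->setAudioSource(AUDIO_SOURCE_MIC);
    recorder->setOutputFormat(OUTPUT_FORMAT_AMR_NB);  // routed to startAMRRecording()
    recorder->setAudioEncoder(AUDIO_ENCODER_AMR_NB);  // any other encoder is rejected above
    recorder->setOutputFile(fd, 0, 0);

    status_t err = recorder->prepare();
    if (err == OK) err = recorder->start();
    if (err == OK) {
        sleep(5);          // record for roughly five seconds
        recorder->stop();
    }
    recorder->release();
    close(fd);
    return err;
}
// --------------------------------------------------------------------------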
status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
    sp<MediaSource> audioEncoder;
    switch (mAudioEncoder) {
        case AUDIO_ENCODER_AMR_NB:
        case AUDIO_ENCODER_AMR_WB:
        case AUDIO_ENCODER_AAC:
            audioEncoder = createAudioSource();
            break;
        default:
            LOGE("Unsupported audio encoder: %d", mAudioEncoder);
            return UNKNOWN_ERROR;
    }

    if (audioEncoder == NULL) {
        return UNKNOWN_ERROR;
    }

    writer->addSource(audioEncoder);
    return OK;
}
IAudioSource* cAudioManager::create(const char* name, const char* filename, bool stream)
{
    if(!Initialized) return NULL;
    cAudioMutexBasicLock lock(Mutex);
    cAudioString audioName = safeCStr(name);
    cAudioString path = safeCStr(filename);
    cAudioString ext = getExt(path);
    IAudioDecoderFactory* factory = getAudioDecoderFactory(ext.c_str());
    if(!factory)
    {
        getLogger()->logError("AudioManager",
            "Failed to create Audio Source (%s): No decoder could be found for (.%s).",
            audioName.c_str(), ext.c_str());
        return NULL;
    }

    for(size_t i = 0; i < dataSourcePriorityList.size(); ++i)
    {
        const cAudioString dataSourceName = dataSourcePriorityList[i].second;
        IDataSourceFactory* dataFactory = datasourcemap[dataSourceName];
        if(dataFactory)
        {
            IDataSource* source = dataFactory->CreateDataSource(filename, stream);
            if(source && source->isValid())
            {
                IAudioDecoder* decoder = factory->CreateAudioDecoder(source);
                source->drop();
                IAudioSource* audio = createAudioSource(decoder, audioName, dataSourceName);
                if(audio != NULL)
                    return audio;

                if(source)
                    source->drop();
                return NULL;
            }
        }
    }
    return NULL;
}
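// --- Usage sketch (not part of the library) ------------------------------
// A brief sketch of the file-based path above, driven from application code.
// The file name is a placeholder; passing stream = true decodes on the fly
// instead of pre-loading the whole file into memory.
#include <cAudio.h>

void playBackgroundMusic(cAudio::IAudioManager* manager)
{
    // The extension (.ogg here) selects the decoder factory, and each
    // registered data-source factory is tried in priority order, as in
    // create() above.
    cAudio::IAudioSource* music = manager->create("bgm", "music.ogg", true);
    if(music)
    {
        music->setVolume(0.6f);
        music->play2d(true);  // loop
    }
}
// --------------------------------------------------------------------------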
void setupDarwinStreaming(UsageEnvironment& env, WISInput& inputDevice) {
  // Create a 'Darwin injector' object:
  injector = DarwinInjector::createNew(env, applicationName);

  // For RTCP:
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen + 1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case

  /******************audio***********************/
  if (audioFormat != AFMT_NONE) {
    // Create the audio source:
    sourceAudio = createAudioSource(env, inputDevice.audioSource());

    if (packageFormat != PFMT_TRANSPORT_STREAM) { // there's a separate RTP stream for audio
      // Create 'groupsocks' for RTP and RTCP.
      // (Note: Because we will actually be streaming through a remote Darwin server,
      // via TCP, we just use dummy destination addresses, port numbers, and TTLs here.)
      struct in_addr dummyDestAddress;
      dummyDestAddress.s_addr = 0;
      rtpGroupsockAudio = new Groupsock(env, dummyDestAddress, 0, 0);
      rtcpGroupsockAudio = new Groupsock(env, dummyDestAddress, 0, 0);

      // Create a RTP sink for the audio stream:
      sinkAudio = createAudioRTPSink(env, rtpGroupsockAudio);

      // Create (and start) a 'RTCP instance' for this RTP sink:
      unsigned totalSessionBandwidthAudio = (audioOutputBitrate + 500) / 1000; // in kbps; for RTCP b/w share
      rtcpAudio = RTCPInstance::createNew(env, rtcpGroupsockAudio,
                                          totalSessionBandwidthAudio, CNAME,
                                          sinkAudio, NULL /* we're a server */);
      // Note: This starts RTCP running automatically

      // Add these to our 'Darwin injector':
      injector->addStream(sinkAudio, rtcpAudio);
    }
  }
  /******************end audio***********************/

  /******************video***********************/
  if (videoFormat != VFMT_NONE) {
    // Create the video source:
    if (packageFormat == PFMT_TRANSPORT_STREAM) {
      MPEG2TransportStreamFromESSource* tsSource = MPEG2TransportStreamFromESSource::createNew(env);
      tsSource->addNewVideoSource(inputDevice.videoSource(), 2);
      if (sourceAudio != NULL) tsSource->addNewAudioSource(sourceAudio, 2);

      // Gather the Transport packets into network packet-sized chunks:
      sourceVideo = MPEG2TransportStreamAccumulator::createNew(env, tsSource);
      sourceAudio = NULL;
    } else {
      switch (videoFormat) {
      case VFMT_NONE: // not used
        break;
      case VFMT_MJPEG: {
        sourceVideo = WISJPEGStreamSource::createNew(inputDevice.videoSource());
        break;
      }
      case VFMT_MPEG1:
      case VFMT_MPEG2: {
        sourceVideo = MPEG1or2VideoStreamDiscreteFramer::createNew(env, inputDevice.videoSource());
        break;
      }
      case VFMT_MPEG4: {
        sourceVideo = MPEG4VideoStreamDiscreteFramer::createNew(env, inputDevice.videoSource());
        break;
      }
      }
    }

    // Create 'groupsocks' for RTP and RTCP.
    // (Note: Because we will actually be streaming through a remote Darwin server,
    // via TCP, we just use dummy destination addresses, port numbers, and TTLs here.)
    struct in_addr dummyDestAddress;
    dummyDestAddress.s_addr = 0;
    rtpGroupsockVideo = new Groupsock(env, dummyDestAddress, 0, 0);
    rtcpGroupsockVideo = new Groupsock(env, dummyDestAddress, 0, 0);

    // Create a RTP sink for the video stream:
    unsigned char payloadFormatCode = 97; // if dynamic
    setVideoRTPSinkBufferSize();
    if (packageFormat == PFMT_TRANSPORT_STREAM) {
      sinkVideo = SimpleRTPSink::createNew(env, rtpGroupsockVideo,
                                           33, 90000, "video", "mp2t",
                                           1, True, False /*no 'M' bit*/);
    } else {
      switch (videoFormat) {
      case VFMT_NONE: // not used
        break;
      case VFMT_MJPEG: {
        sinkVideo = JPEGVideoRTPSink::createNew(env, rtpGroupsockVideo);
        break;
      }
      case VFMT_MPEG1:
      case VFMT_MPEG2: {
        sinkVideo = MPEG1or2VideoRTPSink::createNew(env, rtpGroupsockVideo);
        break;
      }
      case VFMT_MPEG4: {
        sinkVideo = MPEG4ESVideoRTPSink::createNew(env, rtpGroupsockVideo, payloadFormatCode);
        break;
      }
      }
    }

    // Create (and start) a 'RTCP instance' for this RTP sink:
    unsigned totalSessionBandwidthVideo = (videoBitrate + 500) / 1000; // in kbps; for RTCP b/w share
    rtcpVideo = RTCPInstance::createNew(env, rtcpGroupsockVideo,
                                        totalSessionBandwidthVideo, CNAME,
                                        sinkVideo, NULL /* we're a server */);
    // Note: This starts RTCP running automatically

    // Add these to our 'Darwin injector':
    injector->addStream(sinkVideo, rtcpVideo);
  }
  /******************end video***********************/

  // Next, specify the destination Darwin Streaming Server:
  char const* remoteStreamName = "test.sdp";
  if (!injector->setDestination(remoteDSSNameOrAddress, remoteStreamName,
                                applicationName, "LIVE555 Streaming Media")) {
    env << "Failed to connect to remote Darwin Streaming Server: " << env.getResultMsg() << "\n";
    exit(1);
  }

  env << "Play this stream (from the Darwin Streaming Server) using the URL:\n"
      << "\trtsp://" << remoteDSSNameOrAddress << "/" << remoteStreamName << "\n";
}