/** * \brief mute output * \param audec pointer to audec * \param en 1 = mute, 0 = unmute * \return 0 on success otherwise negative error code */ extern "C" int android_mute(struct aml_audio_dec* audec, adec_bool_t en) { adec_print("android out mute"); audio_out_operations_t *out_ops = &audec->aout_ops; AudioTrack *track = (AudioTrack *)out_ops->private_data; Mutex::Autolock _l(mLock); if (!track) { adec_print("No track instance!\n"); return -1; } track->mute(en); return 0; }
/** * \brief set output volume * \param audec pointer to audec * \param vol volume value * \return 0 on success otherwise negative error code */ extern "C" int android_set_volume(struct aml_audio_dec* audec, float vol) { adec_print("android set volume"); audio_out_operations_t *out_ops = &audec->aout_ops; AudioTrack *track = (AudioTrack *)out_ops->private_data; Mutex::Autolock _l(mLock); if (!track) { adec_print("No track instance!\n"); return -1; } track->setVolume(vol, vol); return 0; }
/// Open the playback device: configure the member AudioTrack to match the
/// device's frequency/format (mono/stereo, 8/16-bit PCM only) and
/// (re)allocate the mixing buffer.
/// @return NO_ERROR on success, otherwise the failing status code
///         (RETURN_IF propagates any non-OK status).
status_t APlaybackDevice::open()
{
    status_t err;
    int sampleRateInHz;
    int channelConfig;
    int audioFormat;
    int bufferSizeInBytes;

    LOG_FUNC_START

    sampleRateInHz = mDevice->Frequency;
    // Map the device format onto the two channel layouts / sample widths
    // AudioTrack is configured with here.
    channelConfig = aluChannelsFromFormat(mDevice->Format) == 1 ? AUDIO_CHANNEL_OUT_MONO : AUDIO_CHANNEL_OUT_STEREO;
    audioFormat = aluBytesFromFormat(mDevice->Format) == 1 ? AUDIO_FORMAT_PCM_8_BIT : AUDIO_FORMAT_PCM_16_BIT;

    // NOTE(review): AudioTrack::getMinFrameCount() reports a minimum *frame*
    // count, while this variable is named "bytes" and is later used both as
    // the track frameCount and the AAudioBuffer size — confirm the intended
    // unit against the AAudioBuffer contract.
    err = AudioTrack::getMinFrameCount(&bufferSizeInBytes, audioFormat, sampleRateInHz);
    RETURN_IF(err);

    LOGV("rate(%i), channel(%i), format(%i), buffSize(%i), numUpdates(%i)",
        sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes, mDevice->NumUpdates);

    err = mAudioTrack.set(AUDIO_STREAM_MUSIC,
        sampleRateInHz,
        audioFormat,
        channelConfig,
        bufferSizeInBytes, // frameCount
        0, // flags
        0, 0); // callback, callback data (user)
    RETURN_IF(err);
    err = mAudioTrack.initCheck();
    RETURN_IF(err);

    // Drop any buffer left over from a previous open() before allocating
    // one sized for the new configuration.
    if(mBuffer) {
        delete mBuffer;
    }
    mBuffer = new AAudioBuffer(bufferSizeInBytes);

    LOG_FUNC_END
    return NO_ERROR;
}
/**
 * Python bridge: return the volume of the named audio track as a float.
 *
 * BUGFIX: the original returned NULL for "track not found" and "not an
 * audio track" WITHOUT setting a Python exception. A PyObject* entry
 * point must never return NULL without an active exception — CPython
 * turns that into a bare SystemError ("error return without exception
 * set"). We now set a descriptive exception before each NULL return.
 */
PyObject* getAudioTrackVolume(PyObject*, PyObject* args)
{
    const char* trackname;
    if (!PyArg_ParseTuple(args, "s", &trackname)) {
        return NULL; // PyArg_ParseTuple already set the exception
    }

    Track* t = song->findTrack(QString(trackname));
    if (t == NULL) {
        PyErr_SetString(PyExc_ValueError, "track not found");
        return NULL;
    }

    // Volume is only defined for audio tracks.
    if (t->type() == Track::DRUM || t->type() == Track::MIDI) {
        PyErr_SetString(PyExc_TypeError, "track is not an audio track");
        return NULL;
    }

    AudioTrack* track = (AudioTrack*) t;
    return Py_BuildValue("d", track->volume());
}
// Playback loop: repeatedly mix into the staging buffer and push it to
// the AudioTrack until playback is disabled (externally, or by a failed
// write). Always stops and flushes the track on exit.
int APlaybackDevice::handlePlayback()
{
    const int samplesPerBuffer = mBuffer->size() / aluFrameSizeFromFormat(mDevice->Format);

    mAudioTrack.start();
    while (mPlaybackEnabled) {
        aluMixData(mDevice, mBuffer->data(), samplesPerBuffer);
        if (write(mBuffer))
            continue;
        // A failed write aborts playback.
        LOGE("Can't write audio buffer into audio track");
        mPlaybackEnabled = false;
    }
    mAudioTrack.stop();
    mAudioTrack.flush();

    return 0;
}
// Append an inband audio track, keeping m_inbandTracks ordered by each
// track's position in the media file, then attach it to this list's
// media element and schedule the "addtrack" event.
void AudioTrackList::append(PassRefPtr<AudioTrack> prpTrack)
{
    RefPtr<AudioTrack> newTrack = prpTrack;

    // Locate the first existing track whose media-file index is greater
    // than the new track's — that is the insertion point.
    size_t newIndex = newTrack->inbandTrackIndex();
    size_t position = 0;
    while (position < m_inbandTracks.size()) {
        AudioTrack* existing = static_cast<AudioTrack*>(m_inbandTracks[position].get());
        if (existing->inbandTrackIndex() > newIndex)
            break;
        ++position;
    }
    m_inbandTracks.insert(position, newTrack);

    // A track may only belong to one media element.
    ASSERT(!newTrack->mediaElement() || newTrack->mediaElement() == mediaElement());
    newTrack->setMediaElement(mediaElement());

    scheduleAddTrackEvent(newTrack.release());
}
/** * \brief stop output * \param audec pointer to audec * \return 0 on success otherwise negative error code */ extern "C" int android_stop(struct aml_audio_dec* audec) { adec_print("android out stop"); audio_out_operations_t *out_ops = &audec->aout_ops; AudioTrack *track = (AudioTrack *)out_ops->private_data; Mutex::Autolock _l(mLock); if (!track) { adec_print("No track instance!\n"); return -1; } track->stop(); /* release AudioTrack */ delete track; out_ops->private_data = NULL; return 0; }
// Push the whole buffer into the AudioTrack, looping on partial writes.
// Returns false as soon as the track reports a write error.
bool APlaybackDevice::write(AAudioBuffer* buffer)
{
    for (ssize_t written = 0; written < buffer->size(); ) {
        const ssize_t chunk = mAudioTrack.write(buffer->data() + written, buffer->size() - written);
        if (chunk < 0)
            return false;
        written += chunk;
    }
    return true;
}
// Rebuild the routing dialog after the song's routing changed: clear all
// list widgets, repopulate the tracks list from the song's audio tracks,
// and restore the previous row selection when it is still valid.
void AudioPortConfig::routingChanged()
{
    //---------------------------------------------------
    //  populate lists
    //---------------------------------------------------

    routeList->clear();
    newSrcList->clear();
    newDstList->clear();
    tracksList->clear();
    // Buttons stay disabled until a selection is made again.
    btnConnectOut->setEnabled(false);
    connectButton->setEnabled(false);
    removeButton->setEnabled(false);

    TrackList* tl = song->tracks();
    for (ciTrack i = tl->begin(); i != tl->end(); ++i)
    {
        // Only audio tracks are routable here.
        if ((*i)->isMidiTrack())
            continue;
        AudioTrack* track = (AudioTrack*) (*i);
        if (track->type() == Track::WAVE_OUTPUT_HELPER || track->type() == Track::WAVE_INPUT_HELPER)
        {
            // Input/output helpers get one list entry per channel.
            for (int channel = 0; channel < track->channels(); ++channel)
            {
                Route r(track, channel);
                tracksList->addItem(r.name());
            }
        }
        else
            // Other audio tracks appear once, with no channel (-1).
            tracksList->addItem(Route(track, -1).name());
    }
    // Re-select the previously selected row if it still exists.
    if(selectedIndex < tracksList->count())
        tracksList->setCurrentRow(selectedIndex, QItemSelectionModel::ClearAndSelect);
    //if(_selected)
    //    setSelected(_selected->name());
}
// Open an output AudioTrack for the given PCM parameters. bufferCount is
// clamped up to mMinBufferCount, and the track's frame count is scaled
// from the audio-flinger output frame count and sample rate. On success
// the track is stored in mTrack with volume and latency initialized.
status_t MediaPlayerService::AudioOutput::open(uint32_t sampleRate, int channelCount, int format, int bufferCount)
{
    // Enforce the minimum buffer count.
    if (bufferCount < mMinBufferCount) {
        LOGD("bufferCount (%d) is too small and increased to %d", bufferCount, mMinBufferCount);
        bufferCount = mMinBufferCount;
    }
    LOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount);

    // Drop any previously opened track first.
    if (mTrack)
        close();

    int afSampleRate;
    int afFrameCount;
    if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR)
        return NO_INIT;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR)
        return NO_INIT;

    // Scale the hardware frame count to this stream's rate and buffering.
    const int frameCount = (sampleRate * afFrameCount * bufferCount) / afSampleRate;
    AudioTrack *t = new AudioTrack(mStreamType, sampleRate, format, channelCount, frameCount);
    if ((t == 0) || (t->initCheck() != NO_ERROR)) {
        LOGE("Unable to create audio track");
        delete t;
        return NO_INIT;
    }

    LOGV("setVolume");
    t->setVolume(mLeftVolume, mRightVolume);
    mMsecsPerFrame = 1.e3 / (float) sampleRate;
    mLatency = t->latency() + kAudioVideoDelayMs;
    mTrack = t;
    return NO_ERROR;
}
// Implements the MSE "removeSourceBuffer" algorithm: detach every audio,
// video and text track the buffer contributed, deactivate it, remove it
// from sourceBuffers, and release its resources. The numbered comments
// follow the spec's step numbering.
void MediaSource::removeSourceBuffer(SourceBuffer& buffer, ExceptionCode& ec)
{
    LOG(MediaSource, "MediaSource::removeSourceBuffer() %p", this);
    // Keep the buffer alive for the whole removal sequence.
    Ref<SourceBuffer> protect(buffer);

    // 2. If sourceBuffer specifies an object that is not in sourceBuffers then
    // throw a NOT_FOUND_ERR exception and abort these steps.
    if (!m_sourceBuffers->length() || !m_sourceBuffers->contains(buffer)) {
        ec = NOT_FOUND_ERR;
        return;
    }

    // 3. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
    buffer.abortIfUpdating();

    // 4. Let SourceBuffer audioTracks list equal the AudioTrackList object returned by sourceBuffer.audioTracks.
    RefPtr<AudioTrackList> audioTracks = buffer.audioTracks();

    // 5. If the SourceBuffer audioTracks list is not empty, then run the following steps:
    if (audioTracks->length()) {
        // 5.1 Let HTMLMediaElement audioTracks list equal the AudioTrackList object returned by the audioTracks
        // attribute on the HTMLMediaElement.
        // 5.2 Let the removed enabled audio track flag equal false.
        bool removedEnabledAudioTrack = false;

        // 5.3 For each AudioTrack object in the SourceBuffer audioTracks list, run the following steps:
        while (audioTracks->length()) {
            AudioTrack* track = audioTracks->lastItem();

            // 5.3.1 Set the sourceBuffer attribute on the AudioTrack object to null.
            track->setSourceBuffer(nullptr);

            // 5.3.2 If the enabled attribute on the AudioTrack object is true, then set the removed enabled
            // audio track flag to true.
            if (track->enabled())
                removedEnabledAudioTrack = true;

            // 5.3.3 Remove the AudioTrack object from the HTMLMediaElement audioTracks list.
            // 5.3.4 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the HTMLMediaElement audioTracks list.
            if (mediaElement())
                mediaElement()->removeAudioTrack(track);

            // 5.3.5 Remove the AudioTrack object from the SourceBuffer audioTracks list.
            // 5.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the SourceBuffer audioTracks list.
            audioTracks->remove(track);
        }

        // 5.4 If the removed enabled audio track flag equals true, then queue a task to fire a simple event
        // named change at the HTMLMediaElement audioTracks list.
        if (removedEnabledAudioTrack)
            mediaElement()->audioTracks()->scheduleChangeEvent();
    }

    // 6. Let SourceBuffer videoTracks list equal the VideoTrackList object returned by sourceBuffer.videoTracks.
    RefPtr<VideoTrackList> videoTracks = buffer.videoTracks();

    // 7. If the SourceBuffer videoTracks list is not empty, then run the following steps:
    if (videoTracks->length()) {
        // 7.1 Let HTMLMediaElement videoTracks list equal the VideoTrackList object returned by the videoTracks
        // attribute on the HTMLMediaElement.
        // 7.2 Let the removed selected video track flag equal false.
        bool removedSelectedVideoTrack = false;

        // 7.3 For each VideoTrack object in the SourceBuffer videoTracks list, run the following steps:
        while (videoTracks->length()) {
            VideoTrack* track = videoTracks->lastItem();

            // 7.3.1 Set the sourceBuffer attribute on the VideoTrack object to null.
            track->setSourceBuffer(nullptr);

            // 7.3.2 If the selected attribute on the VideoTrack object is true, then set the removed selected
            // video track flag to true.
            if (track->selected())
                removedSelectedVideoTrack = true;

            // 7.3.3 Remove the VideoTrack object from the HTMLMediaElement videoTracks list.
            // 7.3.4 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the HTMLMediaElement videoTracks list.
            if (mediaElement())
                mediaElement()->removeVideoTrack(track);

            // 7.3.5 Remove the VideoTrack object from the SourceBuffer videoTracks list.
            // 7.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the SourceBuffer videoTracks list.
            videoTracks->remove(track);
        }

        // 7.4 If the removed selected video track flag equals true, then queue a task to fire a simple event
        // named change at the HTMLMediaElement videoTracks list.
        if (removedSelectedVideoTrack)
            mediaElement()->videoTracks()->scheduleChangeEvent();
    }

    // 8. Let SourceBuffer textTracks list equal the TextTrackList object returned by sourceBuffer.textTracks.
    RefPtr<TextTrackList> textTracks = buffer.textTracks();

    // 9. If the SourceBuffer textTracks list is not empty, then run the following steps:
    if (textTracks->length()) {
        // 9.1 Let HTMLMediaElement textTracks list equal the TextTrackList object returned by the textTracks
        // attribute on the HTMLMediaElement.
        // 9.2 Let the removed enabled text track flag equal false.
        bool removedEnabledTextTrack = false;

        // 9.3 For each TextTrack object in the SourceBuffer textTracks list, run the following steps:
        while (textTracks->length()) {
            TextTrack* track = textTracks->lastItem();

            // 9.3.1 Set the sourceBuffer attribute on the TextTrack object to null.
            track->setSourceBuffer(nullptr);

            // 9.3.2 If the mode attribute on the TextTrack object is set to "showing" or "hidden", then
            // set the removed enabled text track flag to true.
            if (track->mode() == TextTrack::showingKeyword() || track->mode() == TextTrack::hiddenKeyword())
                removedEnabledTextTrack = true;

            // 9.3.3 Remove the TextTrack object from the HTMLMediaElement textTracks list.
            // 9.3.4 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the HTMLMediaElement textTracks list.
            if (mediaElement())
                mediaElement()->removeTextTrack(track);

            // 9.3.5 Remove the TextTrack object from the SourceBuffer textTracks list.
            // 9.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the SourceBuffer textTracks list.
            textTracks->remove(track);
        }

        // 9.4 If the removed enabled text track flag equals true, then queue a task to fire a simple event
        // named change at the HTMLMediaElement textTracks list.
        if (removedEnabledTextTrack)
            mediaElement()->textTracks()->scheduleChangeEvent();
    }

    // 10. If sourceBuffer is in activeSourceBuffers, then remove sourceBuffer from activeSourceBuffers ...
    m_activeSourceBuffers->remove(buffer);

    // 11. Remove sourceBuffer from sourceBuffers and fire a removesourcebuffer event
    // on that object.
    m_sourceBuffers->remove(buffer);

    // 12. Destroy all resources for sourceBuffer.
    buffer.removedFromMediaSource();
}
// Resolve a route name string back into a Route object.
//
// rn may carry an obsolete "N:" channel prefix (channel = digit - '1').
// With rtype == -1 every namespace is searched in order: JACK ports,
// song tracks (MIDI and audio), MIDI devices, then MIDI-port names; with
// an explicit rtype only the corresponding namespace is searched.
// Returns a Route with a null track when nothing matches.
Route name2route(const QString& rn, bool /*dst*/, int rtype)/*{{{*/
{
    // printf("name2route %s\n", rn.toLatin1().constData());
    int channel = -1;
    QString s(rn);
    // Support old route style in oom files. Obsolete.
    if (rn.size() >= 2 && rn[0].isNumber() && rn[1] == ':')
    {
        // "1:" means channel 0, "2:" channel 1, etc.
        channel = rn[0].toAscii() - int('1');
        s = rn.mid(2);
    }

    if (rtype == -1)
    {
        // No type given: try every namespace in turn.
        if (checkAudioDevice())
        {
            void* p = audioDevice->findPort(s.toLatin1().constData());
            if (p)
                return Route(p, channel);
        }
        TrackList* tl = song->tracks();
        for (iTrack i = tl->begin(); i != tl->end(); ++i)
        {
            if ((*i)->isMidiTrack())
            {
                MidiTrack* track = (MidiTrack*) * i;
                if (track->name() == s)
                    return Route(track, channel);
            }
            else
            {
                AudioTrack* track = (AudioTrack*) * i;
                if (track->name() == s)
                    return Route(track, channel);
            }
        }
        for (iMidiDevice i = midiDevices.begin(); i != midiDevices.end(); ++i)
        {
            if ((*i)->name() == s)
                return Route(*i, channel);
        }
        // p3.3.49
        if (s.left(ROUTE_MIDIPORT_NAME_PREFIX.length()) == ROUTE_MIDIPORT_NAME_PREFIX)
        {
            bool ok = false;
            int port = s.mid(ROUTE_MIDIPORT_NAME_PREFIX.length()).toInt(&ok);
            if (ok)
                return Route(port, channel);
        }
    }
    else
    {
        if (rtype == Route::TRACK_ROUTE)
        {
            TrackList* tl = song->tracks();
            for (iTrack i = tl->begin(); i != tl->end(); ++i)
            {
                if ((*i)->isMidiTrack())
                {
                    MidiTrack* track = (MidiTrack*) * i;
                    if (track->name() == s)
                        return Route(track, channel);
                }
                else
                {
                    AudioTrack* track = (AudioTrack*) * i;
                    if (track->name() == s)
                        return Route(track, channel);
                }
            }
        }// TODO Distinguish the device types
        else if (rtype == Route::MIDI_DEVICE_ROUTE)
        {
            for (iMidiDevice i = midiDevices.begin(); i != midiDevices.end(); ++i)
            {
                if ((*i)->name() == s)
                    return Route(*i, channel);
            }
        }
        else if (rtype == Route::JACK_ROUTE)
        {
            if (checkAudioDevice())
            {
                void* p = audioDevice->findPort(s.toLatin1().constData());
                if (p)
                    return Route(p, channel);
            }
        }
        else if (rtype == Route::MIDI_PORT_ROUTE) // p3.3.49
        {
            if (s.left(ROUTE_MIDIPORT_NAME_PREFIX.length()) == ROUTE_MIDIPORT_NAME_PREFIX)
            {
                bool ok = false;
                int port = s.mid(ROUTE_MIDIPORT_NAME_PREFIX.length()).toInt(&ok);
                if (ok)
                    return Route(port, channel);
            }
        }
    }
    // Nothing matched: report and return a null-track route.
    printf(" name2route: <%s> not found\n", rn.toLatin1().constData());
    return Route((Track*) 0, channel);
}/*}}}*/
/**
 * Initialize the Android AudioTrack back-end and wire it up to Quake's
 * DMA-style `shm` sound structure.
 *
 * @return true when the AudioTrack was configured and started, false on
 *         any failure (sound disabled, track setup error, bad buffer).
 */
qboolean SNDDMA_Init(void)
{
    if ( ! enableSound() ) {
        return false;
    }

    gDMAByteIndex = 0;

    // Initialize the AudioTrack.
    status_t result = gAudioTrack.set(
        AudioSystem::DEFAULT, // stream type
        SAMPLE_RATE,   // sample rate
        BITS_PER_SAMPLE == 16 ? AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT,      // format (8 or 16)
        (CHANNEL_COUNT > 1) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,       // channel mask
        0,       // default buffer size
        0, // flags
        AndroidQuakeSoundCallback, // callback
        0,  // user
        0); // default notification size

    LOGI("AudioTrack status = %d (%s)\n", result, result == NO_ERROR ? "success" : "error");

    if ( result == NO_ERROR ) {
        LOGI("AudioTrack latency = %u ms\n", gAudioTrack.latency());
        LOGI("AudioTrack format = %u bits\n", gAudioTrack.format() == AudioSystem::PCM_16_BIT ? 16 : 8);
        LOGI("AudioTrack sample rate = %u Hz\n", gAudioTrack.getSampleRate());
        LOGI("AudioTrack frame count = %d\n", int(gAudioTrack.frameCount()));
        LOGI("AudioTrack channel count = %d\n", gAudioTrack.channelCount());

        // Initialize Quake's idea of a DMA buffer.
        shm = &sn;
        memset((void*)&sn, 0, sizeof(sn));

        shm->splitbuffer = false; // Not used.
        shm->samplebits = gAudioTrack.format() == AudioSystem::PCM_16_BIT ? 16 : 8;
        shm->speed = gAudioTrack.getSampleRate();
        shm->channels = gAudioTrack.channelCount();
        shm->samples = TOTAL_BUFFER_SIZE / BYTES_PER_SAMPLE;
        shm->samplepos = 0; // Not used.
        shm->buffer = (unsigned char*) Hunk_AllocName(TOTAL_BUFFER_SIZE, (char*) "shmbuf");
        shm->submission_chunk = 1; // Not used.
        shm->soundalive = true;

        // BUGFIX: the old test ((shm->samples & 0x1ff) != 0) only checked
        // "multiple of 512" — e.g. 1536 passed although it is not a power
        // of two. Quake's mixer masks sample positions, so enforce a true
        // power-of-two count as the error message states.
        if ( (shm->samples & (shm->samples - 1)) != 0 ) {
            LOGE("SNDDDMA_Init: samples must be power of two.");
            return false;
        }

        if ( shm->buffer == 0 ) {
            LOGE("SNDDDMA_Init: Could not allocate sound buffer.");
            return false;
        }

        gAudioTrack.setVolume(1.0f, 1.0f);
        gAudioTrack.start();
    }

    return result == NO_ERROR;
}
/*
==============
SNDDMA_Shutdown

Reset the sound device for exiting
===============
*/
void SNDDMA_Shutdown(void)
{
    // Stop the global AudioTrack started by SNDDMA_Init().
    gAudioTrack.stop();
}
// Dispatch a user event posted by the Python bridge subsystem onto the
// song. Returns false for non-user events and for requests that name a
// missing or wrong-typed track; true once the event has been handled.
//
// BUGFIXES in SONG_TOGGLE_EFFECT:
//  - 'pipeline' was declared twice in the same scope (Pipeline* and
//    const Pipeline*), a redefinition that cannot compile; the mutable
//    pointer is kept because setOn() modifies the pipeline.
//  - the bounds check allowed fxid == pipeline->size(); valid plugin
//    indices are 0 .. size-1, so reject fxid >= pdepth.
bool Song::event(QEvent* _e)
{
    if (_e->type() != QEvent::User)
        return false; //ignore all events except user events, which are events from Python bridge subsystem

    QPybridgeEvent* e = (QPybridgeEvent*) _e;
    switch (e->getType())
    {
        case QPybridgeEvent::SONG_UPDATE:
            this->update(e->getP1());
            break;
        case QPybridgeEvent::SONGLEN_CHANGE:
            this->setLen(e->getP1());
            break;
        case QPybridgeEvent::SONG_POSCHANGE:
            this->setPos(e->getP1(), e->getP2());
            break;
        case QPybridgeEvent::SONG_SETPLAY:
            this->setPlay(true);
            break;
        case QPybridgeEvent::SONG_SETSTOP:
            this->setStop(true);
            break;
        case QPybridgeEvent::SONG_REWIND:
            this->rewindStart();
            break;
        case QPybridgeEvent::SONG_SETMUTE:
        {
            Track* track = this->findTrack(e->getS1());
            if (track == NULL)
                return false;

            bool muted = e->getP1() == 1;
            track->setMute(muted);
            this->update(SC_MUTE | SC_TRACK_MODIFIED);
            break;
        }
        case QPybridgeEvent::SONG_SETCTRL:
        {
            Track* t = this->findTrack(e->getS1());
            if (t == NULL)
                return false;
            if (t->isMidiTrack() == false)
                return false;

            MidiTrack* track = (MidiTrack*) t;
            int chan = track->outChannel();
            int num = e->getP1();
            int val = e->getP2();
            int tick = song->cpos();
            MidiPlayEvent ev(tick, track->outPort(), chan, ME_CONTROLLER, num, val, t);
            audio->msgPlayMidiEvent(&ev);
            song->update(SC_MIDI_CONTROLLER);
            break;
        }
        case QPybridgeEvent::SONG_SETAUDIOVOL:
        {
            Track* t = this->findTrack(e->getS1());
            if (t == NULL)
                return false;
            if (t->type() == Track::DRUM || t->type() == Track::MIDI)
                return false;

            AudioTrack* track = (AudioTrack*) t;
            track->setVolume(e->getD1());
            break;
        }
        case QPybridgeEvent::SONG_IMPORT_PART:
        {
            Track* track = this->findTrack(e->getS1());
            QString filename = e->getS2();
            unsigned int tick = e->getP1();
            if (track == NULL)
                return false;

            oom->importPartToTrack(filename, tick, track);
            break;
        }
        case QPybridgeEvent::SONG_TOGGLE_EFFECT:
        {
            Track* t = this->findTrack(e->getS1());
            if (t == NULL)
                return false;
            if (t->type() != Track::WAVE)
                return false;

            int fxid = e->getP1();
            int onoff = (e->getP2() == 1);

            AudioTrack* track = (AudioTrack*) t;
            Pipeline* pipeline = track->efxPipe();
            if (pipeline)
            {
                int pdepth = pipeline->size();
                if (fxid >= pdepth)
                    return false;
                pipeline->setOn(fxid, onoff);
            }
            break;
        }
        case QPybridgeEvent::SONG_ADD_TRACK:
            song->addTrack(e->getP1());
            song->updateTrackViews();
            break;
        case QPybridgeEvent::SONG_CHANGE_TRACKNAME:
        {
            Track* t = this->findTrack(e->getS1());
            if (t == NULL)
                return false;
            t->setName(e->getS2());
            break;
        }
        case QPybridgeEvent::SONG_DELETE_TRACK:
        {
            Track* t = this->findTrack(e->getS1());
            if (t == NULL)
                return false;
            audio->msgRemoveTrack(t);
            break;
        }
        default:
            printf("Unknown pythonthread event received: %d\n", e->getType());
            break;
    }
    return true;
}
// Start (or restart) callback-driven playback through libmedia's
// AudioTrack for the given channel count and sample rate.
//
// Fast path: if a track already exists with the same parameters it is
// just flushed and restarted. Otherwise a new AudioTrack is created
// (translating the channel count into the SDK-version-specific channel
// mask) or the existing one is re-tuned to the new sample rate.
// Returns 0 on success or a LIBLOSSLESS_ERR_* code.
int libmediacb_start(msm_ctx *ctx, int channels, int samplerate)
{
    __android_log_print(ANDROID_LOG_INFO,"liblossless","libmedia_ START REEEEACHED REACHEDDDDDD1");
    status_t status;
    int chans;

    if(!ctx) return LIBLOSSLESS_ERR_NOCTX;

    __android_log_print(ANDROID_LOG_INFO,"liblossless","libmediacb_start ctx=%p chans=%d rate=%d afd=%d atrack=%p",
        ctx, channels, samplerate,ctx->afd,ctx->track);

    AudioTrack* atrack = (AudioTrack *) ctx->track;

    // Same parameters as the current track: flush, reset the callback
    // ring-buffer indices and restart.
    if(atrack && ctx->samplerate == samplerate && ctx->channels == channels) {
        __android_log_print(ANDROID_LOG_INFO,"liblossless","same audio track parameters, restarting");
        atrack->stop();
        atrack->flush();
        ctx->cbstart = 0;
        ctx->cbend = 0;
        atrack->start();
        return 0;
    }

    // Lazily allocate the callback ring buffer.
    if(!ctx->cbbuf) {
        ctx->cbbuf = (unsigned char *) malloc(DEFAULT_CB_BUFSZ);
        if(!ctx->cbbuf) return LIBLOSSLESS_ERR_NOMEM;
        ctx->cbbuf_size = DEFAULT_CB_BUFSZ;
    }
    ctx->cbstart = 0;
    ctx->cbend = 0;

    if(!atrack) {
        atrack = new AudioTrack();
        if(!atrack) {
            __android_log_print(ANDROID_LOG_ERROR,"liblossless","could not create AudioTrack!");
            return LIBLOSSLESS_ERR_INIT;
        }
        __android_log_print(ANDROID_LOG_INFO,"liblossless","AudioTrack created at %p. Now trying to setup (buffsz %d)", atrack, DEFAULT_ATRACK_CONF_BUFSZ);

        // Channel-mask encodings changed across Android SDK versions:
        // read the version once from the system property and translate
        // the plain channel count into the matching mask value.
        if(!sdk_version) {
            char c[PROP_VALUE_MAX];
            if(__system_property_get("ro.build.version.sdk",c) > 0) sscanf(c,"%d",&sdk_version);
            else sdk_version = 8;
            __android_log_print(ANDROID_LOG_INFO,"liblossless","got sdk_version %d", sdk_version);
        }
        if(sdk_version > 13) chans = (channels == 2) ? 3 : 1;
        else if(sdk_version > 6) chans = (channels == 2) ? 12 : 4;
        else chans = channels;

#ifdef BUILD_JB
        status = atrack->set(_MUSIC, samplerate, FMTBPS, chans,
            DEFAULT_ATRACK_CONF_BUFSZ/(2*channels),AUDIO_OUTPUT_FLAG_NONE,cbf,ctx);
#else
        status = atrack->set(_MUSIC, samplerate, FMTBPS, chans,
            DEFAULT_ATRACK_CONF_BUFSZ/(2*channels),0,cbf,ctx);
#endif
        if(status != NO_ERROR) {
            __android_log_print(ANDROID_LOG_INFO,"liblossless","AudioTrack setup failed");
            delete atrack;
            return LIBLOSSLESS_ERR_INIT;
        }
        ctx->track = atrack;
    } else {
        // Parameters changed on an existing track: stop it and try to
        // re-tune the sample rate in place.
        atrack->stop();
        atrack->flush();
        ctx->cbstart = 0;
        ctx->cbend = 0;
        __android_log_print(ANDROID_LOG_INFO,"liblossless","trying to reconfigure old AudioTrack");
        status = atrack->setSampleRate(samplerate);
        if(status != NO_ERROR) {
            __android_log_print(ANDROID_LOG_INFO,"liblossless","could not set AudioTrack sample rate");
            return LIBLOSSLESS_ERR_INIT;
        }
    }

    __android_log_print(ANDROID_LOG_INFO,"liblossless","AudioTrack setup OK, starting audio!");
    ctx->conf_size = DEFAULT_CONF_BUFSZ;
    atrack->start();
    __android_log_print(ANDROID_LOG_INFO,"liblossless","playback started!");
    // Disable marker/period notifications; data is pulled via cbf.
    atrack->setPositionUpdatePeriod(0);
    atrack->setMarkerPosition(0);

    // Log the thread priority once, on the first successful start.
    static int s(0);
    if(!s) {
        print_priority(__FUNCTION__);
        s = 1;
    }
    return 0;
}
void AudioPortConfig::trackSelectionChanged() { routeList->clear(); newSrcList->clear(); newDstList->clear(); QListWidgetItem* titem = tracksList->currentItem(); AudioTrack* atrack = (AudioTrack*)song->findTrack(titem->text()); if(atrack) { _selected = atrack; selectedIndex = tracksList->row(titem); //TrackList* tl = song->tracks(); //for(iTrack t = tl->begin(); t != tl->end(); ++t) //{ // if((*t)->isMidiTrack()) // continue; // AudioTrack* track = (AudioTrack*) (*t); // if(track->name() == atrack->name()) // continue; //You cant connect a track to itself //int channels = track->channels(); switch (atrack->type()) { case Track::WAVE_OUTPUT_HELPER:/*{{{*/ for(iTrack t = song->tracks()->begin(); t != song->tracks()->end(); ++t) { if((*t)->isMidiTrack()) continue; AudioTrack* track = (AudioTrack*) (*t); if(track->name() == atrack->name() || track->type() == Track::WAVE_OUTPUT_HELPER) continue; //You cant connect a track to itself //for (int channel = 0; channel < track->channels(); ++channel) //{ Route r(track, -1); newSrcList->addItem(r.name()); //} } insertInputs(); //newDstList->addItem(Route(track, -1).name()); break; case Track::WAVE_INPUT_HELPER: for(iTrack t = song->tracks()->begin(); t != song->tracks()->end(); ++t) { if((*t)->isMidiTrack()) continue; AudioTrack* track = (AudioTrack*) (*t); if(track->name() == atrack->name()) continue; //You cant connect a track to itself switch(track->type()) { case Track::WAVE_OUTPUT_HELPER: case Track::WAVE: newDstList->addItem(Route(track, -1).name()); break; default: break; } } insertOutputs(); break; case Track::WAVE: for(iTrack t = song->tracks()->begin(); t != song->tracks()->end(); ++t) { if((*t)->isMidiTrack()) continue; AudioTrack* track = (AudioTrack*) (*t); if(track->name() == atrack->name()) continue; //You cant connect a track to itself if(track->type() == Track::WAVE_INPUT_HELPER) { newSrcList->addItem(Route(track, -1).name()); } else if(track->type() == Track::WAVE_OUTPUT_HELPER) { 
newDstList->addItem(Route(track, -1).name()); } } break; default: break;/*}}}*/ } //} QTreeWidgetItem* widgetItem; const RouteList* rl = atrack->outRoutes(); for (ciRoute r = rl->begin(); r != rl->end(); ++r) { QString src(""); if (atrack->type() == Track::WAVE_OUTPUT_HELPER) { widgetItem = new QTreeWidgetItem(routeList, QStringList() << src << QString("") << atrack->name() << r->name() << QString::number(r->channel), Track::WAVE_OUTPUT_HELPER); } else { widgetItem = new QTreeWidgetItem(routeList, QStringList() << src << QString("") << atrack->name() << r->name() << QString::number(0), Track::WAVE_OUTPUT_HELPER); } widgetItem->setTextAlignment(1, Qt::AlignHCenter); widgetItem->setTextAlignment(4, Qt::AlignHCenter); } const RouteList* rli = atrack->inRoutes(); for (ciRoute ri = rli->begin(); ri != rli->end(); ++ri) { QString src(""); if (atrack->type() == Track::WAVE_INPUT_HELPER) { widgetItem = new QTreeWidgetItem(routeList, QStringList() << ri->name() << QString::number(ri->channel) << atrack->name() << src << QString(""), Track::WAVE_INPUT_HELPER); } else { widgetItem = new QTreeWidgetItem(routeList, QStringList() << ri->name() << QString::number(0) << atrack->name() << src << QString(""), Track::WAVE_INPUT_HELPER); } widgetItem->setTextAlignment(1, Qt::AlignHCenter); widgetItem->setTextAlignment(4, Qt::AlignHCenter); } routeSelectionChanged(); // init remove button srcSelectionChanged(); // init select button } }
/**
 * \brief output initialization
 * \param audec pointer to audec
 * \return 0 on success otherwise negative error code
 *
 * Creates an AudioTrack configured for the decoder's sample rate and
 * channel count (16-bit PCM, mono or stereo) and stores it in
 * aout_ops.private_data. The track->set() signature differs per Android
 * release, hence the _VERSION_JB / _VERSION_ICS / legacy branches.
 */
extern "C" int android_init(struct aml_audio_dec* audec)
{
    adec_print("android out init");
    status_t status;
    AudioTrack *track;
    audio_out_operations_t *out_ops = &audec->aout_ops;
    Mutex::Autolock _l(mLock);
    track = new AudioTrack();
    if (track == NULL) {
        adec_print("AudioTrack Create Failed!");
        return -1;
    }

    // Tie the track to the decoder's audio session.
    int SessionID = audec->SessionID;
    adec_print("SessionID = %d",SessionID);

#if defined(_VERSION_JB)
    status = track->set(AUDIO_STREAM_MUSIC,
                        audec->samplerate,
                        AUDIO_FORMAT_PCM_16_BIT,
                        (audec->channels == 1) ? AUDIO_CHANNEL_OUT_MONO : AUDIO_CHANNEL_OUT_STEREO,
                        0,                      // frameCount
                        AUDIO_OUTPUT_FLAG_NONE, // flags
                        audioCallback,
                        audec,                  // user when callback
                        0,                      // notificationFrames
                        0,                      // shared buffer
                        false,                  // threadCanCallJava
                        SessionID);             // sessionId
#elif defined(_VERSION_ICS)
    status = track->set(AUDIO_STREAM_MUSIC,
                        audec->samplerate,
                        AUDIO_FORMAT_PCM_16_BIT,
                        (audec->channels == 1) ? AUDIO_CHANNEL_OUT_MONO : AUDIO_CHANNEL_OUT_STEREO,
                        0,                      // frameCount
                        0,                      // flags
                        audioCallback,
                        audec,                  // user when callback
                        0,                      // notificationFrames
                        0,                      // shared buffer
                        false,                  // threadCanCallJava
                        SessionID);             // sessionId
#else
    // Older releases use the AudioSystem enums and take no
    // threadCanCallJava parameter.
    status = track->set(AudioSystem::MUSIC,
                        audec->samplerate,
                        AudioSystem::PCM_16_BIT,
                        (audec->channels == 1) ? AudioSystem::CHANNEL_OUT_MONO : AudioSystem::CHANNEL_OUT_STEREO,
                        0,                      // frameCount
                        0,                      // flags
                        audioCallback,
                        audec,                  // user when callback
                        0,                      // notificationFrames
                        0,                      // shared buffer
                        SessionID);
#endif
    if (status != NO_ERROR) {
        adec_print("track->set returns %d", status);
        adec_print("audio out samplet %d", audec->samplerate);
        adec_print("audio out channels %d", audec->channels);
        delete track;
        track = NULL;
        return -1;
    }

    // Prepare the resampler used by the output path.
    af_resample_linear_init();
    out_ops->private_data = (void *)track;
    return 0;
}
// True while playback is enabled and the underlying AudioTrack has not
// stopped.
bool APlaybackDevice::isPlaying()
{
    if (!mPlaybackEnabled)
        return false;
    return !mAudioTrack.stopped();
}
// Start a sample on this channel. If the channel is busy the request is
// queued and the current voice is stopped ("stolen"); otherwise a fresh
// AudioTrack is created, swapped in under the channel lock, and the old
// track is deleted outside the lock.
void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftVolume,
        float rightVolume, int priority, int loop, float rate)
{
    AudioTrack* oldTrack;

    LOGV("play %p: sampleID=%d, channelID=%d, leftVolume=%f, rightVolume=%f, priority=%d, loop=%d, rate=%f",
            this, sample->sampleID(), nextChannelID, leftVolume, rightVolume, priority, loop, rate);

    // if not idle, this voice is being stolen
    if (mState != IDLE) {
        LOGV("channel %d stolen - event queued for channel %d", channelID(), nextChannelID);
        mNextEvent.set(sample, nextChannelID, leftVolume, rightVolume, priority, loop, rate);
        stop();
        return;
    }

    // initialize track
    int afFrameCount;
    int afSampleRate;
    int streamType = mSoundPool->streamType();
    // Fall back to compile-time defaults if the output config is unavailable.
    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
        afFrameCount = kDefaultFrameCount;
    }
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        afSampleRate = kDefaultSampleRate;
    }
    int numChannels = sample->numChannels();
    // Playback rate scales the effective sample rate (rounded to nearest).
    uint32_t sampleRate = uint32_t(float(sample->sampleRate()) * rate + 0.5);
    uint32_t bufferFrames = (afFrameCount * sampleRate) / afSampleRate;
    uint32_t frameCount = 0;

    // Looped samples need the track's loop region to span the whole sample.
    if (loop) {
        frameCount = sample->size()/numChannels/((sample->format() == AudioSystem::PCM_16_BIT) ? sizeof(int16_t) : sizeof(uint8_t));
    }

#ifndef USE_SHARED_MEM_BUFFER
    // Ensure minimum audio buffer size in case of short looped sample
    if(frameCount < kDefaultBufferCount * bufferFrames) {
        frameCount = kDefaultBufferCount * bufferFrames;
    }
#endif

    AudioTrack* newTrack;

    // mToggle toggles each time a track is started on a given channel.
    // The toggle is concatenated with the SoundChannel address and passed to AudioTrack
    // as callback user data. This enables the detection of callbacks received from the old
    // audio track while the new one is being started and avoids processing them with
    // wrong audio buffer size (mAudioBufferSize)
    unsigned long toggle = mToggle ^ 1;
    void *userData = (void *)((unsigned long)this | toggle);

#ifdef USE_SHARED_MEM_BUFFER
    newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
            numChannels, sample->getIMemory(), 0, callback, userData);
#else
    newTrack = new AudioTrack(streamType, sampleRate, sample->format(),
            numChannels, frameCount, 0, callback, userData, bufferFrames);
#endif
    if (newTrack->initCheck() != NO_ERROR) {
        LOGE("Error creating AudioTrack");
        delete newTrack;
        return;
    }
    LOGV("setVolume %p", newTrack);
    newTrack->setVolume(leftVolume, rightVolume);
    newTrack->setLoop(0, frameCount, loop);

    {
        // Swap in the new track and channel state atomically.
        Mutex::Autolock lock(&mLock);
        // From now on, AudioTrack callbacks received with the previous toggle value will be ignored.
        mToggle = toggle;
        oldTrack = mAudioTrack;
        mAudioTrack = newTrack;
        mPos = 0;
        mSample = sample;
        mChannelID = nextChannelID;
        mPriority = priority;
        mLoop = loop;
        mLeftVolume = leftVolume;
        mRightVolume = rightVolume;
        mNumChannels = numChannels;
        mRate = rate;
        clearNextEvent();
        mState = PLAYING;
        mAudioTrack->start();
        mAudioBufferSize = newTrack->frameCount()*newTrack->frameSize();
    }

    // Delete the old track outside the lock to avoid holding it across
    // the AudioTrack destructor.
    LOGV("delete oldTrack %p", oldTrack);
    delete oldTrack;
}
// Handle an MSE initialization segment: for each incoming media track,
// either create a new AudioTrack/VideoTrack (first segment) or look up
// the existing one (subsequent segments), and return the platform track
// ids in input order. On the first segment this SourceBuffer is also
// marked active on its MediaSource.
std::vector<WebMediaPlayer::TrackId> SourceBuffer::initializationSegmentReceived(const std::vector<MediaTrackInfo>& newTracks)
{
    WTF_LOG(Media, "SourceBuffer::initializationSegmentReceived %p tracks=%zu", this, newTracks.size());
    ASSERT(m_source);
    ASSERT(m_source->mediaElement());
    ASSERT(m_updating);

    // TODO(servolk): Implement proper 'initialization segment received' algorithm according to MSE spec:
    // https://w3c.github.io/media-source/#sourcebuffer-init-segment-received
    std::vector<WebMediaPlayer::TrackId> result;
    for (const auto& trackInfo : newTracks) {
        // MediaTrackInfo is a tuple: (type, id, kind, label, language).
        const auto& trackType = std::get<0>(trackInfo);
        const auto& id = std::get<1>(trackInfo);
        const auto& kind = std::get<2>(trackInfo);
        const auto& label = std::get<3>(trackInfo);
        const auto& language = std::get<4>(trackInfo);

        // With track support disabled, just hand out synthetic ids.
        if (!RuntimeEnabledFeatures::audioVideoTracksEnabled()) {
            static WebMediaPlayer::TrackId nextTrackId = 0;
            result.push_back(++nextTrackId);
            continue;
        }

        const TrackBase* trackBase = nullptr;
        if (trackType == WebMediaPlayer::AudioTrack) {
            AudioTrack* audioTrack = nullptr;
            if (!m_firstInitializationSegmentReceived) {
                // First segment: create the track and register it on both
                // this SourceBuffer and the media element.
                audioTrack = AudioTrack::create(id, kind, label, language, false);
                audioTracks().add(audioTrack);
                m_source->mediaElement()->audioTracks().add(audioTrack);
            } else {
                // Later segments must reference already-known tracks.
                audioTrack = findExistingTrackById(audioTracks(), id);
                ASSERT(audioTrack);
            }
            trackBase = audioTrack;
            result.push_back(audioTrack->trackId());
        } else if (trackType == WebMediaPlayer::VideoTrack) {
            VideoTrack* videoTrack = nullptr;
            if (!m_firstInitializationSegmentReceived) {
                videoTrack = VideoTrack::create(id, kind, label, language, false);
                videoTracks().add(videoTrack);
                m_source->mediaElement()->videoTracks().add(videoTrack);
            } else {
                videoTrack = findExistingTrackById(videoTracks(), id);
                ASSERT(videoTrack);
            }
            trackBase = videoTrack;
            result.push_back(videoTrack->trackId());
        } else {
            NOTREACHED();
        }
        // trackBase is only consumed by the logging below.
        (void)trackBase;
#if !LOG_DISABLED
        const char* logActionStr = m_firstInitializationSegmentReceived ? "using existing" : "added";
        const char* logTrackTypeStr = (trackType == WebMediaPlayer::AudioTrack) ? "audio" : "video";
        WTF_LOG(Media, "Tracks (sb=%p): %s %sTrack %p trackId=%d id=%s label=%s lang=%s",
            this, logActionStr, logTrackTypeStr, trackBase, trackBase->trackId(),
            trackBase->id().utf8().data(), trackBase->label().utf8().data(), trackBase->language().utf8().data());
#endif
    }

    if (!m_firstInitializationSegmentReceived) {
        // 5. If active track flag equals true, then run the following steps:
        // 5.1. Add this SourceBuffer to activeSourceBuffers.
        // 5.2. Queue a task to fire a simple event named addsourcebuffer at
        // activesourcebuffers.
        m_source->setSourceBufferActive(this);

        // 6. Set first initialization segment received flag to true.
        m_firstInitializationSegmentReceived = true;
    }

    return result;
}