static size_t in_get_buffer_size(const struct audio_stream *stream)
{
    const struct submix_stream_in *in =
            reinterpret_cast<const struct submix_stream_in *>(stream);
    const size_t buffer_size =
            in->dev->config.period_size * audio_stream_frame_size(stream);
    ALOGV("in_get_buffer_size() returns %zu", buffer_size);
    return buffer_size;
}
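All of these HALs rely on the same helper to convert between bytes and frames. As a point of reference, the legacy audio_stream_frame_size() inline in hardware/libhardware/include/hardware/audio.h amounts to the channel count times the size of one sample; the sketch below is a simplified reconstruction covering only the 8/16-bit PCM cases, not the exact header.

/* Simplified reconstruction of the legacy helper: frame size is the number
 * of channels multiplied by the size of one sample for the stream format. */
static inline size_t audio_stream_frame_size(const struct audio_stream *s)
{
    size_t chan_samp_sz;

    switch (s->get_format(s)) {
    case AUDIO_FORMAT_PCM_16_BIT:
        chan_samp_sz = sizeof(int16_t);
        break;
    case AUDIO_FORMAT_PCM_8_BIT:
    default:
        chan_samp_sz = sizeof(int8_t);
        break;
    }

    return popcount(s->get_channels(s)) * chan_samp_sz;
}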
static size_t out_get_buffer_size(const struct audio_stream *stream)
{
    struct stream_out *out = (struct stream_out *)stream;
    size_t frame_size = audio_stream_frame_size((struct audio_stream *)stream);
    size_t buf_size = out->pcm_config.period_size * frame_size;

    ALOGV("%s : buf_size : %zu, period_size : %u, frame_size : %zu",
          __func__, buf_size, out->pcm_config.period_size, frame_size);

    return buf_size;
}
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                         size_t bytes)
{
    int ret;
    struct stream_out *out = (struct stream_out *)stream;

    pthread_mutex_lock(&out->dev->lock);
    pthread_mutex_lock(&out->lock);

    if (out->standby) {
        ret = start_output_stream(out);
        if (ret != 0)
            goto err;
        out->standby = false;
    }

    pcm_write(out->pcm, (void *)buffer, bytes);

    pthread_mutex_unlock(&out->lock);
    pthread_mutex_unlock(&out->dev->lock);
    return bytes;

err:
    pthread_mutex_unlock(&out->lock);
    pthread_mutex_unlock(&out->dev->lock); /* release the device lock on the error path too */
    if (ret != 0) {
        /* sleep for the time the buffer would have taken to play out */
        usleep(bytes * 1000000 / audio_stream_frame_size(&stream->common) /
               out_get_sample_rate(&stream->common));
    }
    return bytes;
}
static ssize_t in_read(struct audio_stream_in *stream, void *buffer,
                       size_t bytes)
{
    /* XXX: fake timing for audio input */
    usleep(bytes * 1000000 / audio_stream_frame_size(&stream->common) /
           in_get_sample_rate(&stream->common));
    return bytes;
}
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                         size_t bytes)
{
    /* XXX: fake timing for audio output */
    usleep(bytes * 1000000 / audio_stream_frame_size(&stream->common) /
           out_get_sample_rate(&stream->common));
    return bytes;
}
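For both fake-timing stubs above, the sleep works out to the real-time duration of the buffer. The helper below is illustrative only (not part of any HAL) and uses assumed stream parameters in its example: 4096 bytes of 16-bit stereo (4-byte frames) at 44100 Hz sleeps for roughly 23219 us, about the 23.2 ms the buffer would take to play out on real hardware.

/* Illustrative only: the duration, in microseconds, that the fake-timing
 * stubs sleep for an I/O of 'bytes' bytes.
 * e.g. null_io_duration_us(4096, 4, 44100) == 23219 */
static unsigned int null_io_duration_us(size_t bytes, size_t frame_size,
                                        uint32_t rate)
{
    return (unsigned int)(bytes * 1000000ULL / frame_size / rate);
}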
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                         size_t bytes)
{
    int ret = 0;
    struct stream_out *out = (struct stream_out *)stream;
    struct audio_device *adev = out ? out->dev : NULL;

    if (adev == NULL)
        return -ENOSYS;

    ALOGV("%s enter", __func__);

    pthread_mutex_lock(&out->dev->lock);
    pthread_mutex_lock(&out->lock);

    // there is a possibility that the HD interface is open
    // and normal pcm stream is still active. Feed the new
    // interface to normal pcm stream
    if (adev->active_pcm) {
        if (adev->active_pcm != out->pcm)
            out->pcm = adev->active_pcm;
    }

    if (out->standby || !adev->active_pcm) {
        ret = start_output_stream(out);
        if (ret != 0)
            goto err;
        out->standby = false;
    }

    if (!out->pcm) {
        ALOGD("%s: null handle to write - device already closed", __func__);
        goto err;
    }

    ret = pcm_write(out->pcm, (void *)buffer, bytes);
    ALOGVV("%s: pcm_write returned = %d rate = %d", __func__, ret,
           out->pcm_config.rate);

err:
    pthread_mutex_unlock(&out->lock);
    pthread_mutex_unlock(&out->dev->lock);

    if (ret != 0) {
        uint64_t duration_ms = (bytes * 1000) /
                audio_stream_frame_size(&stream->common) /
                out_get_sample_rate(&stream->common);
        ALOGV("%s : silence written", __func__);
        usleep(duration_ms * 1000);
    }

    ALOGV("%s exit", __func__);
    return bytes;
}
static size_t out_get_buffer_size(const struct audio_stream *stream)
{
    struct sco_stream_out *out = (struct sco_stream_out *) stream;
    size_t size = audio_stream_frame_size(&out->stream.common) *
                  out->cfg.frame_num;

    DBG("buf size %zu", size);

    return size;
}
/* Returns bytes for ONE PERIOD */
size_t hdmi_out_get_buffer_size(const struct audio_stream *stream)
{
    hdmi_out_t *out = (hdmi_out_t *)stream;
    struct pcm_config *config = &out->config;
    size_t ans;

    ans = audio_stream_frame_size((struct audio_stream *)stream) *
          config->period_size;

    TRACEM("stream=%p returning %zu", stream, ans);

    return ans;
}
static int adev_open_input_stream(struct audio_hw_device *dev,
                                  audio_io_handle_t handle,
                                  audio_devices_t devices,
                                  struct audio_config *config,
                                  struct audio_stream_in **stream_in)
{
    struct audio_device *adev = (struct audio_device *)dev;
    struct stream_in *in;
    int buffer_size, frame_size;
    int channel_count = popcount(config->channel_mask);

    ALOGV("%s: enter", __func__);

    *stream_in = NULL;

    if (check_input_parameters(config->sample_rate, config->format,
                               channel_count) != 0)
        return -EINVAL;

    in = (struct stream_in *)calloc(1, sizeof(struct stream_in));
    if (!in)
        return -ENOMEM;

    in->stream.common.get_sample_rate = in_get_sample_rate;
    in->stream.common.set_sample_rate = in_set_sample_rate;
    in->stream.common.get_buffer_size = in_get_buffer_size;
    in->stream.common.get_channels = in_get_channels;
    in->stream.common.get_format = in_get_format;
    in->stream.common.standby = in_standby;
    in->stream.common.set_parameters = in_set_parameters;
    in->stream.common.get_parameters = in_get_parameters;
    in->stream.read = in_read;
    in->stream.get_input_frames_lost = in_get_input_frames_lost;

    in->device = devices;
    in->source = AUDIO_SOURCE_DEFAULT;
    in->dev = adev;
    in->standby = true;
    in->channel_mask = config->channel_mask;

    /* Update config params with the requested sample rate and channels */
    in->pcm_config = pcm_config_audio_capture;
    in->pcm_config.channels = channel_count;
    in->pcm_config.rate = config->sample_rate;

    frame_size = audio_stream_frame_size((struct audio_stream *)in);
    buffer_size = get_input_buffer_size(config->sample_rate, config->format,
                                        channel_count);
    in->pcm_config.period_size = buffer_size / frame_size;

    *stream_in = &in->stream;

    ALOGV("%s: exit", __func__);
    return 0;
}
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                         size_t bytes)
{
    struct sco_stream_out *out = (struct sco_stream_out *) stream;
    size_t frame_num = bytes / audio_stream_frame_size(&out->stream.common);
    size_t output_frame_num = frame_num;
    void *send_buf = out->downmix_buf;
    size_t total;

    DBG("write to fd %d bytes %zu", out->fd, bytes);

    if (!out->downmix_buf) {
        error("sco: downmix buffer not initialized");
        return -1;
    }

    downmix_to_mono(out, buffer, frame_num);

    if (out->resampler) {
        int ret;

        /* limit resampler's output within what resample buf can hold */
        output_frame_num = out->resample_frame_num;

        ret = out->resampler->resample_from_input(out->resampler, send_buf,
                                                  &frame_num,
                                                  out->resample_buf,
                                                  &output_frame_num);
        if (ret) {
            error("Failed to resample frames: %zu input %zu (%s)",
                  frame_num, output_frame_num, strerror(ret));
            return -1;
        }

        send_buf = out->resample_buf;

        DBG("Resampled: frame_num %zu, output_frame_num %zu",
            frame_num, output_frame_num);
    }

    /* mono, 16-bit samples */
    total = output_frame_num * sizeof(int16_t) * 1;

    DBG("total %zu", total);

    if (!write_data(out, send_buf, total))
        return -1;

    return bytes;
}
static ssize_t in_read(struct audio_stream_in *stream, void *buffer,
                       size_t bytes)
{
    struct stream_in *in = (struct stream_in *)stream;
    struct audio_device *adev = in->dev;
    int ret = -1;

    ALOGV("%s enter", __func__);

    pthread_mutex_lock(&in->lock);

    if (in->standby) {
        pthread_mutex_lock(&adev->lock);
        ret = start_input_stream(in);
        pthread_mutex_unlock(&adev->lock);
        if (ret != 0)
            goto exit;
        in->standby = false;
    }

    if (in->pcm)
        ret = pcm_read(in->pcm, buffer, bytes);

    ALOGV("in_read returned %zu bytes ret = %d", bytes, ret);

exit:
    pthread_mutex_unlock(&in->lock);

    if (ret != 0) {
        in_standby(&in->stream.common);
        uint64_t duration_ms = (bytes * 1000) /
                audio_stream_frame_size(&in->stream.common) /
                in_get_sample_rate(&in->stream.common);
        ALOGV("%s : silence read - read failed", __func__);
        usleep(duration_ms * 1000);
    }

    ALOGV("%s exit", __func__);
    return bytes;
}
void channel_remap(struct audio_stream_out *stream, const void *buffer,
                   size_t bytes)
{
    hdmi_out_t *out = (hdmi_out_t *)stream;
    struct hdmi_device_t *adev = (struct hdmi_device_t *)out->dev;
    int x, y, frames;
    int16_t *buf = (int16_t *)buffer;
    int16_t *tmp_buf = (int16_t *)out->buffcpy;

    frames = bytes / audio_stream_frame_size(&out->stream_out.common);

    /* Reorder each frame from the device's channel order into the CEA
     * channel order, writing the result into out->buffcpy. */
    while (frames--) {
        for (y = 0; y < (int)out->config.channels; y++) {
            for (x = 0; x < (int)out->config.channels; x++) {
                if (cea_channel_map[y] == adev->map[x]) {
                    tmp_buf[y] = buf[x];
                    break;
                }
            }
        }
        tmp_buf += (int)out->config.channels;
        buf += (int)out->config.channels;
    }
}
ssize_t hdmi_out_write(struct audio_stream_out *stream, const void *buffer,
                       size_t bytes)
{
    hdmi_out_t *out = (hdmi_out_t *)stream;
    struct hdmi_device_t *adev = (struct hdmi_device_t *)out->dev;
    ssize_t ret;

    TRACEM("stream=%p buffer=%p bytes=%zu", stream, buffer, bytes);

    if (!out->up) {
        if (hdmi_out_open_pcm(out)) {
            ret = -ENOSYS;
            goto exit;
        }
    }

    if (out->config.channels > 2 && !adev->CEAMap) {
        channel_remap(stream, buffer, bytes);
        ret = pcm_write(out->pcm, out->buffcpy, bytes);
    } else {
        ret = pcm_write(out->pcm, buffer, bytes);
    }

exit:
    if (ret != 0) {
        ALOGE("Error writing to HDMI pcm: %s", pcm_get_error(out->pcm));

        hdmi_out_standby((struct audio_stream *)stream);

        unsigned int usecs = bytes * 1000000 /
                audio_stream_frame_size((struct audio_stream *)stream) /
                hdmi_out_get_sample_rate((struct audio_stream *)stream);
        if (usecs >= 1000000L)
            usecs = 999999L;
        usleep(usecs);
    }

    return bytes;
}
ssize_t InStream::read(void *buffer, size_t bytes)
{
    int ret = 0;

    LOGFUNC("%s(%p, %p, %zu)", __func__, this, buffer, bytes);

    AutoMutex lock(mLock);

    if (mStandby) {
        if (startInputStream())
            return -EBUSY;
    }

    ret = pcm_read(mPcm, buffer, bytes);
    ALOGV("pcm_read(%p, %p, %zu) returned %d", mPcm, buffer, bytes, ret);

    if (ret >= 0 && mDev.mMicMute)
        memset(buffer, 0, bytes);

    if (ret < 0) {
        ALOGE("pcm_read(%p, %p, %zu) returned %d", mPcm, buffer, bytes, ret);
        usleep(bytes * 1000000 /
               audio_stream_frame_size(&audio_stream_in()->common) /
               mConfig.rate);
    }

    return bytes;
}
static size_t out_get_buffer_size(const struct audio_stream *stream)
{
    return pcm_config.period_size *
           audio_stream_frame_size((struct audio_stream *)stream);
}
static ssize_t in_read(struct audio_stream_in *stream, void *buffer,
                       size_t bytes)
{
    //ALOGV("in_read bytes=%zu", bytes);
    ssize_t frames_read = -1977;
    struct submix_stream_in *in = reinterpret_cast<struct submix_stream_in *>(stream);
    const size_t frame_size = audio_stream_frame_size(&stream->common);
    const size_t frames_to_read = bytes / frame_size;

    pthread_mutex_lock(&in->dev->lock);

    const bool output_standby_transition =
            (in->output_standby != in->dev->output_standby);
    in->output_standby = in->dev->output_standby;

    if (in->dev->input_standby || output_standby_transition) {
        in->dev->input_standby = false;
        // keep track of when we exit input standby (== first read == start "real recording")
        // or when we start recording silence, and reset projected time
        int rc = clock_gettime(CLOCK_MONOTONIC, &in->record_start_time);
        if (rc == 0) {
            in->read_counter_frames = 0;
        }
    }

    in->read_counter_frames += frames_to_read;

    size_t remaining_frames = frames_to_read;
    {
        // about to read from audio source
        sp<MonoPipeReader> source = in->dev->rsxSource.get();
        if (source == 0) {
            ALOGE("no audio pipe yet we're trying to read!");
            pthread_mutex_unlock(&in->dev->lock);
            usleep((bytes / frame_size) * 1000000 /
                   in_get_sample_rate(&stream->common));
            memset(buffer, 0, bytes);
            return bytes;
        }
        pthread_mutex_unlock(&in->dev->lock);

        // read the data from the pipe (it's non blocking)
        int attempts = 0;
        char *buff = (char *)buffer;
        while ((remaining_frames > 0) && (attempts < MAX_READ_ATTEMPTS)) {
            attempts++;
            frames_read = source->read(buff, remaining_frames,
                                       AudioBufferProvider::kInvalidPTS);
            if (frames_read > 0) {
                remaining_frames -= frames_read;
                buff += frames_read * frame_size;
                //ALOGV("  in_read (att=%d) got %ld frames, remaining=%u",
                //      attempts, frames_read, remaining_frames);
            } else {
                //ALOGE("  in_read read returned %ld", frames_read);
                usleep(READ_ATTEMPT_SLEEP_MS * 1000);
            }
        }

        // done using the source
        pthread_mutex_lock(&in->dev->lock);
        source.clear();
        pthread_mutex_unlock(&in->dev->lock);
    }

    if (remaining_frames > 0) {
        ALOGV("  remaining_frames = %zu", remaining_frames);
        memset(((char *)buffer) + bytes - (remaining_frames * frame_size), 0,
               remaining_frames * frame_size);
    }

    // compute how much we need to sleep after reading the data by comparing the wall
    // clock with the projected time at which we should return.
    struct timespec time_after_read; // wall clock after reading from the pipe
    struct timespec record_duration; // observed record duration
    int rc = clock_gettime(CLOCK_MONOTONIC, &time_after_read);
    const uint32_t sample_rate = in_get_sample_rate(&stream->common);

    if (rc == 0) {
        // for how long have we been recording?
        record_duration.tv_sec  = time_after_read.tv_sec - in->record_start_time.tv_sec;
        record_duration.tv_nsec = time_after_read.tv_nsec - in->record_start_time.tv_nsec;
        if (record_duration.tv_nsec < 0) {
            record_duration.tv_sec--;
            record_duration.tv_nsec += 1000000000;
        }

        // read_counter_frames contains the number of frames that have been read since
        // the beginning of recording (including this call): it's converted to usec and
        // compared to how long we've been recording for, which gives us how long we must
        // wait to sync the projected recording time with the observed recording time.
        long projected_vs_observed_offset_us =
                ((int64_t)(in->read_counter_frames
                        - (record_duration.tv_sec * sample_rate)))
                        * 1000000 / sample_rate
                - (record_duration.tv_nsec / 1000);

        ALOGV("  record duration %5lds %3ldms, will wait: %7ldus",
              record_duration.tv_sec, record_duration.tv_nsec / 1000000,
              projected_vs_observed_offset_us);
        if (projected_vs_observed_offset_us > 0) {
            usleep(projected_vs_observed_offset_us);
        }
    }

    ALOGV("in_read returns %zu", bytes);
    return bytes;
}
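The pacing at the end of the submix in_read() is easier to see in isolation. The hypothetical helper below (not part of the submix HAL) restates the same arithmetic: convert the frames read so far into a projected recording time, subtract the observed wall-clock duration, and sleep for any positive remainder so capture never completes faster than real time. With 48000 frames read at 48 kHz and 990 ms elapsed, for instance, it returns 10000 us.

/* Hypothetical restatement of the submix capture pacing above. */
static long projected_vs_observed_us(uint64_t frames_read, uint32_t sample_rate,
                                     const struct timespec *elapsed)
{
    int64_t projected_us = (int64_t)frames_read * 1000000 / sample_rate;
    int64_t observed_us = (int64_t)elapsed->tv_sec * 1000000 +
                          elapsed->tv_nsec / 1000;
    return (long)(projected_us - observed_us); /* sleep only if positive */
}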
static size_t in_get_buffer_size(const struct audio_stream *stream)
{
    struct stream_in *in = (struct stream_in *)stream;

    return in->pcm_config.period_size * audio_stream_frame_size(stream);
}
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                         size_t bytes)
{
    //ALOGV("out_write(bytes=%zu)", bytes);
    ssize_t written_frames = 0;
    struct submix_stream_out *out = reinterpret_cast<struct submix_stream_out *>(stream);
    const size_t frame_size = audio_stream_frame_size(&stream->common);
    const size_t frames = bytes / frame_size;

    pthread_mutex_lock(&out->dev->lock);
    out->dev->output_standby = false;

    sp<MonoPipe> sink = out->dev->rsxSink.get();
    if (sink != 0) {
        if (sink->isShutdown()) {
            sink.clear();
            pthread_mutex_unlock(&out->dev->lock);
            // the pipe has already been shutdown, this buffer will be lost but we must
            // simulate timing so we don't drain the output faster than realtime
            usleep(frames * 1000000 / out_get_sample_rate(&stream->common));
            return bytes;
        }
    } else {
        pthread_mutex_unlock(&out->dev->lock);
        ALOGE("out_write without a pipe!");
        ALOG_ASSERT("out_write without a pipe!");
        return 0;
    }
    pthread_mutex_unlock(&out->dev->lock);

    written_frames = sink->write(buffer, frames);

    if (written_frames < 0) {
        if (written_frames == (ssize_t)NEGOTIATE) {
            ALOGE("out_write() write to pipe returned NEGOTIATE");
            pthread_mutex_lock(&out->dev->lock);
            sink.clear();
            pthread_mutex_unlock(&out->dev->lock);
            written_frames = 0;
            return 0;
        } else {
            // write() returned UNDERRUN or WOULD_BLOCK, retry
            ALOGE("out_write() write to pipe returned unexpected %zd",
                  written_frames);
            written_frames = sink->write(buffer, frames);
        }
    }

    pthread_mutex_lock(&out->dev->lock);
    sink.clear();
    pthread_mutex_unlock(&out->dev->lock);

    if (written_frames < 0) {
        ALOGE("out_write() failed writing to pipe with %zd", written_frames);
        return 0;
    } else {
        ALOGV("out_write() wrote %zu bytes", written_frames * frame_size);
        return written_frames * frame_size;
    }
}
static int adev_open_output_stream(struct audio_hw_device *dev,
                                   uint32_t devices, int *format,
                                   uint32_t *channels, uint32_t *sample_rate,
                                   struct audio_stream_out **stream_out)
{
    struct adev_a2dp *adev = (struct adev_a2dp *)dev;
    struct astream_out *out;
    int ret;

    pthread_mutex_lock(&adev->lock);

    /* one output stream at a time */
    if (adev->output) {
        LOGV("output exists");
        ret = -EBUSY;
        goto err_output_exists;
    }

    out = calloc(1, sizeof(struct astream_out));
    if (!out) {
        ret = -ENOMEM;
        goto err_alloc;
    }

    pthread_mutex_init(&out->lock, NULL);

    out->stream.common.get_sample_rate = out_get_sample_rate;
    out->stream.common.set_sample_rate = out_set_sample_rate;
    out->stream.common.get_buffer_size = out_get_buffer_size;
    out->stream.common.get_channels = out_get_channels;
    out->stream.common.get_format = out_get_format;
    out->stream.common.set_format = out_set_format;
    out->stream.common.standby = out_standby;
    out->stream.common.dump = out_dump;
    out->stream.common.set_parameters = out_set_parameters;
    out->stream.common.get_parameters = out_get_parameters;
    out->stream.common.set_device = out_set_device;
    out->stream.common.get_device = out_get_device;
    out->stream.common.add_audio_effect = out_add_audio_effect;
    out->stream.common.remove_audio_effect = out_remove_audio_effect;
    out->stream.get_latency = out_get_latency;
    out->stream.set_volume = out_set_volume;
    out->stream.write = out_write;
    out->stream.get_render_position = out_get_render_position;

    out->sample_rate = 44100;
    out->buffer_size = 512 * 20;
    out->channels = AUDIO_CHANNEL_OUT_STEREO;
    out->format = AUDIO_FORMAT_PCM_16_BIT;

    out->fd = -1;
    out->device = devices;
    out->bt_enabled = adev->bt_enabled;
    out->suspended = adev->suspended;

    /* for now, buffer_duration_us is precalculated and never changed.
     * if the sample rate or the format ever changes on the fly, we'd have
     * to recalculate this */
    out->buffer_duration_us = ((out->buffer_size * 1000) /
                               audio_stream_frame_size(&out->stream.common) /
                               out->sample_rate) * 1000;

    if (!_out_validate_parms(out, format ? *format : 0,
                             channels ? *channels : 0,
                             sample_rate ? *sample_rate : 0)) {
        LOGV("invalid parameters");
        ret = -EINVAL;
        goto err_validate_parms;
    }

    int err = pthread_create(&out->buf_thread, (const pthread_attr_t *)NULL,
                             _out_buf_thread_func, out);
    if (err != 0) {
        ret = -err; /* propagate a real error code instead of returning ret uninitialized */
        goto err_validate_parms;
    }

    /* PCM format is always 16bit, stereo */
    out->buf_size = (out->buffer_size * BUF_NUM_PERIODS) / sizeof(int32_t);
    out->buf = (uint32_t *)malloc(out->buf_size * sizeof(int32_t));
    if (!out->buf) {
        ret = -ENOMEM;
        goto err_validate_parms;
    }

    /* XXX: check return code? */
    if (adev->bt_enabled)
        _out_init_locked(out, "00:00:00:00:00:00");

    adev->output = out;

    if (format)
        *format = out->format;
    if (channels)
        *channels = out->channels;
    if (sample_rate)
        *sample_rate = out->sample_rate;

    pthread_mutex_unlock(&adev->lock);

    *stream_out = &out->stream;

    return 0;

err_validate_parms:
    free(out);
err_alloc:
err_output_exists:
    pthread_mutex_unlock(&adev->lock);
    *stream_out = NULL;
    return ret;
}