Example #1
static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
                       size_t bytes)
{
    /* XXX: fake timing for audio input.
     * Sleep for as long as a real device would need to capture this many
     * frames, then hand back a buffer of silence. */
    usleep((int64_t)bytes * 1000000 / audio_stream_in_frame_size(stream) /
           in_get_sample_rate(&stream->common));
    memset(buffer, 0, bytes);
    return bytes;
}
Example #2
static ssize_t in_read(struct audio_stream_in *stream, void *buffer,
                       size_t bytes)
{
    struct stream_in *in = (struct stream_in *)stream;
    struct audio_device *adev = in->dev;
    int ret = -1;

    ALOGV("%s enter",__func__);

    pthread_mutex_lock(&in->lock);
    if (in->standby) {
        pthread_mutex_lock(&adev->lock);
        ret = start_input_stream(in);
        pthread_mutex_unlock(&adev->lock);
        if (ret != 0) {
            goto exit;
        }
        in->standby = false;
    }

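    /* pcm_read() returns 0 on success and a negative error code on failure */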
    if (in->pcm) {
        ret = pcm_read(in->pcm, buffer, bytes);
    }
    ALOGV("in_read returned %d bytes ret = %d",bytes,ret);

exit:
    pthread_mutex_unlock(&in->lock);

    if (ret != 0) {
        in_standby(&in->stream.common);
        ALOGV("%s: read failed - returning silence", __func__);
        /* return silence and sleep for the duration this buffer represents
         * so the caller keeps its real-time cadence */
        memset(buffer, 0, bytes);
        uint64_t duration_ms = (bytes * 1000) /
                               audio_stream_frame_size(&in->stream.common) /
                               in_get_sample_rate(&in->stream.common);
        usleep(duration_ms * 1000);
    }
    ALOGV("%s exit",__func__);

    return bytes;
}
Example #3
static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
                       size_t bytes)
{
    //ALOGV("in_read bytes=%u", bytes);
    ssize_t frames_read = -1977;
    struct submix_stream_in *in = reinterpret_cast<struct submix_stream_in *>(stream);
    const size_t frame_size = audio_stream_frame_size(&stream->common);
    const size_t frames_to_read = bytes / frame_size;

    pthread_mutex_lock(&in->dev->lock);

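    // detect the output stream entering or leaving standby; either event
    // restarts the recording-time bookkeeping below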
    const bool output_standby_transition = (in->output_standby != in->dev->output_standby);
    in->output_standby = in->dev->output_standby;

    if (in->dev->input_standby || output_standby_transition) {
        in->dev->input_standby = false;
        // keep track of when we exit input standby (== first read == start "real recording")
        // or when we start recording silence, and reset projected time
        int rc = clock_gettime(CLOCK_MONOTONIC, &in->record_start_time);
        if (rc == 0) {
            in->read_counter_frames = 0;
        }
    }

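    // count the full request up front; any frames the pipe cannot supply
    // are zero-filled further down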
    in->read_counter_frames += frames_to_read;
    size_t remaining_frames = frames_to_read;

    {
        // about to read from audio source
        sp<MonoPipeReader> source = in->dev->rsxSource.get();
        if (source == 0) {
            ALOGE("no audio pipe yet we're trying to read!");
            pthread_mutex_unlock(&in->dev->lock);
            usleep((bytes / frame_size) * 1000000 / in_get_sample_rate(&stream->common));
            memset(buffer, 0, bytes);
            return bytes;
        }

        pthread_mutex_unlock(&in->dev->lock);

        // read the data from the pipe (it's non blocking)
        int attempts = 0;
        char* buff = (char*)buffer;
        while ((remaining_frames > 0) && (attempts < MAX_READ_ATTEMPTS)) {
            attempts++;
            frames_read = source->read(buff, remaining_frames, AudioBufferProvider::kInvalidPTS);
            if (frames_read > 0) {
                remaining_frames -= frames_read;
                buff += frames_read * frame_size;
                //ALOGV("  in_read (att=%d) got %ld frames, remaining=%u",
                //      attempts, frames_read, remaining_frames);
            } else {
                //ALOGE("  in_read read returned %ld", frames_read);
                usleep(READ_ATTEMPT_SLEEP_MS * 1000);
            }
        }
        // done using the source
        pthread_mutex_lock(&in->dev->lock);
        source.clear();
        pthread_mutex_unlock(&in->dev->lock);
    }

    if (remaining_frames > 0) {
        // zero-fill whatever the pipe could not supply so the caller still gets a full buffer
        ALOGV("  remaining_frames = %zu", remaining_frames);
        memset(((char*)buffer)+ bytes - (remaining_frames * frame_size), 0,
                remaining_frames * frame_size);
    }

    // compute how much we need to sleep after reading the data by comparing the wall clock with
    //   the projected time at which we should return.
    struct timespec time_after_read;// wall clock after reading from the pipe
    struct timespec record_duration;// observed record duration
    int rc = clock_gettime(CLOCK_MONOTONIC, &time_after_read);
    const uint32_t sample_rate = in_get_sample_rate(&stream->common);
    if (rc == 0) {
        // for how long have we been recording?
        record_duration.tv_sec  = time_after_read.tv_sec - in->record_start_time.tv_sec;
        record_duration.tv_nsec = time_after_read.tv_nsec - in->record_start_time.tv_nsec;
        if (record_duration.tv_nsec < 0) {
            record_duration.tv_sec--;
            record_duration.tv_nsec += 1000000000;
        }

        // read_counter_frames contains the number of frames that have been read since the beginning
        // of recording (including this call): it's converted to usec and compared to how long we've
        // been recording for, which gives us how long we must wait to sync the projected recording
        // time, and the observed recording time
        long projected_vs_observed_offset_us =
                ((int64_t)(in->read_counter_frames
                            - (record_duration.tv_sec*sample_rate)))
                        * 1000000 / sample_rate
                - (record_duration.tv_nsec / 1000);

        ALOGV("  record duration %5lds %3ldms, will wait: %7ldus",
                record_duration.tv_sec, record_duration.tv_nsec/1000000,
                projected_vs_observed_offset_us);
        if (projected_vs_observed_offset_us > 0) {
            usleep(projected_vs_observed_offset_us);
        }
    }

    ALOGV("in_read returns %zu", bytes);
    return bytes;
}