Code example #1
size_t LibmediaPlayback::FillBuffer(void* data, size_t size) {
  CHECK(data);
  if (in_file_) {
    int frames_read = 0;
    int frame_size = audio_bytes_per_sample(audio_format_) * num_channels_;
    int sample_size = audio_bytes_per_sample(audio_format_);
    if (sample_size == 1) {
      void* temp = malloc(size * sizeof(short));
      frames_read = sf_readf_short(in_file_, reinterpret_cast<short*>(temp),
                                   size / frame_size);
      int num_samples = frames_read * frame_size / sample_size;
      memcpy_to_u8_from_i16(reinterpret_cast<uint8_t*>(data),
                            reinterpret_cast<int16_t*>(temp), num_samples);
      free(temp);
    } else if (sample_size == 2) {
      frames_read = sf_readf_short(in_file_, reinterpret_cast<short*>(data),
                                   size / frame_size);
    } else if (sample_size == 4) {
      frames_read = sf_readf_int(in_file_, reinterpret_cast<int*>(data),
                                 size / frame_size);
    } else {
      LOG(ERROR) << "Could not handle file with sample size = " << sample_size;
      return 0;
    }
    size = frame_size * frames_read;
  } else {
    size = (size < sine_data_buffer_->size()) ? size :
        sine_data_buffer_->size();
    memcpy(data, sine_data_buffer_->data(), size);
  }
  return size;
}
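The arithmetic above hinges on the frame/sample distinction: a frame is one sample per channel. A standalone sketch of that bytes-to-frames conversion (sample size and buffer values assumed for illustration; the real audio_bytes_per_sample() lives in AOSP's system/audio.h):

#include <cstdio>
#include <cstddef>

int main() {
    const size_t num_channels = 2;
    const size_t sample_size = 2;                          // 16-bit PCM; stand-in for audio_bytes_per_sample()
    const size_t frame_size = sample_size * num_channels;  // 4 bytes per frame
    const size_t buffer_bytes = 4096;
    // FillBuffer asks libsndfile for buffer_bytes / frame_size frames,
    // then reports frames_read * frame_size bytes back to its caller.
    printf("frames per buffer: %zu\n", buffer_bytes / frame_size);  // prints 1024
    return 0;
}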
Code example #2
size_t AudioRecord::frameSize() const
{
    if (inputSource() == AUDIO_SOURCE_VOICE_COMMUNICATION) {
        if (audio_is_linear_pcm(mFormat)) {
            return channelCount() * audio_bytes_per_sample(mFormat);
        } else {
            return channelCount() * sizeof(int16_t);
        }
    } else {
        if (format() == AUDIO_FORMAT_AMR_NB) {
            return channelCount() * AMR_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_EVRC) {
            return channelCount() * EVRC_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_QCELP) {
            return channelCount() * QCELP_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_AAC) {
            // Not the actual frame size, but for variable-frame-rate AAC
            // encoding the buffer size is treated as the frame size.
            return AAC_FRAMESIZE;
        } else if (format() == AUDIO_FORMAT_AMR_WB) {
            return channelCount() * AMR_WB_FRAMESIZE;
        }
        if (audio_is_linear_pcm(mFormat)) {
            return channelCount() * audio_bytes_per_sample(mFormat);
        } else {
            return sizeof(uint8_t);
        }
    }
}
Code example #3
bool AudioPort::isBetterFormatMatch(audio_format_t newFormat,
                                    audio_format_t currentFormat,
                                    audio_format_t targetFormat)
{
    if (newFormat == currentFormat) {
        return false;
    }
    if (currentFormat == AUDIO_FORMAT_INVALID) {
        return true;
    }
    if (newFormat == targetFormat) {
        return true;
    }
    int currentDiffBytes = (int)audio_bytes_per_sample(targetFormat) -
            audio_bytes_per_sample(currentFormat);
    int newDiffBytes = (int)audio_bytes_per_sample(targetFormat) -
            audio_bytes_per_sample(newFormat);

    if (abs(newDiffBytes) < abs(currentDiffBytes)) {
        return true;
    } else if (abs(newDiffBytes) == abs(currentDiffBytes)) {
        return (newDiffBytes >= 0);
    }
    return false;
}
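A minimal standalone sketch of the selection loop such a comparator typically drives (the loop and the stub formats are assumed; only the comparator logic is taken from the snippet above):

#include <cstdio>
#include <cstdlib>
#include <vector>

enum Fmt { FMT_INVALID, FMT_PCM_8, FMT_PCM_16, FMT_PCM_24, FMT_PCM_32 };

// Stand-in for audio_bytes_per_sample() over the stub formats.
static int bytesPerSample(Fmt f) {
    switch (f) {
    case FMT_PCM_8:  return 1;
    case FMT_PCM_16: return 2;
    case FMT_PCM_24: return 3;
    case FMT_PCM_32: return 4;
    default:         return 0;
    }
}

// Same decision procedure as isBetterFormatMatch() above.
static bool isBetterMatch(Fmt newF, Fmt curF, Fmt target) {
    if (newF == curF) return false;
    if (curF == FMT_INVALID) return true;
    if (newF == target) return true;
    const int curDiff = bytesPerSample(target) - bytesPerSample(curF);
    const int newDiff = bytesPerSample(target) - bytesPerSample(newF);
    if (abs(newDiff) < abs(curDiff)) return true;
    if (abs(newDiff) == abs(curDiff)) return newDiff >= 0;  // tie: prefer not-wider-than-target
    return false;
}

int main() {
    const std::vector<Fmt> supported = { FMT_PCM_8, FMT_PCM_32, FMT_PCM_16 };
    Fmt best = FMT_INVALID;
    for (Fmt f : supported) {
        if (isBetterMatch(f, best, FMT_PCM_24)) best = f;
    }
    // 16-bit and 32-bit are equally far from the 24-bit target;
    // the tie-break picks the narrower 16-bit format.
    printf("best = %d\n", (int) best);
    return 0;
}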
Code example #4
ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
        audio_format_t inputFormat, audio_format_t outputFormat,
        size_t bufferFrameCount) :
        CopyBufferProvider(
                channelCount * audio_bytes_per_sample(inputFormat),
                channelCount * audio_bytes_per_sample(outputFormat),
                bufferFrameCount),
        mChannelCount(channelCount),
        mInputFormat(inputFormat),
        mOutputFormat(outputFormat)
{
    ALOGV("ReformatBufferProvider(%p)(%u, %#x, %#x)",
            this, channelCount, inputFormat, outputFormat);
}
Code example #5
// static
status_t AudioRecord::getMinFrameCount(
        int* frameCount,
        uint32_t sampleRate,
        int format,
        int channelCount)
{
    size_t size = 0;
    if (AudioSystem::getInputBufferSize(sampleRate, format, channelCount, &size)
            != NO_ERROR) {
        LOGE("AudioSystem could not query the input buffer size.");
        return NO_INIT;
    }

    if (size == 0) {
        LOGE("Unsupported configuration: sampleRate %d, format %d, channelCount %d",
            sampleRate, format, channelCount);
        return BAD_VALUE;
    }

    // We double the size of input buffer for ping pong use of record buffer.
    size <<= 1;

    if (audio_is_linear_pcm(format)) {
        size /= channelCount * audio_bytes_per_sample(format);
    }

    *frameCount = size;
    return NO_ERROR;
}
Code example #6
// static
status_t AudioRecord::getMinFrameCount(
        size_t* frameCount,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    size_t size;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
    if (status != NO_ERROR) {
        ALOGE("AudioSystem could not query the input buffer size for sampleRate %u, format %#x, "
              "channelMask %#x; status %d", sampleRate, format, channelMask, status);
        return status;
    }

    // handle non-linear-pcm formats and update frameCount
    if (!audio_is_linear_pcm(format)) {
        *frameCount = (size * 2) / sizeof(uint8_t);
        return NO_ERROR;
    }

    // We double the size of input buffer for ping pong use of record buffer.
    // Assumes audio_is_linear_pcm(format)
    if ((*frameCount = (size * 2) / (audio_channel_count_from_in_mask(channelMask) *
            audio_bytes_per_sample(format))) == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
            sampleRate, format, channelMask);
        return BAD_VALUE;
    }

    return NO_ERROR;
}
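A worked instance of the arithmetic above (values assumed): if the HAL reports a 2048-byte input buffer for stereo 16-bit PCM, doubling for ping-pong use and dividing by the 4-byte frame size gives (2048 * 2) / (2 * 2) = 1024 frames.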
Code example #7
static size_t audio_hw_get_input_buffer_size(const struct audio_hw_device *dev,
                                             const struct audio_config *config)
{
    struct tinyalsa_audio_device *device;
    struct tinyalsa_mixer_io_props *mixer_props;
    size_t size;

    int channel_count = popcount(config->channel_mask);

    ALOGD("%s(%p, %d, %d, %d)++", __func__, dev, config->sample_rate, config->format, channel_count);

    /* Note: the return type is size_t, so the -1 error returns below wrap to SIZE_MAX. */
    if (dev == NULL)
        return -1;

    device = (struct tinyalsa_audio_device *) dev;

    if (device->mixer == NULL)
        return -1;

    mixer_props = tinyalsa_mixer_get_input_props(device->mixer);
    if (mixer_props == NULL)
        return -1;

    // Default value
    if (mixer_props->rate == 0)
        mixer_props->rate = 44100;

    size = (mixer_props->period_size * config->sample_rate) / mixer_props->rate;
    size = ((size + 15) / 16) * 16;
    size = size * channel_count * audio_bytes_per_sample(config->format);

    ALOGD("%s(%p, %d, %d, %d)--", __func__, dev, config->sample_rate, config->format, channel_count);

    return size;
}
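A worked pass through the size calculation (all values assumed): with a 1024-frame period at a 48000 Hz mixer rate, a requested rate of 44100 Hz, and stereo 16-bit PCM, (1024 * 44100) / 48000 = 940 frames; rounding up to a multiple of 16 gives 944 frames; and 944 * 2 channels * 2 bytes per sample = 3776 bytes.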
Code example #8
int AudioTrack::frameSize() const
{
    if (audio_is_linear_pcm(mFormat)) {
        return channelCount() * audio_bytes_per_sample(mFormat);
    } else {
        return sizeof(uint8_t);
    }
}
Code example #9
File: audio_hw.c Project: LeMaker/android-actions
static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, size_t bytes)
{
    int ret;
    struct stream_out *out = (struct stream_out *)stream;

    pthread_mutex_lock(&out->dev->lock);
    pthread_mutex_lock(&out->lock);
    if (out->standby) {
        ret = start_output_stream(out);
        if (ret != 0) {
            pthread_mutex_unlock(&out->dev->lock);
            goto err;
        }
        out->standby = false;
    }
    pthread_mutex_unlock(&out->dev->lock);

    alsa_device_proxy* proxy = &out->proxy;
    const void * write_buff = buffer;
    int num_write_buff_bytes = bytes;
    const int num_device_channels = proxy_get_channel_count(proxy); /* what we told alsa */
    const int num_req_channels = out->hal_channel_count; /* what we told AudioFlinger */
    if (num_device_channels != num_req_channels) {
        /* allocate buffer */
        const size_t required_conversion_buffer_size =
                 bytes * num_device_channels / num_req_channels;
        if (required_conversion_buffer_size > out->conversion_buffer_size) {
            out->conversion_buffer_size = required_conversion_buffer_size;
            out->conversion_buffer = realloc(out->conversion_buffer,
                                             out->conversion_buffer_size);
        }
        /* convert data */
        const audio_format_t audio_format = out_get_format(&(out->stream.common));
        const unsigned sample_size_in_bytes = audio_bytes_per_sample(audio_format);
        num_write_buff_bytes =
                adjust_channels(write_buff, num_req_channels,
                                out->conversion_buffer, num_device_channels,
                                sample_size_in_bytes, num_write_buff_bytes);
        write_buff = out->conversion_buffer;
    }

    if (write_buff != NULL && num_write_buff_bytes != 0) {
        proxy_write(&out->proxy, write_buff, num_write_buff_bytes);
    }

    pthread_mutex_unlock(&out->lock);

    return bytes;

err:
    pthread_mutex_unlock(&out->lock);
    if (ret != 0) {
        usleep(bytes * 1000000 / audio_stream_out_frame_size(stream) /
               out_get_sample_rate(&stream->common));
    }

    return bytes;
}
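Note how the conversion buffer is sized: the byte count scales by the device/requested channel ratio before adjust_channels() rewrites the interleaving. As a worked instance (values assumed), if AudioFlinger delivers 4096 bytes of stereo 16-bit data and the ALSA device was opened with 4 channels, the converted write needs 4096 * 4 / 2 = 8192 bytes.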
Code example #10
RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
        audio_channel_mask_t outputChannelMask, audio_format_t format,
        size_t bufferFrameCount) :
        CopyBufferProvider(
                audio_bytes_per_sample(format)
                    * audio_channel_count_from_out_mask(inputChannelMask),
                audio_bytes_per_sample(format)
                    * audio_channel_count_from_out_mask(outputChannelMask),
                bufferFrameCount),
        mFormat(format),
        mSampleSize(audio_bytes_per_sample(format)),
        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
{
    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
            this, format, inputChannelMask, outputChannelMask,
            mInputChannels, mOutputChannels);
    (void) memcpy_by_index_array_initialization_from_channel_mask(
            mIdxAry, ARRAY_SIZE(mIdxAry), outputChannelMask, inputChannelMask);
}
Code example #11
File: PowerLog.cpp Project: MIPS/system-media
void PowerLog::log(const void *buffer, size_t frames, int64_t nowNs)
{
    std::lock_guard<std::mutex> guard(mLock);

    const size_t bytes_per_sample = audio_bytes_per_sample(mFormat);
    while (frames > 0) {
        // check partial computation
        size_t required = mFramesPerEntry - mCurrentFrames;
        size_t process = std::min(required, frames);

        if (mCurrentTime == 0) {
            mCurrentTime = nowNs;
        }
        mCurrentEnergy +=
                audio_utils_compute_energy_mono(buffer, mFormat, process * mChannelCount);
        mCurrentFrames += process;

        ALOGV("nowNs:%lld, required:%zu, process:%zu, mCurrentEnergy:%f, mCurrentFrames:%zu",
                (long long)nowNs, required, process, mCurrentEnergy, mCurrentFrames);
        if (process < required) {
            return;
        }

        // We store the data as normalized energy per sample. The energy sequence is
        // zero terminated. Consecutive zeroes are ignored.
        if (mCurrentEnergy == 0.f) {
            if (mConsecutiveZeroes++ == 0) {
                mEntries[mIdx++] = std::make_pair(nowNs, 0.f);
                // zero terminate the signal sequence.
            }
        } else {
            mConsecutiveZeroes = 0;
            mEntries[mIdx++] = std::make_pair(mCurrentTime, mCurrentEnergy);
            ALOGV("writing %lld %f", (long long)mCurrentTime, mCurrentEnergy);
        }
        if (mIdx >= mEntries.size()) {
            mIdx -= mEntries.size();
        }
        mCurrentTime = 0;
        mCurrentEnergy = 0;
        mCurrentFrames = 0;
        frames -= process;
        // Advance by the frames just consumed; mCurrentFrames was reset above,
        // so using it here would leave the buffer pointer stuck at the start.
        buffer = (const uint8_t *)buffer + process * mChannelCount * bytes_per_sample;
    }
}
Code example #12
// static
status_t AudioRecord::getMinFrameCount(
        size_t* frameCount,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // default to 0 in case of error
    *frameCount = 0;

    size_t size = 0;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
    if (status != NO_ERROR) {
        ALOGE("AudioSystem could not query the input buffer size; status %d", status);
        return NO_INIT;
    }

    if (size == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x",
            sampleRate, format, channelMask);
        return BAD_VALUE;
    }

    // We double the size of input buffer for ping pong use of record buffer.
    size <<= 1;
    uint32_t channelCount = popcount(channelMask);
#ifdef QCOM_HARDWARE
    if (audio_is_linear_pcm(format))
#endif
        size /= channelCount * audio_bytes_per_sample(format);
#ifdef QCOM_HARDWARE
    else
        size /= sizeof(uint8_t);
#endif

    *frameCount = size;
    return NO_ERROR;
}
Code example #13
// Play audio from a WAV file.
//
// Parameters:
//   out_stream: A pointer to the output audio stream.
//   in_file: A pointer to a SNDFILE object.
//   config: A pointer to struct that contains audio configuration data.
//
// Returns: A non-negative number on success, a negative value on failure.
int PlayFile(audio_stream_out_t* out_stream, SNDFILE* in_file,
             audio_config_t* config) {
  size_t buffer_size = out_stream->common.get_buffer_size(&out_stream->common);
  size_t kFrameSize =
      audio_bytes_per_sample(kAudioPlaybackFormat) *
      audio_channel_count_from_out_mask(config->channel_mask);
  // Size the buffer in shorts so a full HAL buffer of interleaved samples fits;
  // sizing it by frames alone would undercount by the channel factor.
  short* data = new short[buffer_size / sizeof(short)];
  int rc = 0;
  sf_count_t frames_read = 1;
  while (frames_read != 0) {
    size_t bytes_wanted =
        out_stream->common.get_buffer_size(&out_stream->common);
    frames_read = sf_readf_short(in_file, data, bytes_wanted / kFrameSize);
    rc = out_stream->write(out_stream, data, frames_read * kFrameSize);
    if (rc < 0) {
      LOG(ERROR) << "Writing data to hal failed. (" << strerror(rc) << ")";
      break;
    }
  }
  delete[] data;
  return rc;
}
Code example #14
TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount,
        audio_format_t format, uint32_t sampleRate, const AudioPlaybackRate &playbackRate) :
        mChannelCount(channelCount),
        mFormat(format),
        mSampleRate(sampleRate),
        mFrameSize(channelCount * audio_bytes_per_sample(format)),
        mLocalBufferFrameCount(0),
        mLocalBufferData(NULL),
        mRemaining(0),
        mSonicStream(sonicCreateStream(sampleRate, mChannelCount)),
        mFallbackFailErrorShown(false),
        mAudioPlaybackRateValid(false)
{
    LOG_ALWAYS_FATAL_IF(mSonicStream == NULL,
            "TimestretchBufferProvider can't allocate Sonic stream");

    setPlaybackRate(playbackRate);
    ALOGV("TimestretchBufferProvider(%p)(%u, %#x, %u %f %f %d %d)",
            this, channelCount, format, sampleRate, playbackRate.mSpeed,
            playbackRate.mPitch, playbackRate.mStretchMode, playbackRate.mFallbackMode);
    mBuffer.frameCount = 0;
}
Code example #15
File: format.c Project: 602147629/PlanetWar
void memcpy_by_audio_format(void *dst, audio_format_t dst_format,
        const void *src, audio_format_t src_format, size_t count)
{
    /* default cases for error falls through to fatal log below. */
    if (dst_format == src_format) {
        switch (dst_format) {
        case AUDIO_FORMAT_PCM_16_BIT:
        case AUDIO_FORMAT_PCM_FLOAT:
        case AUDIO_FORMAT_PCM_8_BIT:
        case AUDIO_FORMAT_PCM_24_BIT_PACKED:
        case AUDIO_FORMAT_PCM_32_BIT:
        case AUDIO_FORMAT_PCM_8_24_BIT:
            memcpy(dst, src, count * audio_bytes_per_sample(dst_format));
            return;
        default:
            break;
        }
    }
    switch (dst_format) {
    case AUDIO_FORMAT_PCM_16_BIT:
        switch (src_format) {
        case AUDIO_FORMAT_PCM_FLOAT:
            memcpy_to_i16_from_float((int16_t*)dst, (float*)src, count);
            return;
        case AUDIO_FORMAT_PCM_8_BIT:
            memcpy_to_i16_from_u8((int16_t*)dst, (uint8_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_24_BIT_PACKED:
            memcpy_to_i16_from_p24((int16_t*)dst, (uint8_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_32_BIT:
            memcpy_to_i16_from_i32((int16_t*)dst, (int32_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_8_24_BIT:
            memcpy_to_i16_from_q8_23((int16_t*)dst, (int32_t*)src, count);
            return;
        default:
            break;
        }
        break;
    case AUDIO_FORMAT_PCM_FLOAT:
        switch (src_format) {
        case AUDIO_FORMAT_PCM_16_BIT:
            memcpy_to_float_from_i16((float*)dst, (int16_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_8_BIT:
            memcpy_to_float_from_u8((float*)dst, (uint8_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_24_BIT_PACKED:
            memcpy_to_float_from_p24((float*)dst, (uint8_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_32_BIT:
            memcpy_to_float_from_i32((float*)dst, (int32_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_8_24_BIT:
            memcpy_to_float_from_q8_23((float*)dst, (int32_t*)src, count);
            return;
        default:
            break;
        }
        break;
    case AUDIO_FORMAT_PCM_8_BIT:
        switch (src_format) {
        case AUDIO_FORMAT_PCM_16_BIT:
            memcpy_to_u8_from_i16((uint8_t*)dst, (int16_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_FLOAT:
            memcpy_to_u8_from_float((uint8_t*)dst, (float*)src, count);
            return;
        default:
            break;
        }
        break;
    case AUDIO_FORMAT_PCM_24_BIT_PACKED:
        switch (src_format) {
        case AUDIO_FORMAT_PCM_16_BIT:
            memcpy_to_p24_from_i16((uint8_t*)dst, (int16_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_FLOAT:
            memcpy_to_p24_from_float((uint8_t*)dst, (float*)src, count);
            return;
        default:
            break;
        }
        break;
    case AUDIO_FORMAT_PCM_32_BIT:
        switch (src_format) {
        case AUDIO_FORMAT_PCM_16_BIT:
            memcpy_to_i32_from_i16((int32_t*)dst, (int16_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_FLOAT:
            memcpy_to_i32_from_float((int32_t*)dst, (float*)src, count);
            return;
        default:
            break;
        }
        break;
    case AUDIO_FORMAT_PCM_8_24_BIT:
        switch (src_format) {
        case AUDIO_FORMAT_PCM_16_BIT:
            memcpy_to_q8_23_from_i16((int32_t*)dst, (int16_t*)src, count);
            return;
        case AUDIO_FORMAT_PCM_FLOAT:
            memcpy_to_q8_23_from_float_with_clamp((int32_t*)dst, (float*)src, count);
            return;
        case AUDIO_FORMAT_PCM_24_BIT_PACKED: {
            memcpy_to_q8_23_from_p24((int32_t *)dst, (uint8_t *)src, count);
            return;
        }
        default:
            break;
        }
        break;
    default:
        break;
    }
    LOG_ALWAYS_FATAL("invalid src format %#x for dst format %#x",
            src_format, dst_format);
}
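A hypothetical call site for the function above (assumes an AOSP build where audio_utils/format.h declares memcpy_by_audio_format()). Note that count is in samples rather than frames, since the fast path above copies count * audio_bytes_per_sample(dst_format) bytes:

#include <stdint.h>
#include <stddef.h>
#include <audio_utils/format.h>  /* assumed AOSP header for memcpy_by_audio_format() */

/* Convert interleaved float PCM to interleaved 16-bit PCM. */
static void convert_float_to_i16(int16_t *dst, const float *src,
                                 size_t frames, size_t channels) {
    /* count = frames * channels because the function counts samples, not frames */
    memcpy_by_audio_format(dst, AUDIO_FORMAT_PCM_16_BIT,
                           src, AUDIO_FORMAT_PCM_FLOAT, frames * channels);
}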
Code example #16
File: AudioRecord.cpp Project: aurorarom/JsonUtil
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    AutoMutex lock(mLock);

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    mInputSource = inputSource;

    if (sampleRate == 0) {
        ALOGE("Invalid sample rate %u", sampleRate);
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
    if (format != AUDIO_FORMAT_PCM_16_BIT) {
        ALOGE("Format %#x is not supported", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    } else {
        mFrameSize = sizeof(uint8_t);
    }

    // mFrameCount is initialized in openRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = AudioSystem::newAudioUniqueId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mActive = false;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}
Code example #17
/* TODO mutex stuff here (see out_write) */
static ssize_t in_read(struct audio_stream_in *stream, void* buffer, size_t bytes)
{
    size_t num_read_buff_bytes = 0;
    void * read_buff = buffer;
    void * out_buff = buffer;
    int ret = 0;

    struct stream_in * in = (struct stream_in *)stream;

    lock_input_stream(in);
    if (in->standby) {
        pthread_mutex_lock(&in->dev->lock);
        ret = start_input_stream(in);
        pthread_mutex_unlock(&in->dev->lock);
        if (ret != 0) {
            goto err;
        }
        in->standby = false;
    }

    alsa_device_profile * profile = in->profile;

    /*
     * OK, we need to figure out how much data to read to be able to output the requested
     * number of bytes in the HAL format (16-bit, stereo).
     */
    num_read_buff_bytes = bytes;
    int num_device_channels = proxy_get_channel_count(&in->proxy); /* what we told Alsa */
    int num_req_channels = in->hal_channel_count; /* what we told AudioFlinger */

    if (num_device_channels != num_req_channels) {
        num_read_buff_bytes = (num_device_channels * num_read_buff_bytes) / num_req_channels;
    }

    /* Setup/Realloc the conversion buffer (if necessary). */
    if (num_read_buff_bytes != bytes) {
        if (num_read_buff_bytes > in->conversion_buffer_size) {
            /*TODO Remove this when AudioPolicyManger/AudioFlinger support arbitrary formats
              (and do these conversions themselves) */
            in->conversion_buffer_size = num_read_buff_bytes;
            in->conversion_buffer = realloc(in->conversion_buffer, in->conversion_buffer_size);
        }
        read_buff = in->conversion_buffer;
    }

    ret = proxy_read(&in->proxy, read_buff, num_read_buff_bytes);
    if (ret == 0) {
        if (num_device_channels != num_req_channels) {
            // ALOGV("chans dev:%d req:%d", num_device_channels, num_req_channels);

            out_buff = buffer;
            /* Num Channels conversion */
            if (num_device_channels != num_req_channels) {
                audio_format_t audio_format = in_get_format(&(in->stream.common));
                unsigned sample_size_in_bytes = audio_bytes_per_sample(audio_format);

                num_read_buff_bytes =
                    adjust_channels(read_buff, num_device_channels,
                                    out_buff, num_req_channels,
                                    sample_size_in_bytes, num_read_buff_bytes);
            }
        }

        /* no need to acquire in->dev->lock to read mic_muted here as we don't change its state */
        if (num_read_buff_bytes > 0 && in->dev->mic_muted)
            memset(buffer, 0, num_read_buff_bytes);
    } else {
        num_read_buff_bytes = 0; // reset the value after headset is unplugged
    }

err:
    pthread_mutex_unlock(&in->lock);

    return num_read_buff_bytes;
}
Code example #18
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        callback_t cbf,
        void* user,
        int notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags)
{
    ALOGV("sampleRate %u, channelMask %#x, format %d", sampleRate, channelMask, format);
    ALOGV("inputSource %d", inputSource);
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // FIXME "int" here is legacy and will be replaced by size_t later
    if (frameCountInt < 0) {
        ALOGE("Invalid frame count %d", frameCountInt);
        return BAD_VALUE;
    }
    size_t frameCount = frameCountInt;

    ALOGV("set(): sampleRate %u, channelMask %#x, frameCount %u", sampleRate, channelMask,
            frameCount);

    AutoMutex lock(mLock);

    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    mInputSource = inputSource;

    if (sampleRate == 0) {
        ALOGE("Invalid sample rate %u", sampleRate);
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %d", format);
        return BAD_VALUE;
    }
#if defined(QCOM_HARDWARE) && !defined(QCOM_DIRECTTRACK)
    if (format != AUDIO_FORMAT_PCM_16_BIT &&
           !audio_is_compress_voip_format(format) &&
           !audio_is_compress_capture_format(format)) {
#else
#ifndef QCOM_DIRECTTRACK
    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
    if (format != AUDIO_FORMAT_PCM_16_BIT) {
#endif
#endif
#ifndef QCOM_DIRECTTRACK
        ALOGE("Format %d is not supported", format);
        return BAD_VALUE;
    }
#endif

    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = popcount(channelMask);
    mChannelCount = channelCount;

#ifdef QCOM_DIRECTTRACK
    mFrameSize = frameSize();

    size_t inputBuffSizeInBytes = -1;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &inputBuffSizeInBytes);
    if (status != NO_ERROR) {
        ALOGE("AudioSystem could not query the input buffer size; status %d", status);
        return NO_INIT;
    }

    if (inputBuffSizeInBytes == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x",
            sampleRate, format, channelMask);
        return BAD_VALUE;
    }

    int minFrameCount = (inputBuffSizeInBytes * 2)/mFrameSize;
#else
    // Assumes audio_is_linear_pcm(format), else sizeof(uint8_t)
#ifdef QCOM_HARDWARE
    if (audio_is_linear_pcm(format))
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    else
        mFrameSize = sizeof(uint8_t);
#else
    mFrameSize = channelCount * audio_bytes_per_sample(format);
#endif

    // validate framecount
    size_t minFrameCount = 0;
    status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
            sampleRate, format, channelMask);
    if (status != NO_ERROR) {
        ALOGE("getMinFrameCount() failed; status %d", status);
        return status;
    }
#endif

    ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);

    if (frameCount == 0) {
        frameCount = minFrameCount;
    } else if (frameCount < minFrameCount) {
        ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount);
        return BAD_VALUE;
    }
    mFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;

    if (sessionId == 0) {
        mSessionId = AudioSystem::newAudioSessionId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;

    // create the IAudioRecord
    status = openRecord_l(0 /*epoch*/);
    if (status) {
        return status;
    }

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    mStatus = NO_ERROR;

    // Update buffer size in case it has been limited by AudioFlinger during track creation
    mFrameCount = mCblk->frameCount_;

    mActive = false;
    mCbf = cbf;
    mRefreshRemaining = true;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}

#ifdef QCOM_DIRECTTRACK
audio_source_t AudioRecord::inputSource() const
{
    return mInputSource;
}
#endif

// -------------------------------------------------------------------------

status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
{
    ALOGV("start, sync event %d trigger session %d", event, triggerSession);

    AutoMutex lock(mLock);
    if (mActive) {
        return NO_ERROR;
    }

    // reset current position as seen by client to 0
    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());

    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        ALOGV("mAudioRecord->start()");
        status = mAudioRecord->start(event, triggerSession);
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreRecord_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
    } else {
        mActive = true;
        sp<AudioRecordThread> t = mAudioRecordThread;
        if (t != 0) {
            t->resume();
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    }

    return status;
}
Code example #19
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        audio_input_flags_t flags,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
          "uid %d, pid %d",
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    if (pAttributes == NULL) {
        memset(&mAttributes, 0, sizeof(audio_attributes_t));
        mAttributes.source = inputSource;
    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
              mAttributes.source, mAttributes.flags, mAttributes.tags);
    }

    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    // AudioFlinger capture only supports linear PCM
    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
        ALOGE("Format %#x is not linear pcm", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    } else {
        mFrameSize = sizeof(uint8_t);
    }

    // mFrameCount is initialized in openRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }

    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);

    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000 * mFrameCount) / mSampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;
    mFramesRead = 0;
    mFramesReadServerOffset = 0;

    return NO_ERROR;
}
Code example #20
DownmixerBufferProvider::DownmixerBufferProvider(
        audio_channel_mask_t inputChannelMask,
        audio_channel_mask_t outputChannelMask, audio_format_t format,
        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
        CopyBufferProvider(
            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
{
    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
            this, inputChannelMask, outputChannelMask, format,
            sampleRate, sessionId);
    if (!sIsMultichannelCapable
            || EffectCreate(&sDwnmFxDesc.uuid,
                    sessionId,
                    SESSION_ID_INVALID_AND_IGNORED,
                    &mDownmixHandle) != 0) {
         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
         mDownmixHandle = NULL;
         return;
     }
     // channel input configuration will be overridden per-track
     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
     mDownmixConfig.inputCfg.format = format;
     mDownmixConfig.outputCfg.format = format;
     mDownmixConfig.inputCfg.samplingRate = sampleRate;
     mDownmixConfig.outputCfg.samplingRate = sampleRate;
     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
     // input and output buffer provider, and frame count will not be used as the downmix effect
     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;

     int cmdStatus;
     uint32_t replySize = sizeof(int);

     // Configure downmixer
     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
             &mDownmixConfig /*pCmdData*/,
             &replySize, &cmdStatus /*pReplyData*/);
     if (status != 0 || cmdStatus != 0) {
         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
                 status, cmdStatus);
         EffectRelease(mDownmixHandle);
         mDownmixHandle = NULL;
         return;
     }

     // Enable downmixer
     replySize = sizeof(int);
     status = (*mDownmixHandle)->command(mDownmixHandle,
             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
             &replySize, &cmdStatus /*pReplyData*/);
     if (status != 0 || cmdStatus != 0) {
         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
                 status, cmdStatus);
         EffectRelease(mDownmixHandle);
         mDownmixHandle = NULL;
         return;
     }

     // Set downmix type
     // parameter size rounded for padding on 32bit boundary
     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
     const int downmixParamSize =
             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
     CHECK(param != NULL);
     param->psize = sizeof(downmix_params_t);
     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
     memcpy(param->data, &downmixParam, param->psize);
     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
     param->vsize = sizeof(downmix_type_t);
     memcpy(param->data + psizePadded, &downmixType, param->vsize);
     replySize = sizeof(int);
     status = (*mDownmixHandle)->command(mDownmixHandle,
             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
     free(param);
     if (status != 0 || cmdStatus != 0) {
         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
                 status, cmdStatus);
         EffectRelease(mDownmixHandle);
         mDownmixHandle = NULL;
         return;
     }
     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
}
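The parameter block at the end rounds psize up to a 32-bit boundary before appending the value. As a worked instance (assuming sizeof(downmix_params_t) == 4 and a 4-byte int): psizePadded = ((4 - 1) / 4 + 1) * 4 = 4, so the downmix_type_t value is written 4 bytes into param->data, immediately after the parameter ID.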