Example #1
0
void ZslProcessor::findMatchesLocked() {
    ALOGVV("Scanning");
    for (size_t i = 0; i < mZslQueue.size(); i++) {
        ZslPair &queueEntry = mZslQueue.editItemAt(i);
        nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
        IF_ALOGV() {
            camera_metadata_entry_t entry;
            nsecs_t frameTimestamp = 0;
            if (!queueEntry.frame.isEmpty()) {
                entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
                frameTimestamp = entry.data.i64[0];
            }
            ALOGVV("   %d: b: %" PRId64 "\tf: %" PRId64, i,
                    bufferTimestamp, frameTimestamp );
        }
        if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) {
            // Have buffer, no matching frame. Look for one
            for (size_t j = 0; j < mFrameList.size(); j++) {
                bool match = false;
                CameraMetadata &frame = mFrameList.editItemAt(j);
                if (!frame.isEmpty()) {
                    camera_metadata_entry_t entry;
                    entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
                    if (entry.count == 0) {
                        ALOGE("%s: Can't find timestamp in frame!",
                                __FUNCTION__);
                        continue;
                    }
                    nsecs_t frameTimestamp = entry.data.i64[0];
                    if (bufferTimestamp == frameTimestamp) {
                        ALOGVV("%s: Found match %" PRId64, __FUNCTION__,
                                frameTimestamp);
                        match = true;
                    } else {
                        // abs() truncates to int; use llabs() for a 64-bit delta
                        int64_t delta = llabs(bufferTimestamp - frameTimestamp);
                        if (delta < 1000000) {  // accept near-matches within 1 ms
                            ALOGVV("%s: Found close match %" PRId64 " (delta %" PRId64 ")",
                                    __FUNCTION__, bufferTimestamp, delta);
                            match = true;
                        }
                    }
                }
                if (match) {
                    queueEntry.frame.acquire(frame);
                    break;
                }
            }
        }
    }
}
Example #2
0
void PcmBufferProvider::releaseBuffer(Buffer *buffer) {
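    // Never release more frames than getNextBuffer() handed out (_unrel).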
    if (buffer->frameCount > _unrel) {
        ALOGVV("ERROR releaseBuffer() released %zu frames but only %zu available "
                "to release", buffer->frameCount, _unrel);
        _nextFrame += _unrel;
        _unrel = 0;
    } else {
        ALOGVV("releaseBuffer() released %zu frames out of %zu frames available "
                           "to release", buffer->frameCount, _unrel);
        _nextFrame += buffer->frameCount;
        _unrel -= buffer->frameCount;
    }
    buffer->frameCount = 0;
    buffer->raw = NULL;
}
Example #3
0
static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
                         size_t bytes)
{
    int ret = 0;
    struct stream_out *out = (struct stream_out *)stream;
    struct audio_device *adev = out ? out->dev : NULL;

    if (adev == NULL) return -ENOSYS;

    ALOGV("%s enter", __func__);

    pthread_mutex_lock(&out->dev->lock);
    pthread_mutex_lock(&out->lock);

    // there is a possibility that the HD interface is open
    // and normal pcm stream is still active. Feed the new
    // interface to normal pcm stream
    if (adev->active_pcm) {
        if (adev->active_pcm != out->pcm)
            out->pcm = adev->active_pcm;
    }

    if ((out->standby) || (!adev->active_pcm)) {
        ret = start_output_stream(out);
        if (ret != 0) {
            goto err;
        }
        out->standby = false;
    }

    if (!out->pcm) {
        ALOGD("%s: null handle to write - device already closed", __func__);
        goto err;
    }

    ret = pcm_write(out->pcm, (void *)buffer, bytes);

    ALOGVV("%s: pcm_write returned = %d rate = %d",__func__,ret,out->pcm_config.rate);

err:
    pthread_mutex_unlock(&out->lock);
    pthread_mutex_unlock(&out->dev->lock);


    if (ret != 0) {
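        // bytes -> frames -> milliseconds: sleep for roughly how long this
        // buffer would have taken to play, so upstream timing is preserved.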
        uint64_t duration_ms = ((bytes * 1000)/
                                (audio_stream_frame_size(&stream->common)) /
                                (out_get_sample_rate(&stream->common)));
        ALOGV("%s : silence written", __func__);
        usleep(duration_ms * 1000);
    }

    ALOGV("%s exit",__func__);

    return bytes;
}
Example #4
0
static char* get_card_name_from_substr(const char*name)
{
    FILE *fp;
    // static so the substring returned below (which points into this buffer)
    // stays valid after the function returns; note this is not thread-safe
    static char alsacard[500];
    char* substr;
    int found = 0;

    if ((fp = fopen("/proc/asound/cards", "r")) == NULL) {
        ALOGE("Cannot open /proc/asound/cards file to get sound card info");
    } else {
        while (fgets(alsacard, sizeof(alsacard), fp) != NULL) {
            ALOGVV("alsacard %s", alsacard);
            if (strstr(alsacard, "USB-Audio") || strstr(alsacard, "USB Audio")) {
                found = 1;
                break;
            }
        }
        fclose(fp);
    }
    if (found) {
        ALOGD("Found USB card %s", alsacard);

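        // A matching /proc/asound/cards row typically looks like
        // " 1 [Device         ]: USB-Audio - USB Audio Device",
        // so the card name is the text between '[' and ']'.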
        substr = strtok(alsacard, "[") ? strtok(NULL, "]") : NULL;
        ALOGVV("filter 1 substr %s", substr);
        if (!substr) return NULL;

        // remove spaces if any in the stripped name
        substr = strtok(substr, " ");
        if (!substr) return NULL;
        ALOGV("usb string substr %s", substr);

        return substr;
    }
    return NULL;
}
Example #5
0
void offload_eq_set_bands_level(struct eq_params *eq, int num_bands,
                                const uint16_t *band_freq_list,
                                int *band_gain_list)
{
    int i;
    ALOGVV("%s", __func__);
    eq->config.num_bands = num_bands;
    for (i = 0; i < num_bands; i++) {
        eq->per_band_cfg[i].band_idx = i;
        eq->per_band_cfg[i].filter_type = EQ_BAND_BOOST;
        eq->per_band_cfg[i].freq_millihertz = band_freq_list[i] * 1000;
        eq->per_band_cfg[i].gain_millibels = band_gain_list[i] * 100;
        eq->per_band_cfg[i].quality_factor = Q8_UNITY;
    }
}
Example #6
0
status_t ZslProcessor::processNewZslBuffer() {
    ATRACE_CALL();
    status_t res;
    sp<BufferItemConsumer> zslConsumer;
    {
        Mutex::Autolock l(mInputMutex);
        if (mZslConsumer == 0) return OK;
        zslConsumer = mZslConsumer;
    }
    ALOGVV("Trying to get next buffer");
    BufferItemConsumer::BufferItem item;
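    // Non-blocking acquire: NO_BUFFER_AVAILABLE below simply means no new
    // ZSL buffer is pending yet.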
    res = zslConsumer->acquireBuffer(&item, 0);
    if (res != OK) {
        if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
            ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
                    "%s (%d)", __FUNCTION__,
                    mId, strerror(-res), res);
        } else {
            ALOGVV("  No buffer");
        }
        return res;
    }

    Mutex::Autolock l(mInputMutex);

    if (mState == LOCKED) {
        ALOGVV("In capture, discarding new ZSL buffers");
        zslConsumer->releaseBuffer(item);
        return OK;
    }

    ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail);

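    // The ZSL queue is a ring buffer; if advancing the head would collide
    // with the tail, drop the oldest entry first.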
    if ((mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
        ALOGVV("Releasing oldest buffer");
        zslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
        mZslQueue.replaceAt(mZslQueueTail);
        mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
    }

    ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead);

    queueHead.buffer = item;
    queueHead.frame.release();

    mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;

    ALOGVV("  Acquired buffer, timestamp %" PRId64, queueHead.buffer.mTimestamp);

    findMatchesLocked();

    return OK;
}
Example #7
0
//static
float Gains::volIndexToDb(Volume::device_category deviceCategory,
                          const StreamDescriptor& streamDesc,
                          int indexInUi)
{
    const VolumeCurvePoint *curve = streamDesc.getVolumeCurvePoint(deviceCategory);

    // the volume index in the UI is relative to the min and max volume indices for this stream type
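    // rescale the stream's [min..max] UI index range onto the curve's own
    // index range before segment lookup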
    int nbSteps = 1 + curve[Volume::VOLMAX].mIndex -
            curve[Volume::VOLMIN].mIndex;
    int volIdx = (nbSteps * (indexInUi - streamDesc.getVolumeIndexMin())) /
            (streamDesc.getVolumeIndexMax() - streamDesc.getVolumeIndexMin());

    // find what part of the curve this index volume belongs to, or if it's out of bounds
    int segment = 0;
    if (volIdx < curve[Volume::VOLMIN].mIndex) {         // out of bounds
        return VOLUME_MIN_DB;
    } else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
        segment = 0;
    } else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
        segment = 1;
    } else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
        segment = 2;
    } else {                                                               // out of bounds
        return 0.0f;
    }

    // linear interpolation in the attenuation table in dB
    float decibels = curve[segment].mDBAttenuation +
            ((float)(volIdx - curve[segment].mIndex)) *
                ( (curve[segment+1].mDBAttenuation -
                        curve[segment].mDBAttenuation) /
                    ((float)(curve[segment+1].mIndex -
                            curve[segment].mIndex)) );

    ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
            curve[segment].mIndex, volIdx,
            curve[segment+1].mIndex,
            curve[segment].mDBAttenuation,
            decibels,
            curve[segment+1].mDBAttenuation);

    return decibels;
}
Example #8
0
status_t PcmBufferProvider::getNextBuffer(Buffer *buffer,
                                          int64_t pts/* = kInvalidPTS*/) {
    (void) pts; // suppress warning
    size_t requestedFrames = buffer->frameCount;
    if (requestedFrames > _numFrames - _nextFrame) {
        buffer->frameCount = _numFrames - _nextFrame;
    }

    ALOGVV("getNextBuffer() requested %zu frames out of %zu frames available,"
                  " and returned %zu frames",
              requestedFrames, (size_t) (_numFrames - _nextFrame), buffer->frameCount);

    _unrel = buffer->frameCount;
    if (buffer->frameCount > 0) {
        buffer->raw = (char *) _addr + _frameSize * _nextFrame;
        return NO_ERROR;
    } else {
        buffer->raw = NULL;
        return NOT_ENOUGH_DATA;
    }
}
Example #9
0
int start_input_stream(struct stream_in *in)
{
    int ret = 0;
    struct audio_device *adev = in->dev;

    adev->card = get_card_number_by_name("USB Audio");
    adev->device = DEFAULT_DEVICE;
    ALOGVV("%s: USB card number = %d, device = %d",__func__,adev->card,adev->device);

    ALOGV("%s :: requested params \
         \r \t channels = %d,    \
         \r \t rate     = %d,    \
         \r \t period_size = %d, \
         \r \t period_count = %d, \
         \r \t format = %d", __func__,
       in->pcm_config.channels,in->pcm_config.rate,
       in->pcm_config.period_size,in->pcm_config.period_count,
       in->pcm_config.format);

    ret = validate_input_hardware_params(in);
    if (ret != 0) {
        ALOGE("Unsupported input stream parameters");
        in->pcm = NULL;
        return -ENOSYS;
    }

    in->pcm = pcm_open(adev->card, adev->device,
                           PCM_IN, &in->pcm_config);
    if (in->pcm && !pcm_is_ready(in->pcm)) {
        ALOGE("%s: %s", __func__, pcm_get_error(in->pcm));
        pcm_close(in->pcm);
        in->pcm = NULL;
        ret = -EIO;
        ALOGD("%s: exit: status(%d)", __func__, ret);
        return ret;
    } else {
        ALOGV("%s: exit capture pcm = %x", __func__,in->pcm);
        return ret;
    }
}
Example #10
0
int offload_eq_get_enable_flag(struct eq_params *eq)
{
    ALOGVV("%s: enabled=%d", __func__, (int)eq->enable_flag);
    return eq->enable_flag;
}
Example #11
0
void offload_reverb_set_density(struct reverb_params *reverb, int density)
{
    ALOGVV("%s: density %d", __func__, density);
    reverb->density = density;
}
Example #12
0
void offload_reverb_set_diffusion(struct reverb_params *reverb, int diffusion)
{
    ALOGVV("%s: diffusion %d", __func__, diffusion);
    reverb->diffusion = diffusion;
}
Example #13
0
void offload_reverb_set_reflections_delay(struct reverb_params *reverb,
                                          int reflections_delay)
{
    ALOGVV("%s: ref delay", __func__, reflections_delay);
    reverb->reflections_delay = reflections_delay;
}
Example #14
0
void offload_reverb_set_reflections_level(struct reverb_params *reverb,
                                          int reflections_level)
{
    ALOGVV("%s: ref level %d", __func__, reflections_level);
    reverb->reflections_level = reflections_level;
}
Example #15
0
void offload_reverb_set_enable_flag(struct reverb_params *reverb, bool enable)
{
    ALOGVV("%s: enable=%d", __func__, (int)enable);
    reverb->enable_flag = enable;
}
Example #16
0
audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
{
    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();

    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();

    uint32_t device = AUDIO_DEVICE_NONE;
    uint32_t availableOutputDevicesType = availableOutputDevices.types();

    switch (strategy) {

    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
        device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        if (!device) {
            ALOGE("getDeviceForStrategy() no device found for "\
                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
        }
        break;

    case STRATEGY_SONIFICATION_RESPECTFUL:
        if (isInCall()) {
            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
        } else if (outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
                SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
            // while media is playing on a remote device, use the sonification behavior.
            // Note that we test this usecase before testing if media is playing because
            //   the isStreamActive() method only informs about the activity of a stream, not
            //   if it's for local playback. Note also that we use the same delay between both tests
            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
            //user "safe" speaker if available instead of normal speaker to avoid triggering
            //other acoustic safety mechanisms for notification
            if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
                    (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
                device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
                device &= ~AUDIO_DEVICE_OUT_SPEAKER;
            }
        } else if (outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
            // while media is playing (or has recently played), use the same device
            device = getDeviceForStrategy(STRATEGY_MEDIA);
        } else {
            // when media is not playing anymore, fall back on the sonification behavior
            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
            //user "safe" speaker if available instead of normal speaker to avoid triggering
            //other acoustic safety mechanisms for notification
            if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
                    (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
                device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
                device &= ~AUDIO_DEVICE_OUT_SPEAKER;
            }
        }
        break;

    case STRATEGY_DTMF:
        if (!isInCall()) {
            // when off call, DTMF strategy follows the same rules as MEDIA strategy
            device = getDeviceForStrategy(STRATEGY_MEDIA);
            break;
        }
        // when in call, DTMF and PHONE strategies follow the same rules
        // FALL THROUGH

    case STRATEGY_PHONE:
        // Force use of only devices on primary output if:
        // - in call AND
        //   - cannot route from voice call RX OR
        //   - audio HAL version is < 3.0 and TX device is on the primary HW module
        if (getPhoneState() == AUDIO_MODE_IN_CALL) {
            audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
            sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
            audio_devices_t availPrimaryInputDevices =
                 availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
            audio_devices_t availPrimaryOutputDevices =
                    primaryOutput->supportedDevices() & availableOutputDevices.types();

            if (((availableInputDevices.types() &
                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
                    (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
                         (primaryOutput->getAudioPort()->getModuleVersion() <
                             AUDIO_DEVICE_API_VERSION_3_0))) {
                availableOutputDevicesType = availPrimaryOutputDevices;
            }
        }
        // for phone strategy, we first consider the forced use and then the available devices by order
        // of priority
        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
        case AUDIO_POLICY_FORCE_BT_SCO:
            if (!isInCall() || strategy != STRATEGY_DTMF) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
            if (device) break;
            // if SCO device is requested but no SCO device is available, fall back to default case
            // FALL THROUGH

        default:    // FORCE_NONE
            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
            if (!isInCall() &&
                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                    (outputs.getA2dpOutput() != 0)) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
            if (device) break;
            if (!isInCall()) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_EARPIECE;
            if (device) break;
            device = mApmObserver->getDefaultOutputDevice()->type();
            if (device == AUDIO_DEVICE_NONE) {
                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
            }
            break;

        case AUDIO_POLICY_FORCE_SPEAKER:
            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
            // A2DP speaker when forcing to speaker output
            if (!isInCall() &&
                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                    (outputs.getA2dpOutput() != 0)) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
                if (device) break;
            }
            if (!isInCall()) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
            if (device) break;
            device = mApmObserver->getDefaultOutputDevice()->type();
            if (device == AUDIO_DEVICE_NONE) {
                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
            }
            break;
        }
        break;

    case STRATEGY_SONIFICATION:

        // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
        // handleIncallSonification().
        if (isInCall()) {
            device = getDeviceForStrategy(STRATEGY_PHONE);
            break;
        }
        // FALL THROUGH

    case STRATEGY_ENFORCED_AUDIBLE:
        // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION
        // except:
        //   - when in call where it doesn't default to STRATEGY_PHONE behavior
        //   - in countries where not enforced in which case it follows STRATEGY_MEDIA

        if ((strategy == STRATEGY_SONIFICATION) ||
                (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
            if (device == AUDIO_DEVICE_NONE) {
                ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
            }
        }
        // The second device used for sonification is the same as the device used by media strategy
        // FALL THROUGH

    // FIXME: STRATEGY_ACCESSIBILITY and STRATEGY_REROUTING follow STRATEGY_MEDIA for now
    case STRATEGY_ACCESSIBILITY:
        if (strategy == STRATEGY_ACCESSIBILITY) {
            // do not route accessibility prompts to a digital output currently configured with a
            // compressed format as they would likely not be mixed and dropped.
            for (size_t i = 0; i < outputs.size(); i++) {
                sp<AudioOutputDescriptor> desc = outputs.valueAt(i);
                audio_devices_t devices = desc->device() &
                    (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
                if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
                        devices != AUDIO_DEVICE_NONE) {
                    availableOutputDevicesType = availableOutputDevices.types() & ~devices;
                }
            }
        }
        // FALL THROUGH

    case STRATEGY_REROUTING:
    case STRATEGY_MEDIA: {
        uint32_t device2 = AUDIO_DEVICE_NONE;
        if (strategy != STRATEGY_SONIFICATION) {
            // no sonification on remote submix (e.g. WFD)
            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
                device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
            }
        }
        if (isInCall() && (strategy == STRATEGY_MEDIA)) {
            device = getDeviceForStrategy(STRATEGY_PHONE);
            break;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                (outputs.getA2dpOutput() != 0)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
            }
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
            }
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
        }
        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
            // no sonification on aux digital (e.g. HDMI)
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        uint32_t device3 = AUDIO_DEVICE_NONE;
        if (strategy == STRATEGY_MEDIA) {
            // ARC, SPDIF and AUX_LINE can co-exist with others.
            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
        }

        device2 |= device3;
        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
        device |= device2;

        // If hdmi system audio mode is on, remove speaker out of output list.
        if ((strategy == STRATEGY_MEDIA) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
        }

        if (device) break;
        device = mApmObserver->getDefaultOutputDevice()->type();
        if (device == AUDIO_DEVICE_NONE) {
            ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
        }
        } break;

    default:
        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
        break;
    }

    ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
    return device;
}
Example #17
0
void StreamingProcessor::releaseRecordingFrame(const sp<IMemory>& mem) {
    ATRACE_CALL();
    status_t res;

    Mutex::Autolock m(mMutex);
    // Make sure this is for the current heap
    ssize_t offset;
    size_t size;
    sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
    if (heap->getHeapID() != mRecordingHeap->mHeap->getHeapID()) {
        ALOGW("%s: Camera %d: Mismatched heap ID, ignoring release "
                "(got %x, expected %x)", __FUNCTION__, mId,
                heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
        return;
    }
    uint8_t *data = (uint8_t*)heap->getBase() + offset;
    uint32_t type = *(uint32_t*)data;
    if (type != kMetadataBufferTypeGrallocSource) {
        ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
                __FUNCTION__, mId, type,
                kMetadataBufferTypeGrallocSource);
        return;
    }

    // Release the buffer back to the recording queue

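    // The gralloc buffer handle sits immediately after the 4-byte type tag
    // written by processRecordingFrame().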
    buffer_handle_t imgHandle = *(buffer_handle_t*)(data + 4);

    size_t itemIndex;
    for (itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
        const BufferItemConsumer::BufferItem item =
                mRecordingBuffers[itemIndex];
        if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT &&
                item.mGraphicBuffer->handle == imgHandle) {
            break;
        }
    }
    if (itemIndex == mRecordingBuffers.size()) {
        ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of "
                "outstanding buffers", __FUNCTION__, mId,
                imgHandle);
        return;
    }

    ALOGVV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
            mId, imgHandle);

    res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
    if (res != OK) {
        ALOGE("%s: Camera %d: Unable to free recording frame "
                "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
                mId, imgHandle, strerror(-res), res);
        return;
    }
    mRecordingBuffers.replaceAt(itemIndex);

    mRecordingHeapFree++;
    ALOGV_IF(mRecordingHeapFree == mRecordingHeapCount,
            "%s: Camera %d: All %d recording buffers returned",
            __FUNCTION__, mId, mRecordingHeapCount);
}
Example #18
0
void offload_eq_set_preset(struct eq_params *eq, int preset)
{
    ALOGVV("%s: preset %d", __func__, preset);
    eq->config.preset_id = preset;
    eq->config.eq_pregain = Q27_UNITY;
}
Example #19
0
void offload_reverb_set_wet_mix(struct reverb_params *reverb, int wet_mix)
{
    ALOGVV("%s: wet_mix %d", __func__, wet_mix);
    reverb->wet_mix = wet_mix;
}
Example #20
0
void offload_reverb_set_preset(struct reverb_params *reverb, int preset)
{
    ALOGVV("%s: preset %d", __func__, preset);
    if (preset && (preset <= NUM_OSL_REVERB_PRESETS_SUPPORTED))
        reverb->preset = map_reverb_opensl_preset_2_offload_preset[preset-1][1];
}
Example #21
0
void offload_reverb_set_mode(struct reverb_params *reverb, int mode)
{
    ALOGVV("%s", __func__);
    reverb->mode = mode;
}
Example #22
0
int offload_reverb_get_enable_flag(struct reverb_params *reverb)
{
    ALOGVV("%s: enabled=%d", __func__, reverb->enable_flag);
    return reverb->enable_flag;
}
Example #23
0
void offload_reverb_set_gain_adjust(struct reverb_params *reverb,
                                    int gain_adjust)
{
    ALOGVV("%s: gain %d", __func__, gain_adjust);
    reverb->gain_adjust = gain_adjust;
}
Example #24
0
void offload_reverb_set_device(struct reverb_params *reverb, uint32_t device)
{
    ALOGVV("%s: device 0x%x", __func__, device);
    reverb->device = device;
}
Example #25
0
status_t StreamingProcessor::processRecordingFrame() {
    ATRACE_CALL();
    status_t res;
    sp<Camera2Heap> recordingHeap;
    size_t heapIdx = 0;
    nsecs_t timestamp;

    sp<Camera2Client> client = mClient.promote();
    if (client == 0) {
        // Discard frames during shutdown
        BufferItemConsumer::BufferItem imgBuffer;
        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
        if (res != OK) {
            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
                ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
            }
            return res;
        }
        mRecordingConsumer->releaseBuffer(imgBuffer);
        return OK;
    }

    {
        /* acquire SharedParameters before mMutex so we don't dead lock
            with Camera2Client code calling into StreamingProcessor */
        SharedParameters::Lock l(client->getParameters());
        Mutex::Autolock m(mMutex);
        BufferItemConsumer::BufferItem imgBuffer;
        res = mRecordingConsumer->acquireBuffer(&imgBuffer, 0);
        if (res != OK) {
            if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
                ALOGE("%s: Camera %d: Can't acquire recording buffer: %s (%d)",
                        __FUNCTION__, mId, strerror(-res), res);
            }
            return res;
        }
        timestamp = imgBuffer.mTimestamp;

        mRecordingFrameCount++;
        ALOGVV("OnRecordingFrame: Frame %d", mRecordingFrameCount);

        if (l.mParameters.state != Parameters::RECORD &&
                l.mParameters.state != Parameters::VIDEO_SNAPSHOT) {
            ALOGV("%s: Camera %d: Discarding recording image buffers "
                    "received after recording done", __FUNCTION__,
                    mId);
            mRecordingConsumer->releaseBuffer(imgBuffer);
            return INVALID_OPERATION;
        }

        if (mRecordingHeap == 0) {
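            // Each heap slot holds a 4-byte metadata-type tag followed by the
            // gralloc buffer handle (see releaseRecordingFrame()).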
            const size_t bufferSize = 4 + sizeof(buffer_handle_t);
            ALOGV("%s: Camera %d: Creating recording heap with %zu buffers of "
                    "size %zu bytes", __FUNCTION__, mId,
                    mRecordingHeapCount, bufferSize);

            mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
                    "Camera2Client::RecordingHeap");
            if (mRecordingHeap->mHeap->getSize() == 0) {
                ALOGE("%s: Camera %d: Unable to allocate memory for recording",
                        __FUNCTION__, mId);
                mRecordingConsumer->releaseBuffer(imgBuffer);
                return NO_MEMORY;
            }
            for (size_t i = 0; i < mRecordingBuffers.size(); i++) {
                if (mRecordingBuffers[i].mBuf !=
                        BufferItemConsumer::INVALID_BUFFER_SLOT) {
                    ALOGE("%s: Camera %d: Non-empty recording buffers list!",
                            __FUNCTION__, mId);
                }
            }
            mRecordingBuffers.clear();
            mRecordingBuffers.setCapacity(mRecordingHeapCount);
            mRecordingBuffers.insertAt(0, mRecordingHeapCount);

            mRecordingHeapHead = 0;
            mRecordingHeapFree = mRecordingHeapCount;
        }

        if (mRecordingHeapFree == 0) {
            ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
                    __FUNCTION__, mId);
            mRecordingConsumer->releaseBuffer(imgBuffer);
            return NO_MEMORY;
        }

        heapIdx = mRecordingHeapHead;
        mRecordingHeapHead = (mRecordingHeapHead + 1) % mRecordingHeapCount;
        mRecordingHeapFree--;

        ALOGVV("%s: Camera %d: Timestamp %lld",
                __FUNCTION__, mId, timestamp);

        ssize_t offset;
        size_t size;
        sp<IMemoryHeap> heap =
                mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset,
                        &size);

        uint8_t *data = (uint8_t*)heap->getBase() + offset;
        uint32_t type = kMetadataBufferTypeGrallocSource;
        *((uint32_t*)data) = type;
        *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
        ALOGVV("%s: Camera %d: Sending out buffer_handle_t %p",
                __FUNCTION__, mId,
                imgBuffer.mGraphicBuffer->handle);
        mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
        recordingHeap = mRecordingHeap;
    }

    // Call outside locked parameters to allow re-entrancy from notification
    Camera2Client::SharedCameraCallbacks::Lock l(client->mSharedCameraCallbacks);
    if (l.mRemoteCallback != 0) {
        l.mRemoteCallback->dataCallbackTimestamp(timestamp,
                CAMERA_MSG_VIDEO_FRAME,
                recordingHeap->mBuffers[heapIdx]);
    } else {
        ALOGW("%s: Camera %d: Remote callback gone", __FUNCTION__, mId);
    }

    return OK;
}
Example #26
0
void offload_reverb_set_room_hf_level(struct reverb_params *reverb,
                                      int room_hf_level)
{
    ALOGVV("%s: level %d", __func__, room_hf_level);
    reverb->room_hf_level = room_hf_level;
}
Example #27
0
nsecs_t ZslProcessor::getCandidateTimestampLocked(size_t* metadataIdx) const {
    /**
     * Find the smallest timestamp we know about so far
     * - ensure that aeState is either converged or locked
     */

    size_t idx = 0;
    nsecs_t minTimestamp = -1;

    size_t emptyCount = mFrameList.size();

    for (size_t j = 0; j < mFrameList.size(); j++) {
        const CameraMetadata &frame = mFrameList[j];
        if (!frame.isEmpty()) {

            emptyCount--;

            camera_metadata_ro_entry_t entry;
            entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
            if (entry.count == 0) {
                ALOGE("%s: Can't find timestamp in frame!",
                        __FUNCTION__);
                continue;
            }
            nsecs_t frameTimestamp = entry.data.i64[0];
            if (minTimestamp > frameTimestamp || minTimestamp == -1) {

                entry = frame.find(ANDROID_CONTROL_AE_STATE);

                if (entry.count == 0) {
                    /**
                     * This is most likely a HAL bug. The aeState field is
                     * mandatory, so it should always be in a metadata packet.
                     */
                    ALOGW("%s: ZSL queue frame has no AE state field!",
                            __FUNCTION__);
                    continue;
                }
                if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
                        entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
                    ALOGVV("%s: ZSL queue frame AE state is %d, need "
                           "full capture",  __FUNCTION__, entry.data.u8[0]);
                    continue;
                }

                entry = frame.find(ANDROID_CONTROL_AF_MODE);
                if (entry.count == 0) {
                    ALOGW("%s: ZSL queue frame has no AF mode field!",
                            __FUNCTION__);
                    continue;
                }
                uint8_t afMode = entry.data.u8[0];
                if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
                    // Skip all the ZSL buffer for manual AF mode, as we don't really
                    // know the af state.
                    continue;
                }

                // Check AF state if device has focuser and focus mode isn't fixed
                if (mHasFocuser && !isFixedFocusMode(afMode)) {
                    // Make sure the candidate frame has good focus.
                    entry = frame.find(ANDROID_CONTROL_AF_STATE);
                    if (entry.count == 0) {
                        ALOGW("%s: ZSL queue frame has no AF state field!",
                                __FUNCTION__);
                        continue;
                    }
                    uint8_t afState = entry.data.u8[0];
                    if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
                            afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
                            afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
                        ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
                                __FUNCTION__, afState);
                        continue;
                    }
                }

                minTimestamp = frameTimestamp;
                idx = j;
            }

            ALOGVV("%s: Saw timestamp %" PRId64, __FUNCTION__, frameTimestamp);
        }
    }

    if (emptyCount == mFrameList.size()) {
        /**
         * This could be mildly bad and means our ZSL was triggered before
         * there were any frames yet received by the camera framework.
         *
         * This is a fairly corner case which can happen under:
         * + a user presses the shutter button real fast when the camera starts
         *     (startPreview followed immediately by takePicture).
         * + burst capture case (hitting shutter button as fast possible)
         *
         * If this happens in steady case (preview running for a while, call
         *     a single takePicture) then this might be a fwk bug.
         */
        ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
    }

    ALOGV("%s: Candidate timestamp %" PRId64 " (idx %zu), empty frames: %zu",
          __FUNCTION__, minTimestamp, idx, emptyCount);

    if (metadataIdx) {
        *metadataIdx = idx;
    }

    return minTimestamp;
}
Example #28
0
void offload_reverb_set_decay_time(struct reverb_params *reverb, int decay_time)
{
    ALOGVV("%s: decay time %d", __func__, decay_time);
    reverb->decay_time = decay_time;
}
Example #29
0
nsecs_t ZslProcessor3::getCandidateTimestampLocked(size_t* metadataIdx) const {
    /**
     * Find the smallest timestamp we know about so far
     * - ensure that aeState is either converged or locked
     */

    size_t idx = 0;
    nsecs_t minTimestamp = -1;

    size_t emptyCount = mFrameList.size();

    for (size_t j = 0; j < mFrameList.size(); j++) {
        const CameraMetadata &frame = mFrameList[j];
        if (!frame.isEmpty()) {

            emptyCount--;

            camera_metadata_ro_entry_t entry;
            entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
            if (entry.count == 0) {
                ALOGE("%s: Can't find timestamp in frame!",
                        __FUNCTION__);
                continue;
            }
            nsecs_t frameTimestamp = entry.data.i64[0];
            if (minTimestamp > frameTimestamp || minTimestamp == -1) {

                entry = frame.find(ANDROID_CONTROL_AE_STATE);

                if (entry.count == 0) {
                    /**
                     * This is most likely a HAL bug. The aeState field is
                     * mandatory, so it should always be in a metadata packet.
                     */
                    ALOGW("%s: ZSL queue frame has no AE state field!",
                            __FUNCTION__);
                    continue;
                }
                if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
                        entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
                    ALOGVV("%s: ZSL queue frame AE state is %d, need "
                           "full capture",  __FUNCTION__, entry.data.u8[0]);
                    continue;
                }

                minTimestamp = frameTimestamp;
                idx = j;
            }

            ALOGVV("%s: Saw timestamp %lld", __FUNCTION__, frameTimestamp);
        }
    }

    if (emptyCount == mFrameList.size()) {
        /**
         * This could be mildly bad and means our ZSL was triggered before
         * there were any frames yet received by the camera framework.
         *
         * This is a fairly corner case which can happen under:
         * + a user presses the shutter button real fast when the camera starts
         *     (startPreview followed immediately by takePicture).
         * + burst capture case (hitting shutter button as fast possible)
         *
         * If this happens in steady case (preview running for a while, call
         *     a single takePicture) then this might be a fwk bug.
         */
        ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
    }

    ALOGV("%s: Candidate timestamp %lld (idx %d), empty frames: %d",
          __FUNCTION__, minTimestamp, idx, emptyCount);

    if (metadataIdx) {
        *metadataIdx = idx;
    }

    return minTimestamp;
}
Example #30
0
void offload_reverb_set_decay_hf_ratio(struct reverb_params *reverb,
                                       int decay_hf_ratio)
{
    ALOGVV("%s: decay_hf_ratio %d", __func__, decay_hf_ratio);
    reverb->decay_hf_ratio = decay_hf_ratio;
}