bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
        int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
        /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {

    camera_metadata_ro_entry streamConfigs =
            (dataSpace == HAL_DATASPACE_DEPTH) ?
            info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
            info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);

    int32_t bestWidth = -1;
    int32_t bestHeight = -1;

    // Iterate through the listed stream configurations and find the one with the smallest
    // Euclidean distance from the given dimensions, for the given format.
    for (size_t i = 0; i < streamConfigs.count; i += 4) {
        int32_t fmt = streamConfigs.data.i32[i];
        int32_t w = streamConfigs.data.i32[i + 1];
        int32_t h = streamConfigs.data.i32[i + 2];

        // Ignore input/output type for now
        if (fmt == format) {
            if (w == width && h == height) {
                bestWidth = width;
                bestHeight = height;
                break;
            } else if (w <= ROUNDING_WIDTH_CAP && (bestWidth == -1 ||
                    CameraDeviceClient::euclidDistSquare(w, h, width, height) <
                    CameraDeviceClient::euclidDistSquare(bestWidth, bestHeight, width, height))) {
                bestWidth = w;
                bestHeight = h;
            }
        }
    }

    if (bestWidth == -1) {
        // Return false if no configurations for this format were listed
        return false;
    }

    // Set the outputs to the closest width/height
    if (outWidth != NULL) {
        *outWidth = bestWidth;
    }
    if (outHeight != NULL) {
        *outHeight = bestHeight;
    }

    // Return true if at least one configuration for this format was listed
    return true;
}
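
The search above is easier to follow in isolation. Below is a minimal standalone sketch of the same nearest-size selection, assuming the flattened {format, width, height, direction} int32 tuples that ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS stores; kRoundingWidthCap is an illustrative stand-in for the class's ROUNDING_WIDTH_CAP constant.

#include <cstdint>
#include <cstddef>

// Illustrative stand-in for CameraDeviceClient::ROUNDING_WIDTH_CAP.
static const int32_t kRoundingWidthCap = 1920;

// Squared Euclidean distance, widened to 64 bits to avoid overflow.
static int64_t distSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
    int64_t dx = x0 - x1;
    int64_t dy = y0 - y1;
    return dx * dx + dy * dy;
}

// configs is a flattened array of {format, width, height, direction} tuples;
// count is the total number of int32 values, as in streamConfigs.count.
static bool roundNearest(const int32_t* configs, size_t count,
                         int32_t format, int32_t width, int32_t height,
                         int32_t* outW, int32_t* outH) {
    int32_t bestW = -1, bestH = -1;
    for (size_t i = 0; i + 3 < count; i += 4) {
        if (configs[i] != format) continue;
        int32_t w = configs[i + 1], h = configs[i + 2];
        if (w == width && h == height) {        // exact match wins outright
            bestW = w; bestH = h;
            break;
        }
        if (w <= kRoundingWidthCap &&
                (bestW == -1 ||
                 distSquare(w, h, width, height) <
                 distSquare(bestW, bestH, width, height))) {
            bestW = w; bestH = h;
        }
    }
    if (bestW == -1) return false;              // format not listed at all
    *outW = bestW;
    *outH = bestH;
    return true;
}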
// TODO: move to Camera2ClientBase
bool CameraDeviceClient::enforceRequestPermissions(CameraMetadata& metadata) {

    const int pid = IPCThreadState::self()->getCallingPid();
    const int selfPid = getpid();
    camera_metadata_entry_t entry;

    /**
     * Mix in defaults for important security-related values
     * - android.led.transmit = defaulted ON
     */
    CameraMetadata staticInfo = mDevice->info();
    entry = staticInfo.find(ANDROID_LED_AVAILABLE_LEDS);
    for(size_t i = 0; i < entry.count; ++i) {
        uint8_t led = entry.data.u8[i];

        switch(led) {
            case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: {
                uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON;
                if (!metadata.exists(ANDROID_LED_TRANSMIT)) {
                    metadata.update(ANDROID_LED_TRANSMIT,
                                    &transmitDefault, 1);
                }
                break;
            }
        }
    }

    // We can do anything!
    if (pid == selfPid) {
        return true;
    }

    /**
     * Permission check special fields in the request
     * - android.led.transmit = android.permission.CAMERA_DISABLE_TRANSMIT
     */
    entry = metadata.find(ANDROID_LED_TRANSMIT);
    if (entry.count > 0 && entry.data.u8[0] != ANDROID_LED_TRANSMIT_ON) {
        String16 permissionString =
            String16("android.permission.CAMERA_DISABLE_TRANSMIT_LED");
        if (!checkCallingPermission(permissionString)) {
            const int uid = IPCThreadState::self()->getCallingUid();
            ALOGE("Permission Denial: "
                  "can't disable transmit LED pid=%d, uid=%d", pid, uid);
            return false;
        }
    }

    return true;
}
Code example #3
status_t FrameProcessorBase::processListeners(const CameraMetadata &frame,
        const sp<CameraDeviceBase> &device) {
    ATRACE_CALL();
    camera_metadata_ro_entry_t entry;

    // Quirks: Don't deliver partial results to listeners that don't want them
    bool quirkIsPartial = false;
    entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
    if (entry.count != 0 &&
            entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
        ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
                __FUNCTION__, device->getId());
        quirkIsPartial = true;
    }

    entry = frame.find(ANDROID_REQUEST_ID);
    if (entry.count == 0) {
        ALOGE("%s: Camera %d: Error reading frame id",
                __FUNCTION__, device->getId());
        return BAD_VALUE;
    }
    int32_t requestId = entry.data.i32[0];

    List<sp<FilteredListener> > listeners;
    {
        Mutex::Autolock l(mInputMutex);

        List<RangeListener>::iterator item = mRangeListeners.begin();
        while (item != mRangeListeners.end()) {
            if (requestId >= item->minId &&
                    requestId < item->maxId &&
                    (!quirkIsPartial || item->quirkSendPartials) ) {
                sp<FilteredListener> listener = item->listener.promote();
                if (listener == 0) {
                    item = mRangeListeners.erase(item);
                    continue;
                } else {
                    listeners.push_back(listener);
                }
            }
            item++;
        }
    }
    ALOGV("Got %d range listeners out of %d", listeners.size(), mRangeListeners.size());
    List<sp<FilteredListener> >::iterator item = listeners.begin();
    for (; item != listeners.end(); item++) {
        (*item)->onFrameAvailable(requestId, frame);
    }
    return OK;
}
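
The promote-or-erase idiom in the loop above (dropping registrations whose weak reference no longer resolves) is a general pattern. A standalone sketch using std::weak_ptr, with a hypothetical Listener interface standing in for FilteredListener:

#include <cstdint>
#include <list>
#include <memory>

struct Listener {
    virtual void onFrameAvailable(int32_t requestId) = 0;
    virtual ~Listener() = default;
};

// Collect the listeners that are still alive, erasing registrations whose
// target has been destroyed -- the std::weak_ptr analogue of promote().
static std::list<std::shared_ptr<Listener>> collectLiveListeners(
        std::list<std::weak_ptr<Listener>>& registered) {
    std::list<std::shared_ptr<Listener>> live;
    for (auto it = registered.begin(); it != registered.end(); ) {
        if (auto sp = it->lock()) {
            live.push_back(std::move(sp));
            ++it;
        } else {
            it = registered.erase(it);  // dead listener: drop and keep going
        }
    }
    return live;
}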
Code example #4
status_t CameraDeviceClientFlashControl::getSmallestSurfaceSize(
        const camera_info& info, int32_t *width, int32_t *height) {
    if (!width || !height) {
        return BAD_VALUE;
    }

    int32_t w = INT32_MAX;
    int32_t h = 1;

    CameraMetadata metadata;
    metadata = info.static_camera_characteristics;
    camera_metadata_entry streamConfigs =
            metadata.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
    for (size_t i = 0; i < streamConfigs.count; i += 4) {
        int32_t fmt = streamConfigs.data.i32[i];
        if (fmt == ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED) {
            int32_t ww = streamConfigs.data.i32[i + 1];
            int32_t hh = streamConfigs.data.i32[i + 2];

            if (w * h > ww * hh) {
                w = ww;
                h = hh;
            }
        }
    }

    // If no stream configuration is found, try the available processed sizes.
    if (streamConfigs.count == 0) {
        camera_metadata_entry availableProcessedSizes =
            metadata.find(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
        for (size_t i = 0; i < availableProcessedSizes.count; i += 2) {
            int32_t ww = availableProcessedSizes.data.i32[i];
            int32_t hh = availableProcessedSizes.data.i32[i + 1];
            if (w * h > ww * hh) {
                w = ww;
                h = hh;
            }
        }
    }

    if (w == INT32_MAX) {
        return NAME_NOT_FOUND;
    }

    *width = w;
    *height = h;

    return OK;
}
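
One caveat in the scan above: the w * h products are 32-bit, so a pathologically large listed size could overflow. A hedged standalone variant of the same smallest-area scan using 64-bit products (illustrative only, not the shipped code):

#include <cstdint>
#include <cstddef>

// Smallest-area {width, height} among flattened {format, w, h, direction}
// tuples matching 'format'; 64-bit products sidestep int32 overflow.
static bool smallestSizeForFormat(const int32_t* configs, size_t count,
                                  int32_t format,
                                  int32_t* outW, int32_t* outH) {
    int64_t bestArea = INT64_MAX;
    int32_t bestW = 0, bestH = 0;
    for (size_t i = 0; i + 3 < count; i += 4) {
        if (configs[i] != format) continue;
        int64_t area =
                static_cast<int64_t>(configs[i + 1]) * configs[i + 2];
        if (area < bestArea) {
            bestArea = area;
            bestW = configs[i + 1];
            bestH = configs[i + 2];
        }
    }
    if (bestArea == INT64_MAX) return false;  // no entry for this format
    *outW = bestW;
    *outH = bestH;
    return true;
}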
Code example #5
status_t CameraDeviceClientFlashControl::hasFlashUnitLocked(
        const String8& cameraId, bool *hasFlash) {
    if (!hasFlash) {
        return BAD_VALUE;
    }
    // Default to no flash; only set true once the metadata confirms one.
    *hasFlash = false;

    camera_info info;
    status_t res = mCameraModule->getCameraInfo(
            atoi(cameraId.string()), &info);
    if (res != 0) {
        ALOGE("%s: failed to get camera info for camera %s", __FUNCTION__,
                cameraId.string());
        return res;
    }

    CameraMetadata metadata;
    metadata = info.static_camera_characteristics;
    camera_metadata_entry flashAvailable =
            metadata.find(ANDROID_FLASH_INFO_AVAILABLE);
    if (flashAvailable.count == 1 && flashAvailable.data.u8[0] == 1) {
        *hasFlash = true;
    }

    return OK;
}
Code example #6
status_t ModuleFlashControl::hasFlashUnit(const String8& cameraId, bool *hasFlash) {
    if (!hasFlash) {
        return BAD_VALUE;
    }

    *hasFlash = false;
    Mutex::Autolock l(mLock);

    camera_info info;
    status_t res = mCameraModule->getCameraInfo(atoi(cameraId.string()),
            &info);
    if (res != 0) {
        return res;
    }

    CameraMetadata metadata;
    metadata = info.static_camera_characteristics;
    camera_metadata_entry flashAvailable =
            metadata.find(ANDROID_FLASH_INFO_AVAILABLE);
    if (flashAvailable.count == 1 && flashAvailable.data.u8[0] == 1) {
        *hasFlash = true;
    }

    return OK;
}
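Code example #7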
status_t CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
    ALOGV("%s: ending configure (%d input stream, %zu output streams)",
            __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());

    // Validate the high speed session against the required capability bit.
    if (isConstrainedHighSpeed) {
        CameraMetadata staticInfo = mDevice->info();
        camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        bool isConstrainedHighSpeedSupported = false;
        for(size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO) {
                isConstrainedHighSpeedSupported = true;
                break;
            }
        }
        if (!isConstrainedHighSpeedSupported) {
            ALOGE("%s: Camera %d: Try to create a constrained high speed configuration on a device"
                    " that doesn't support it.",
                          __FUNCTION__, mCameraId);
            return INVALID_OPERATION;
        }
    }

    status_t res;
    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;

    Mutex::Autolock icl(mBinderSerializationLock);

    if (!mDevice.get()) return DEAD_OBJECT;

    return mDevice->configureStreams(isConstrainedHighSpeed);
}
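
The capability check in endConfigure is a linear scan over the ANDROID_REQUEST_AVAILABLE_CAPABILITIES entry. The same test as a small reusable helper, a sketch assuming the caller hands in the raw u8 array from the metadata entry:

#include <cstdint>
#include <cstddef>

// True if 'capability' appears in the HAL's advertised capability list.
static bool hasCapability(const uint8_t* capabilities, size_t count,
                          uint8_t capability) {
    for (size_t i = 0; i < count; i++) {
        if (capabilities[i] == capability) {
            return true;
        }
    }
    return false;
}

// Hypothetical call site against a metadata entry:
//   camera_metadata_entry_t e =
//           staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
//   bool supported = hasCapability(e.data.u8, e.count,
//           ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO);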
Code example #8
template<typename Src, typename T>
bool FrameProcessor::get3aResult(const CameraMetadata& result, int32_t tag,
        T* value, int32_t frameNumber, int cameraId) {
    camera_metadata_ro_entry_t entry;
    if (value == NULL) {
        ALOGE("%s: Camera %d: Value to write to is NULL",
                __FUNCTION__, cameraId);
        return false;
    }

    entry = result.find(tag);
    if (entry.count == 0) {
        ALOGE("%s: Camera %d: No %s provided by HAL for frame %d!",
                __FUNCTION__, cameraId,
                get_camera_metadata_tag_name(tag), frameNumber);
        return false;
    } else {
        switch(sizeof(Src)){
            case sizeof(uint8_t):
                *value = static_cast<T>(entry.data.u8[0]);
                break;
            case sizeof(int32_t):
                *value = static_cast<T>(entry.data.i32[0]);
                break;
            default:
                ALOGE("%s: Camera %d: Unsupported source",
                        __FUNCTION__, cameraId);
                return false;
        }
    }
    return true;
}
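
Dispatching on sizeof(Src) works because metadata entries here are only ever u8 or i32, but it cannot distinguish same-sized types (int32_t vs. float, say). A type-safe alternative sketch using C++17 if constexpr, with a stand-in Entry struct; this is not the AOSP implementation, which predates C++17:

#include <cstddef>
#include <cstdint>
#include <type_traits>

struct Entry {  // stand-in for camera_metadata_ro_entry_t
    size_t count;
    union { const uint8_t* u8; const int32_t* i32; } data;
};

template <typename Src, typename T>
bool readFirst(const Entry& entry, T* value) {
    static_assert(std::is_same_v<Src, uint8_t> || std::is_same_v<Src, int32_t>,
                  "unsupported source type");
    if (value == nullptr || entry.count == 0) {
        return false;
    }
    if constexpr (std::is_same_v<Src, uint8_t>) {
        *value = static_cast<T>(entry.data.u8[0]);
    } else {
        *value = static_cast<T>(entry.data.i32[0]);
    }
    return true;
}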
Code example #9
status_t ZslProcessor::updateRequestWithDefaultStillRequest(CameraMetadata &request) const {
    sp<Camera2Client> client = mClient.promote();
    if (client == 0) {
        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }
    sp<Camera3Device> device =
        static_cast<Camera3Device*>(client->getCameraDevice().get());
    if (device == 0) {
        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    CameraMetadata stillTemplate;
    device->createDefaultRequest(CAMERA3_TEMPLATE_STILL_CAPTURE, &stillTemplate);

    // Find some of the post-processing tags, and assign the template's values to the request.
    // Only the modes listed below are checked for now, as they are the most important for
    // image quality.
    uint32_t postProcessingTags[] = {
            ANDROID_NOISE_REDUCTION_MODE,
            ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
            ANDROID_COLOR_CORRECTION_MODE,
            ANDROID_TONEMAP_MODE,
            ANDROID_SHADING_MODE,
            ANDROID_HOT_PIXEL_MODE,
            ANDROID_EDGE_MODE
    };

    camera_metadata_entry_t entry;
    for (size_t i = 0; i < sizeof(postProcessingTags) / sizeof(uint32_t); i++) {
        entry = stillTemplate.find(postProcessingTags[i]);
        if (entry.count > 0) {
            request.update(postProcessingTags[i], entry.data.u8, 1);
        }
    }

    return OK;
}
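Code example #10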
static jbyteArray CameraMetadata_readValues(JNIEnv *env, jobject thiz, jint tag) {
    ALOGV("%s (tag = %d)", __FUNCTION__, tag);

    CameraMetadata* metadata = CameraMetadata_getPointerThrow(env, thiz);
    if (metadata == NULL) return NULL;

    int tagType = get_camera_metadata_tag_type(tag);
    if (tagType == -1) {
        jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException",
                             "Tag (%d) did not have a type", tag);
        return NULL;
    }
    size_t tagSize = Helpers::getTypeSize(tagType);

    camera_metadata_entry entry = metadata->find(tag);
    if (entry.count == 0) {
         if (!metadata->exists(tag)) {
             ALOGV("%s: Tag %d does not have any entries", __FUNCTION__, tag);
             return NULL;
         } else {
             // OK: we will return a 0-sized array.
             ALOGV("%s: Tag %d had an entry, but it had 0 data", __FUNCTION__,
                   tag);
         }
    }

    jsize byteCount = entry.count * tagSize;
    jbyteArray byteArray = env->NewByteArray(byteCount);
    if (env->ExceptionCheck()) return NULL;

    // Copy into java array from native array
    ScopedByteArrayRW arrayWriter(env, byteArray);
    memcpy(arrayWriter.get(), entry.data.u8, byteCount);

    return byteArray;
}
Code example #11
void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) {
    status_t res;
    ATRACE_CALL();
    CameraMetadata frame;

    ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId());

    while ( (res = device->getNextFrame(&frame)) == OK) {

        camera_metadata_entry_t entry;

        entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
        if (entry.count == 0) {
            ALOGE("%s: Camera %d: Error reading frame number",
                    __FUNCTION__, device->getId());
            break;
        }
        ATRACE_INT("cam2_frame", entry.data.i32[0]);

        if (!processSingleFrame(frame, device)) {
            break;
        }

        if (!frame.isEmpty()) {
            Mutex::Autolock al(mLastFrameMutex);
            mLastFrame.acquire(frame);
        }
    }
    if (res != NOT_ENOUGH_DATA) {
        ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
                __FUNCTION__, device->getId(), strerror(-res), res);
        return;
    }

    return;
}
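
processNewFrames treats NOT_ENOUGH_DATA as the normal "queue drained" signal rather than an error, which is why the post-loop check only logs for other statuses. The same drain pattern in standalone form (FrameQueue and Status are illustrative stand-ins):

#include <deque>

// Illustrative stand-ins for the device queue and its status codes.
enum class Status { Ok, NotEnoughData, Error };

struct FrameQueue {
    std::deque<int> frames;
    Status getNextFrame(int* out) {
        if (frames.empty()) return Status::NotEnoughData;  // drained, not an error
        *out = frames.front();
        frames.pop_front();
        return Status::Ok;
    }
};

// Drain until the queue reports NotEnoughData; anything else is a failure.
static bool drainAll(FrameQueue& queue) {
    int frame = 0;
    Status res;
    while ((res = queue.getNextFrame(&frame)) == Status::Ok) {
        // ... process the frame ...
    }
    return res == Status::NotEnoughData;
}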
Code example #12
status_t ZslProcessor3::pushToReprocess(int32_t requestId) {
    ALOGV("%s: Send in reprocess request with id %d",
            __FUNCTION__, requestId);
    Mutex::Autolock l(mInputMutex);
    status_t res;
    sp<Camera2Client> client = mClient.promote();

    if (client == 0) {
        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    IF_ALOGV() {
        dumpZslQueue(-1);
    }

    size_t metadataIdx;
    nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx);

    if (candidateTimestamp == -1) {
        ALOGE("%s: Could not find good candidate for ZSL reprocessing",
              __FUNCTION__);
        return NOT_ENOUGH_DATA;
    }

    res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp,
                                                    /*actualTimestamp*/NULL);

    if (res == mZslStream->NO_BUFFER_AVAILABLE) {
        ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
        return NOT_ENOUGH_DATA;
    } else if (res != OK) {
        ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
                __FUNCTION__, strerror(-res), res);
        return res;
    }

    {
        CameraMetadata request = mFrameList[metadataIdx];

        // Verify that the frame is reasonable for reprocessing

        camera_metadata_entry_t entry;
        entry = request.find(ANDROID_CONTROL_AE_STATE);
        if (entry.count == 0) {
            ALOGE("%s: ZSL queue frame has no AE state field!",
                    __FUNCTION__);
            return BAD_VALUE;
        }
        if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
                entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
            ALOGV("%s: ZSL queue frame AE state is %d, need full capture",
                    __FUNCTION__, entry.data.u8[0]);
            return NOT_ENOUGH_DATA;
        }

        uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
        res = request.update(ANDROID_REQUEST_TYPE,
                &requestType, 1);
        int32_t inputStreams[1] =
                { mZslStreamId };
        if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
                inputStreams, 1);
        // TODO: Shouldn't we also update the latest preview frame?
        int32_t outputStreams[1] =
                { client->getCaptureStreamId() };
        if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
                outputStreams, 1);
        res = request.update(ANDROID_REQUEST_ID,
                &requestId, 1);

        if (res != OK ) {
            ALOGE("%s: Unable to update frame to a reprocess request",
                  __FUNCTION__);
            return INVALID_OPERATION;
        }

        res = client->stopStream();
        if (res != OK) {
            ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
                "%s (%d)",
                __FUNCTION__, client->getCameraId(), strerror(-res), res);
            return INVALID_OPERATION;
        }

        // Update JPEG settings
        {
            SharedParameters::Lock l(client->getParameters());
            res = l.mParameters.updateRequestJpeg(&request);
            if (res != OK) {
                ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
                        "capture request: %s (%d)", __FUNCTION__,
                        client->getCameraId(),
                        strerror(-res), res);
                return res;
            }
        }

        mLatestCapturedRequest = request;
        res = client->getCameraDevice()->capture(request);
        if (res != OK ) {
            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s"
                  " (%d)", __FUNCTION__, strerror(-res), res);
            return res;
        }

        mState = LOCKED;
    }

    return OK;
}
Code example #13
status_t CameraUtils::getRotationTransform(const CameraMetadata& staticInfo,
                /*out*/int32_t* transform) {
    ALOGV("%s", __FUNCTION__);

    if (transform == NULL) {
        ALOGW("%s: null transform", __FUNCTION__);
        return BAD_VALUE;
    }

    *transform = 0;

    camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_SENSOR_ORIENTATION);
    if (entry.count == 0) {
        ALOGE("%s: Can't find android.sensor.orientation in static metadata!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    camera_metadata_ro_entry_t entryFacing = staticInfo.find(ANDROID_LENS_FACING);
    if (entryFacing.count == 0) {
        ALOGE("%s: Can't find android.lens.facing in static metadata!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    int32_t& flags = *transform;

    bool mirror = (entryFacing.data.u8[0] == ANDROID_LENS_FACING_FRONT);
    int orientation = entry.data.i32[0];
    if (!mirror) {
        switch (orientation) {
            case 0:
                flags = 0;
                break;
            case 90:
                flags = NATIVE_WINDOW_TRANSFORM_ROT_90;
                break;
            case 180:
                flags = NATIVE_WINDOW_TRANSFORM_ROT_180;
                break;
            case 270:
                flags = NATIVE_WINDOW_TRANSFORM_ROT_270;
                break;
            default:
                ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
                      __FUNCTION__, orientation);
                return INVALID_OPERATION;
        }
    } else {
        // Front camera needs to be horizontally flipped for mirror-like behavior.
        // Note: Flips are applied before rotates; using XOR here as some of these flags are
        // composed in terms of other flip/rotation flags, and are not bitwise-ORable.
        switch (orientation) {
            case 0:
                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H;
                break;
            case 90:
                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
                        NATIVE_WINDOW_TRANSFORM_ROT_270;
                break;
            case 180:
                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
                        NATIVE_WINDOW_TRANSFORM_ROT_180;
                break;
            case 270:
                flags = NATIVE_WINDOW_TRANSFORM_FLIP_H ^
                        NATIVE_WINDOW_TRANSFORM_ROT_90;

                break;
            default:
                ALOGE("%s: Invalid HAL android.sensor.orientation value: %d",
                      __FUNCTION__, orientation);
                return INVALID_OPERATION;
        }

    }

    /**
     * This magic flag makes surfaceflinger un-rotate the buffers
     * to counter the extra global device UI rotation whenever the user
     * physically rotates the device.
     *
     * By doing this, the camera buffer always ends up aligned
     * with the physical camera for a "see through" effect.
     *
     * In essence, the buffer only gets rotated during preview use-cases.
     * The user is still responsible for re-creating streams of the proper
     * aspect ratio, or the preview will end up looking non-uniformly
     * stretched.
     */
    flags |= NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY;

    ALOGV("%s: final transform = 0x%x", __FUNCTION__, flags);

    return OK;
}
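
Because flips are applied before rotations and ROT_180 is itself defined as FLIP_H | FLIP_V in the underlying HAL_TRANSFORM_* bits, composing with OR would collapse the flip; XOR keeps it. A small standalone check of that, using the flag values from Android's system/window.h:

#include <cassert>

// Flag values from Android's system/window.h (HAL_TRANSFORM_*).
enum {
    FLIP_H  = 0x01,
    FLIP_V  = 0x02,
    ROT_90  = 0x04,
    ROT_180 = 0x03,  // defined as FLIP_H | FLIP_V
    ROT_270 = 0x07,  // defined as ROT_180 | ROT_90
};

int main() {
    // OR would silently lose the mirror: FLIP_H | ROT_180 is just ROT_180.
    assert((FLIP_H | ROT_180) == ROT_180);
    // XOR composes correctly: mirroring then rotating 180 degrees is a
    // vertical flip, and FLIP_H ^ ROT_180 == FLIP_V.
    assert((FLIP_H ^ ROT_180) == FLIP_V);
    return 0;
}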
Code example #14
void CameraModule::deriveCameraCharacteristicsKeys(
        uint32_t deviceVersion, CameraMetadata &chars) {
    // HAL1 devices should not reach here
    if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
        ALOGV("%s: Cannot derive keys for HAL version < 2.0");
        return;
    }

    // Keys added in HAL3.3
    if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_3) {
        const size_t NUM_DERIVED_KEYS_HAL3_3 = 5;
        Vector<uint8_t> controlModes;
        uint8_t data = ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE;
        chars.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &data, /*count*/1);
        data = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE;
        chars.update(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, &data, /*count*/1);
        controlModes.push(ANDROID_CONTROL_MODE_AUTO);
        camera_metadata_entry entry = chars.find(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
        if (entry.count > 1 ||
                (entry.count == 1 && entry.data.u8[0] != ANDROID_CONTROL_SCENE_MODE_DISABLED)) {
            controlModes.push(ANDROID_CONTROL_MODE_USE_SCENE_MODE);
        }

        // Only advertise CONTROL_OFF mode if 3A manual controls are supported.
        bool isManualAeSupported = false;
        bool isManualAfSupported = false;
        bool isManualAwbSupported = false;
        entry = chars.find(ANDROID_CONTROL_AE_AVAILABLE_MODES);
        if (entry.count > 0) {
            for (size_t i = 0; i < entry.count; i++) {
                if (entry.data.u8[i] == ANDROID_CONTROL_AE_MODE_OFF) {
                    isManualAeSupported = true;
                    break;
                }
            }
        }
        entry = chars.find(ANDROID_CONTROL_AF_AVAILABLE_MODES);
        if (entry.count > 0) {
            for (size_t i = 0; i < entry.count; i++) {
                if (entry.data.u8[i] == ANDROID_CONTROL_AF_MODE_OFF) {
                    isManualAfSupported = true;
                    break;
                }
            }
        }
        entry = chars.find(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
        if (entry.count > 0) {
            for (size_t i = 0; i < entry.count; i++) {
                if (entry.data.u8[i] == ANDROID_CONTROL_AWB_MODE_OFF) {
                    isManualAwbSupported = true;
                    break;
                }
            }
        }
        if (isManualAeSupported && isManualAfSupported && isManualAwbSupported) {
            controlModes.push(ANDROID_CONTROL_MODE_OFF);
        }

        chars.update(ANDROID_CONTROL_AVAILABLE_MODES, controlModes);

        entry = chars.find(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS);
        // HAL3.2 devices that pass the existing CTS tests should all support all LSC modes and
        // the LSC map.
        bool lensShadingModeSupported = false;
        if (entry.count > 0) {
            for (size_t i = 0; i < entry.count; i++) {
                if (entry.data.i32[i] == ANDROID_SHADING_MODE) {
                    lensShadingModeSupported = true;
                    break;
                }
            }
        }
        Vector<uint8_t> lscModes;
        Vector<uint8_t> lscMapModes;
        lscModes.push(ANDROID_SHADING_MODE_FAST);
        lscModes.push(ANDROID_SHADING_MODE_HIGH_QUALITY);
        lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF);
        if (lensShadingModeSupported) {
            lscModes.push(ANDROID_SHADING_MODE_OFF);
            lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON);
        }
        chars.update(ANDROID_SHADING_AVAILABLE_MODES, lscModes);
        chars.update(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, lscMapModes);

        entry = chars.find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
        Vector<int32_t> availableCharsKeys;
        availableCharsKeys.setCapacity(entry.count + NUM_DERIVED_KEYS_HAL3_3);
        for (size_t i = 0; i < entry.count; i++) {
            availableCharsKeys.push(entry.data.i32[i]);
        }
        availableCharsKeys.push(ANDROID_CONTROL_AE_LOCK_AVAILABLE);
        availableCharsKeys.push(ANDROID_CONTROL_AWB_LOCK_AVAILABLE);
        availableCharsKeys.push(ANDROID_CONTROL_AVAILABLE_MODES);
        availableCharsKeys.push(ANDROID_SHADING_AVAILABLE_MODES);
        availableCharsKeys.push(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES);
        chars.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, availableCharsKeys);

        // Need to update android.control.availableHighSpeedVideoConfigurations, since HAL3.3
        // adds a batch size to this array.
        entry = chars.find(ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS);
        if (entry.count > 0) {
            Vector<int32_t> highSpeedConfig;
            for (size_t i = 0; i < entry.count; i += 4) {
                highSpeedConfig.add(entry.data.i32[i]); // width
                highSpeedConfig.add(entry.data.i32[i + 1]); // height
                highSpeedConfig.add(entry.data.i32[i + 2]); // fps_min
                highSpeedConfig.add(entry.data.i32[i + 3]); // fps_max
                highSpeedConfig.add(1); // batchSize_max. default to 1 for HAL3.2
            }
            chars.update(ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS,
                    highSpeedConfig);
        }
    }

    // Always add a default for the pre-correction active array if the vendor chooses to omit this
    camera_metadata_entry entry = chars.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
    if (entry.count == 0) {
        Vector<int32_t> preCorrectionArray;
        entry = chars.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
        preCorrectionArray.appendArray(entry.data.i32, entry.count);
        chars.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, preCorrectionArray);
    }

    return;
}
Code example #15
status_t ZslProcessor::pushToReprocess(int32_t requestId) {
    ALOGV("%s: Send in reprocess request with id %d",
            __FUNCTION__, requestId);
    Mutex::Autolock l(mInputMutex);
    status_t res;
    sp<Camera2Client> client = mClient.promote();

    if (client == 0) {
        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    IF_ALOGV() {
        dumpZslQueue(-1);
    }

    if (mZslQueueTail != mZslQueueHead) {
        CameraMetadata request;
        size_t index = mZslQueueTail;
        while (index != mZslQueueHead) {
            if (!mZslQueue[index].frame.isEmpty()) {
                request = mZslQueue[index].frame;
                break;
            }
            index = (index + 1) % kZslBufferDepth;
        }
        if (index == mZslQueueHead) {
            ALOGV("%s: ZSL queue has no valid frames to send yet.",
                  __FUNCTION__);
            return NOT_ENOUGH_DATA;
        }
        // Verify that the frame is reasonable for reprocessing

        camera_metadata_entry_t entry;
        entry = request.find(ANDROID_CONTROL_AE_STATE);
        if (entry.count == 0) {
            ALOGE("%s: ZSL queue frame has no AE state field!",
                    __FUNCTION__);
            return BAD_VALUE;
        }
        if (entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_CONVERGED &&
                entry.data.u8[0] != ANDROID_CONTROL_AE_STATE_LOCKED) {
            ALOGV("%s: ZSL queue frame AE state is %d, need full capture",
                    __FUNCTION__, entry.data.u8[0]);
            return NOT_ENOUGH_DATA;
        }

        buffer_handle_t *handle =
            &(mZslQueue[index].buffer.mGraphicBuffer->handle);

        uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
        res = request.update(ANDROID_REQUEST_TYPE,
                &requestType, 1);
        int32_t inputStreams[1] =
                { mZslReprocessStreamId };
        if (res == OK) request.update(ANDROID_REQUEST_INPUT_STREAMS,
                inputStreams, 1);
        int32_t outputStreams[1] =
                { client->getCaptureStreamId() };
        if (res == OK) request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
                outputStreams, 1);
        res = request.update(ANDROID_REQUEST_ID,
                &requestId, 1);

        if (res != OK ) {
            ALOGE("%s: Unable to update frame to a reprocess request", __FUNCTION__);
            return INVALID_OPERATION;
        }

        res = client->stopStream();
        if (res != OK) {
            ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
                "%s (%d)",
                __FUNCTION__, mId, strerror(-res), res);
            return INVALID_OPERATION;
        }
        // TODO: have push-and-clear be atomic
        res = client->getCameraDevice()->pushReprocessBuffer(mZslReprocessStreamId,
                handle, this);
        if (res != OK) {
            ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
                    __FUNCTION__, strerror(-res), res);
            return res;
        }

        // Update JPEG settings
        {
            SharedParameters::Lock l(client->getParameters());
            res = l.mParameters.updateRequestJpeg(&request);
            if (res != OK) {
                ALOGE("%s: Camera %d: Unable to update JPEG entries of ZSL "
                        "capture request: %s (%d)", __FUNCTION__,
                        mId,
                        strerror(-res), res);
                return res;
            }
        }

        mLatestCapturedRequest = request;
        res = client->getCameraDevice()->capture(request);
        if (res != OK ) {
            ALOGE("%s: Unable to send ZSL reprocess request to capture: %s (%d)",
                    __FUNCTION__, strerror(-res), res);
            return res;
        }

        mState = LOCKED;
    } else {
        ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
        return NOT_ENOUGH_DATA;
    }
    return OK;
}
Code example #16
status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
        const sp<Camera2Client> &client) {
    status_t res = BAD_VALUE;
    ATRACE_CALL();
    camera_metadata_ro_entry_t entry;
    bool enableFaceDetect;

    {
        SharedParameters::Lock l(client->getParameters());
        enableFaceDetect = l.mParameters.enableFaceDetect;
    }
    entry = frame.find(ANDROID_STATISTICS_FACE_DETECT_MODE);

    // TODO: This should be an error once implementations are compliant
    if (entry.count == 0) {
        return OK;
    }

    uint8_t faceDetectMode = entry.data.u8[0];

    camera_frame_metadata metadata;
    Vector<camera_face_t> faces;
    metadata.number_of_faces = 0;

    if (enableFaceDetect &&
        faceDetectMode != ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) {

        SharedParameters::Lock l(client->getParameters());
        entry = frame.find(ANDROID_STATISTICS_FACE_RECTANGLES);
        if (entry.count == 0) {
            // No faces this frame
            /* warning: locks SharedCameraCallbacks */
            callbackFaceDetection(client, metadata);
            return OK;
        }
        metadata.number_of_faces = entry.count / 4;
        if (metadata.number_of_faces >
                l.mParameters.fastInfo.maxFaces) {
            ALOGE("%s: Camera %d: More faces than expected! (Got %d, max %d)",
                    __FUNCTION__, client->getCameraId(),
                    metadata.number_of_faces, l.mParameters.fastInfo.maxFaces);
            return res;
        }
        const int32_t *faceRects = entry.data.i32;

        entry = frame.find(ANDROID_STATISTICS_FACE_SCORES);
        if (entry.count == 0) {
            ALOGE("%s: Camera %d: Unable to read face scores",
                    __FUNCTION__, client->getCameraId());
            return res;
        }
        const uint8_t *faceScores = entry.data.u8;

        const int32_t *faceLandmarks = NULL;
        const int32_t *faceIds = NULL;

        if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
            entry = frame.find(ANDROID_STATISTICS_FACE_LANDMARKS);
            if (entry.count == 0) {
                ALOGE("%s: Camera %d: Unable to read face landmarks",
                        __FUNCTION__, client->getCameraId());
                return res;
            }
            faceLandmarks = entry.data.i32;

            entry = frame.find(ANDROID_STATISTICS_FACE_IDS);

            if (entry.count == 0) {
                ALOGE("%s: Camera %d: Unable to read face IDs",
                        __FUNCTION__, client->getCameraId());
                return res;
            }
            faceIds = entry.data.i32;
        }

        entry = frame.find(ANDROID_SCALER_CROP_REGION);
        if (entry.count < 4) {
            ALOGE("%s: Camera %d: Unable to read crop region (count = %d)",
                    __FUNCTION__, client->getCameraId(), entry.count);
            return res;
        }

        Parameters::CropRegion scalerCrop = {
            static_cast<float>(entry.data.i32[0]),
            static_cast<float>(entry.data.i32[1]),
            static_cast<float>(entry.data.i32[2]),
            static_cast<float>(entry.data.i32[3])};

        faces.setCapacity(metadata.number_of_faces);

        size_t maxFaces = metadata.number_of_faces;
        for (size_t i = 0; i < maxFaces; i++) {
            if (faceScores[i] == 0) {
                metadata.number_of_faces--;
                continue;
            }
            if (faceScores[i] > 100) {
                ALOGW("%s: Face index %zu with out of range score %d",
                        __FUNCTION__, i, faceScores[i]);
            }

            camera_face_t face;

            face.rect[0] = l.mParameters.arrayXToNormalizedWithCrop(
                                faceRects[i*4 + 0], scalerCrop);
            face.rect[1] = l.mParameters.arrayYToNormalizedWithCrop(
                                faceRects[i*4 + 1], scalerCrop);
            face.rect[2] = l.mParameters.arrayXToNormalizedWithCrop(
                                faceRects[i*4 + 2], scalerCrop);
            face.rect[3] = l.mParameters.arrayYToNormalizedWithCrop(
                                faceRects[i*4 + 3], scalerCrop);

            face.score = faceScores[i];
            if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
                face.id = faceIds[i];
                face.left_eye[0] = l.mParameters.arrayXToNormalizedWithCrop(
                        faceLandmarks[i*6 + 0], scalerCrop);
                face.left_eye[1] = l.mParameters.arrayYToNormalizedWithCrop(
                        faceLandmarks[i*6 + 1], scalerCrop);
                face.right_eye[0] = l.mParameters.arrayXToNormalizedWithCrop(
                        faceLandmarks[i*6 + 2], scalerCrop);
                face.right_eye[1] = l.mParameters.arrayYToNormalizedWithCrop(
                        faceLandmarks[i*6 + 3], scalerCrop);
                face.mouth[0] = l.mParameters.arrayXToNormalizedWithCrop(
                        faceLandmarks[i*6 + 4], scalerCrop);
                face.mouth[1] = l.mParameters.arrayYToNormalizedWithCrop(
                        faceLandmarks[i*6 + 5], scalerCrop);
            } else {
                face.id = 0;
                face.left_eye[0] = face.left_eye[1] = -2000;
                face.right_eye[0] = face.right_eye[1] = -2000;
                face.mouth[0] = face.mouth[1] = -2000;
            }
            faces.push_back(face);
        }

        metadata.faces = faces.editArray();
    }

    /* warning: locks SharedCameraCallbacks */
    callbackFaceDetection(client, metadata);

    return OK;
}
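
The arrayXToNormalizedWithCrop/arrayYToNormalizedWithCrop helpers map active-array pixel coordinates into the camera1 face-coordinate space, which spans [-1000, 1000] across the current crop region. A simplified standalone sketch of that mapping (the real Parameters helpers handle a few more corner cases, so treat this as illustrative):

// CropRegion is a stand-in for Parameters::CropRegion.
struct CropRegion { float left, top, width, height; };

// (-1000,-1000) maps to the top-left of the crop region, (1000,1000) to the
// bottom-right, per the camera1 face/metering coordinate convention.
static int arrayXToNormalized(int x, const CropRegion& crop) {
    return static_cast<int>((x - crop.left) * 2000.0f / crop.width) - 1000;
}

static int arrayYToNormalized(int y, const CropRegion& crop) {
    return static_cast<int>((y - crop.top) * 2000.0f / crop.height) - 1000;
}

// Example: with a 4000x3000 crop at the origin, the array point (2000, 1500)
// normalizes to (0, 0), the center of the field of view.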