// Create a request object from a template. // -- Caller owns the newly allocated metadata status_t ProCamera2Client::createDefaultRequest(int templateId, /*out*/ camera_metadata** request) { ATRACE_CALL(); ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId); if (request) { *request = NULL; } status_t res; if ( (res = checkPid(__FUNCTION__) ) != OK) return res; Mutex::Autolock icl(mBinderSerializationLock); if (!mDevice.get()) return DEAD_OBJECT; CameraMetadata metadata; if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK) { *request = metadata.release(); } return res; }
// Reports whether the given camera advertises a flash unit in its static
// characteristics (ANDROID_FLASH_INFO_AVAILABLE).
status_t ModuleFlashControl::hasFlashUnit(const String8& cameraId, bool *hasFlash) {
    if (hasFlash == NULL) {
        return BAD_VALUE;
    }

    // Default to "no flash" until the characteristics prove otherwise.
    *hasFlash = false;

    Mutex::Autolock l(mLock);

    camera_info info;
    status_t res = mCameraModule->getCameraInfo(atoi(cameraId.string()), &info);
    if (res != 0) {
        return res;
    }

    CameraMetadata characteristics;
    characteristics = info.static_camera_characteristics;
    camera_metadata_entry entry = characteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
    *hasFlash = (entry.count == 1 && entry.data.u8[0] == 1);

    return OK;
}
/**
 * returnResult
 *
 * Returns a partial result metadata buffer, just one.
 *
 * \param reqState[IN]: Request State control structure
 * \param returnIndex[IN]: index of the result buffer in the array of result
 * buffers stored in the request
 */
status_t ResultProcessor::returnResult(RequestState_t* reqState, int returnIndex) {
    status_t status = NO_ERROR;
    camera3_capture_result result;
    CameraMetadata *resultMetadata;
    CLEAR(result);
    resultMetadata = reqState->request->getPartialResultBuffer(returnIndex);
    if (resultMetadata == NULL) {
        LOGE("Cannot get partial result buffer");
        return UNKNOWN_ERROR;
    }

    // This value should be between 1 and android.request.partialResultCount
    // The index goes between 0-partialResultCount -1
    result.partial_result = returnIndex + 1;
    result.frame_number = reqState->reqId;

    // getAndLock() returns a read-only view of the metadata; the buffer must
    // remain locked for the entire duration of the framework callback below.
    result.result = resultMetadata->getAndLock();
    result.num_output_buffers = 0;

    mCallbackOps->process_capture_result(mCallbackOps, &result);
    // Release the lock taken by getAndLock() once the callback has returned.
    resultMetadata->unlock(result.result);

    // Bookkeeping: record that one more partial result went out for this request.
    reqState->partialResultReturned += 1;
    LOGR("<Request %d> result cb done", reqState->reqId);
    return status;
}
/**
 * Returns the single partial result stored in the vector.
 * In the future we will have more than one.
 *
 * \param reqState[IN]: Request State control structure whose pending partial
 * results are flushed to the framework callback.
 */
void ResultProcessor::returnPendingPartials(RequestState_t* reqState) {
    camera3_capture_result result;
    CLEAR(result);
    // it must be 1 for >= CAMERA_DEVICE_API_VERSION_3_2 if we don't support partial metadata
    result.partial_result = mPartialResultCount;

    //TODO: combine them all in one metadata buffer and return
    result.frame_number = reqState->reqId;

    // check if metadata result of the previous request is returned
    int pre_reqId = reqState->reqId - 1;
    int index = mRequestsInTransit.indexOfKey(pre_reqId);
    if (index != NAME_NOT_FOUND) {
        RequestState_t *pre_reqState = mRequestsInTransit.valueAt(index);
        if (pre_reqState->partialResultReturned == 0) {
            // Defer this result to keep per-frame metadata ordering; the
            // request id is queued and presumably re-driven once the
            // predecessor returns its metadata — confirm against the
            // consumer of mRequestsPendingMetaReturn.
            LOGR("wait the metadata of the previous request return");
            mRequestsPendingMetaReturn.add(reqState->reqId);
            return;
        }
    }

    // Only the first pending partial is returned today (see function comment).
    CameraMetadata * settings = reqState->pendingPartialResults[0];

    // Buffer must stay locked across the process_capture_result call.
    result.result = settings->getAndLock();
    result.num_output_buffers = 0;

    mCallbackOps->process_capture_result(mCallbackOps, &result);
    settings->unlock(result.result);
    reqState->partialResultReturned += 1;
    LOGR("<Request %d> result cb done",reqState->reqId);
    reqState->pendingPartialResults.clear();
}
// Finishes a stream configuration started by the client; optionally validates
// the constrained-high-speed capability before committing the configuration.
status_t CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
    ALOGV("%s: ending configure (%d input stream, %zu output streams)",
            __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());

    // Validate the caller and the device BEFORE touching mDevice. The
    // original code called mDevice->info() for the high-speed capability
    // check prior to checkPid(), prior to taking mBinderSerializationLock,
    // and prior to the NULL check — so a disconnected device could be
    // dereferenced, and the static-info query ran unserialized.
    status_t res;
    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;

    Mutex::Autolock icl(mBinderSerializationLock);

    if (!mDevice.get()) return DEAD_OBJECT;

    // Sanitize the high speed session against necessary capability bit.
    if (isConstrainedHighSpeed) {
        CameraMetadata staticInfo = mDevice->info();
        camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        bool isConstrainedHighSpeedSupported = false;
        for(size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO) {
                isConstrainedHighSpeedSupported = true;
                break;
            }
        }
        if (!isConstrainedHighSpeedSupported) {
            ALOGE("%s: Camera %d: Try to create a constrained high speed configuration on a device"
                    " that doesn't support it.", __FUNCTION__, mCameraId);
            return INVALID_OPERATION;
        }
    }

    return mDevice->configureStreams(isConstrainedHighSpeed);
}
// Reports whether the given camera has a flash unit, per its static
// characteristics. Caller must hold the appropriate lock (hence "Locked").
status_t CameraDeviceClientFlashControl::hasFlashUnitLocked(
        const String8& cameraId, bool *hasFlash) {
    if (!hasFlash) {
        return BAD_VALUE;
    }

    // Initialize the out-parameter. The original left *hasFlash untouched
    // when the flash tag was absent, so a caller could read an indeterminate
    // value; the sibling ModuleFlashControl::hasFlashUnit already does this.
    *hasFlash = false;

    camera_info info;
    status_t res = mCameraModule->getCameraInfo(
            atoi(cameraId.string()), &info);
    if (res != 0) {
        ALOGE("%s: failed to get camera info for camera %s", __FUNCTION__,
                cameraId.string());
        return res;
    }

    CameraMetadata metadata;
    metadata = info.static_camera_characteristics;
    camera_metadata_entry flashAvailable =
            metadata.find(ANDROID_FLASH_INFO_AVAILABLE);
    if (flashAvailable.count == 1 && flashAvailable.data.u8[0] == 1) {
        *hasFlash = true;
    }

    return OK;
}
// JNI: returns the number of entries in the native CameraMetadata backing
// this Java object, or 0 (with a pending Java exception) if the native
// pointer could not be resolved.
static jint CameraMetadata_getEntryCount(JNIEnv *env, jobject thiz) {
    ALOGV("%s", __FUNCTION__);

    CameraMetadata* nativePtr = CameraMetadata_getPointerThrow(env, thiz);

    if (nativePtr != NULL) {
        return nativePtr->entryCount();
    }

    // Pointer lookup already raised a Java exception; this 0 is never
    // observed by well-behaved callers.
    return 0;
}
void FrameProcessorBase::dump(int fd, const Vector<String16>& /*args*/) { String8 result(" Latest received frame:\n"); write(fd, result.string(), result.size()); CameraMetadata lastFrame; { // Don't race while dumping metadata Mutex::Autolock al(mLastFrameMutex); lastFrame = CameraMetadata(mLastFrame); } lastFrame.dump(fd, 2, 6); }
// Rounds the requested (width, height) to the nearest size the device
// actually supports for the given format/dataspace, by minimum squared
// euclidean distance. Returns false if the format has no listed
// configurations; outWidth/outHeight may be NULL if the caller only needs
// the boolean.
bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
        int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
        /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {

    // Depth streams advertise their sizes under a dedicated tag.
    camera_metadata_ro_entry streamConfigs =
            (dataSpace == HAL_DATASPACE_DEPTH) ?
            info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
            info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);

    int32_t bestWidth = -1;
    int32_t bestHeight = -1;

    // Iterate through listed stream configurations and find the one with the smallest euclidean
    // distance from the given dimensions for the given format.
    // Each configuration entry is a 4-tuple: (format, width, height, direction).
    for (size_t i = 0; i < streamConfigs.count; i += 4) {
        int32_t fmt = streamConfigs.data.i32[i];
        int32_t w = streamConfigs.data.i32[i + 1];
        int32_t h = streamConfigs.data.i32[i + 2];

        // Ignore input/output type for now
        if (fmt == format) {
            if (w == width && h == height) {
                // Exact match: stop searching.
                bestWidth = width;
                bestHeight = height;
                break;
            } else if (w <= ROUNDING_WIDTH_CAP && (bestWidth == -1 ||
                    CameraDeviceClient::euclidDistSquare(w, h, width, height) <
                    CameraDeviceClient::euclidDistSquare(bestWidth, bestHeight, width, height))) {
                // Candidate widths above ROUNDING_WIDTH_CAP are never chosen
                // as rounded substitutes.
                bestWidth = w;
                bestHeight = h;
            }
        }
    }

    if (bestWidth == -1) {
        // Return false if no configurations for this format were listed
        return false;
    }

    // Set the outputs to the closest width/height
    if (outWidth != NULL) {
        *outWidth = bestWidth;
    }
    if (outHeight != NULL) {
        *outHeight = bestHeight;
    }

    // Return true if at least one configuration for this format was listed
    return true;
}
// TODO: move to Camera2ClientBase bool CameraDeviceClient::enforceRequestPermissions(CameraMetadata& metadata) { const int pid = IPCThreadState::self()->getCallingPid(); const int selfPid = getpid(); camera_metadata_entry_t entry; /** * Mixin default important security values * - android.led.transmit = defaulted ON */ CameraMetadata staticInfo = mDevice->info(); entry = staticInfo.find(ANDROID_LED_AVAILABLE_LEDS); for(size_t i = 0; i < entry.count; ++i) { uint8_t led = entry.data.u8[i]; switch(led) { case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: { uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON; if (!metadata.exists(ANDROID_LED_TRANSMIT)) { metadata.update(ANDROID_LED_TRANSMIT, &transmitDefault, 1); } break; } } } // We can do anything! if (pid == selfPid) { return true; } /** * Permission check special fields in the request * - android.led.transmit = android.permission.CAMERA_DISABLE_TRANSMIT */ entry = metadata.find(ANDROID_LED_TRANSMIT); if (entry.count > 0 && entry.data.u8[0] != ANDROID_LED_TRANSMIT_ON) { String16 permissionString = String16("android.permission.CAMERA_DISABLE_TRANSMIT_LED"); if (!checkCallingPermission(permissionString)) { const int uid = IPCThreadState::self()->getCallingUid(); ALOGE("Permission Denial: " "can't disable transmit LED pid=%d, uid=%d", pid, uid); return false; } } return true; }
static void CameraMetadata_writeValues(JNIEnv *env, jobject thiz, jint tag, jbyteArray src) { ALOGV("%s (tag = %d)", __FUNCTION__, tag); CameraMetadata* metadata = CameraMetadata_getPointerThrow(env, thiz); if (metadata == NULL) return; int tagType = get_camera_metadata_tag_type(tag); if (tagType == -1) { jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException", "Tag (%d) did not have a type", tag); return; } size_t tagSize = Helpers::getTypeSize(tagType); status_t res; if (src == NULL) { // If array is NULL, delete the entry if (metadata->exists(tag)) { res = metadata->erase(tag); ALOGV("%s: Erase values (res = %d)", __FUNCTION__, res); } else { res = OK; ALOGV("%s: Don't need to erase", __FUNCTION__); } } else { // Copy from java array into native array ScopedByteArrayRO arrayReader(env, src); if (arrayReader.get() == NULL) return; res = Helpers::updateAny(metadata, static_cast<uint32_t>(tag), tagType, arrayReader.get(), arrayReader.size()); ALOGV("%s: Update values (res = %d)", __FUNCTION__, res); } if (res == OK) { return; } else if (res == BAD_VALUE) { jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException", "Src byte array was poorly formed"); } else if (res == INVALID_OPERATION) { jniThrowExceptionFmt(env, "java/lang/IllegalStateException", "Internal error while trying to update metadata"); } else { jniThrowExceptionFmt(env, "java/lang/IllegalStateException", "Unknown error (%d) while trying to update " "metadata", res); } }
// Delivers a result frame to every registered range listener whose request-id
// range covers this frame's request id. Partial (quirk) results are only
// forwarded to listeners that opted in.
status_t FrameProcessorBase::processListeners(const CameraMetadata &frame,
        const sp<CameraDeviceBase> &device) {
    ATRACE_CALL();
    camera_metadata_ro_entry_t entry;

    // Quirks: Don't deliver partial results to listeners that don't want them
    bool quirkIsPartial = false;
    entry = frame.find(ANDROID_QUIRKS_PARTIAL_RESULT);
    if (entry.count != 0 &&
            entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
        ALOGV("%s: Camera %d: Not forwarding partial result to listeners",
                __FUNCTION__, device->getId());
        quirkIsPartial = true;
    }

    entry = frame.find(ANDROID_REQUEST_ID);
    if (entry.count == 0) {
        ALOGE("%s: Camera %d: Error reading frame id",
                __FUNCTION__, device->getId());
        return BAD_VALUE;
    }
    int32_t requestId = entry.data.i32[0];

    List<sp<FilteredListener> > listeners;
    {
        Mutex::Autolock l(mInputMutex);

        List<RangeListener>::iterator item = mRangeListeners.begin();
        while (item != mRangeListeners.end()) {
            // Range is [minId, maxId); honor the partial-result opt-in.
            if (requestId >= item->minId &&
                    requestId < item->maxId &&
                    (!quirkIsPartial || item->quirkSendPartials) ) {
                sp<FilteredListener> listener = item->listener.promote();
                if (listener == 0) {
                    // Listener has died; drop it from the registry.
                    item = mRangeListeners.erase(item);
                    continue;
                } else {
                    listeners.push_back(listener);
                }
            }
            item++;
        }
    }
    // Fix: size() returns size_t; the original passed it to %d, which is a
    // format-specifier mismatch (UB per printf semantics on LP64).
    ALOGV("Got %zu range listeners out of %zu", listeners.size(), mRangeListeners.size());

    // Invoke callbacks outside mInputMutex to avoid lock-order issues.
    List<sp<FilteredListener> >::iterator item = listeners.begin();
    for (; item != listeners.end(); item++) {
        (*item)->onFrameAvailable(requestId, frame);
    }
    return OK;
}
// Finds the smallest (by pixel area) implementation-defined output size in
// the camera's static characteristics, falling back to the legacy
// available-processed-sizes tag for older HALs. Returns NAME_NOT_FOUND when
// neither tag yields a usable size.
status_t CameraDeviceClientFlashControl::getSmallestSurfaceSize(
        const camera_info& info, int32_t *width, int32_t *height) {
    if (!width || !height) {
        return BAD_VALUE;
    }

    // Sentinel: w stays INT32_MAX until at least one candidate is found.
    int32_t w = INT32_MAX;
    int32_t h = 1;
    CameraMetadata metadata;
    metadata = info.static_camera_characteristics;
    camera_metadata_entry streamConfigs =
            metadata.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
    // Entries are 4-tuples: (format, width, height, direction).
    for (size_t i = 0; i < streamConfigs.count; i += 4) {
        int32_t fmt = streamConfigs.data.i32[i];
        if (fmt == ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED) {
            int32_t ww = streamConfigs.data.i32[i + 1];
            int32_t hh = streamConfigs.data.i32[i + 2];
            // Keep the candidate with the smaller pixel area.
            if (w * h > ww * hh) {
                w = ww;
                h = hh;
            }
        }
    }

    // if stream configuration is not found, try available processed sizes.
    // (Legacy tag stores flat (width, height) pairs.)
    if (streamConfigs.count == 0) {
        camera_metadata_entry availableProcessedSizes =
                metadata.find(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
        for (size_t i = 0; i < availableProcessedSizes.count; i += 2) {
            int32_t ww = availableProcessedSizes.data.i32[i];
            int32_t hh = availableProcessedSizes.data.i32[i + 1];
            if (w * h > ww * hh) {
                w = ww;
                h = hh;
            }
        }
    }

    if (w == INT32_MAX) {
        // Neither tag produced a size.
        return NAME_NOT_FOUND;
    }

    *width = w;
    *height = h;

    return OK;
}
// JNI: swaps the native metadata buffers backing two Java CameraMetadata
// objects. Returns silently (with a pending Java exception) if either
// native pointer cannot be resolved.
static void CameraMetadata_swap(JNIEnv *env, jobject thiz, jobject other) {
    ALOGV("%s", __FUNCTION__);

    // order is important: we can't call another JNI method
    // if there is an exception pending
    CameraMetadata* lhs = CameraMetadata_getPointerThrow(env, thiz);
    if (lhs == NULL) {
        return;
    }

    CameraMetadata* rhs = CameraMetadata_getPointerThrow(env, other, "other");
    if (rhs == NULL) {
        return;
    }

    lhs->swap(*rhs);
}
// Binder server-side dispatch: unmarshals each callback transaction and
// invokes the corresponding local virtual. Parcel reads must stay in the
// exact order the proxy wrote them.
status_t BnCameraDeviceCallbacks::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
    ALOGV("onTransact - code = %d", code);
    switch(code) {
        case CAMERA_ERROR: {
            ALOGV("onDeviceError");
            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
            CameraErrorCode errorCode =
                    static_cast<CameraErrorCode>(data.readInt32());
            onDeviceError(errorCode);
            data.readExceptionCode();
            return NO_ERROR;
        } break;
        case CAMERA_IDLE: {
            ALOGV("onDeviceIdle");
            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
            onDeviceIdle();
            data.readExceptionCode();
            return NO_ERROR;
        } break;
        case CAPTURE_STARTED: {
            ALOGV("onCaptureStarted");
            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
            int32_t requestId = data.readInt32();
            int64_t timestamp = data.readInt64();
            onCaptureStarted(requestId, timestamp);
            data.readExceptionCode();
            return NO_ERROR;
        } break;
        case RESULT_RECEIVED: {
            ALOGV("onResultReceived");
            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
            int32_t requestId = data.readInt32();
            CameraMetadata result;
            // The proxy writes a presence flag before the metadata blob;
            // an absent blob leaves `result` empty.
            if (data.readInt32() != 0) {
                result.readFromParcel(const_cast<Parcel*>(&data));
            } else {
                ALOGW("No metadata object is present in result");
            }
            onResultReceived(requestId, result);
            data.readExceptionCode();
            return NO_ERROR;
        } break;
        default:
            // Unknown code: defer to the base binder implementation.
            return BBinder::onTransact(code, data, reply, flags);
    }
}
// Parameterized smoke test: submits GetParam() preview captures and verifies
// each produces both a result-metadata frame and an image buffer within the
// expected timeouts.
TEST_P(CameraFrameTest, GetFrame) {

    TEST_EXTENSION_FORKING_INIT;

    /* Submit a PREVIEW type request, then wait until we get the frame back */
    CameraMetadata previewRequest;
    ASSERT_EQ(OK, mDevice->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
                                                &previewRequest));
    {
        // Point the request at the test's output stream.
        Vector<uint8_t> outputStreamIds;
        outputStreamIds.push(mStreamId);
        ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
                                            outputStreamIds));
        if (CAMERA_FRAME_DEBUGGING) {
            int frameCount = 0;
            ASSERT_EQ(OK, previewRequest.update(ANDROID_REQUEST_FRAME_COUNT,
                                                &frameCount, 1));
        }
    }

    if (CAMERA_FRAME_DEBUGGING) {
        previewRequest.dump(STDOUT_FILENO);
    }

    // Fire all captures first; a copy is passed because capture() may
    // consume the metadata.
    for (int i = 0; i < GetParam(); ++i) {
        ALOGV("Submitting capture request %d", i);
        CameraMetadata tmpRequest = previewRequest;
        ASSERT_EQ(OK, mDevice->capture(tmpRequest));
    }

    // Then drain results: one metadata frame plus one image buffer each.
    for (int i = 0; i < GetParam(); ++i) {
        ALOGV("Reading capture request %d", i);
        ASSERT_EQ(OK, mDevice->waitForNextFrame(CAMERA_FRAME_TIMEOUT));
        CameraMetadata frameMetadata;
        ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata));

        // wait for buffer to be available
        ASSERT_EQ(OK, mFrameListener->waitForFrame(CAMERA_FRAME_TIMEOUT));
        ALOGV("We got the frame now");

        // mark buffer consumed so producer can re-dequeue it
        CpuConsumer::LockedBuffer imgBuffer;
        ASSERT_EQ(OK, mCpuConsumer->lockNextBuffer(&imgBuffer));
        ASSERT_EQ(OK, mCpuConsumer->unlockBuffer(imgBuffer));
    }
}
// Takes ownership of another CameraMetadata's underlying buffer, leaving the
// source empty. Refuses to modify a locked instance.
void CameraMetadata::acquire(CameraMetadata &other) {
    if (!mLocked) {
        // Delegate to the raw-buffer overload with the released buffer.
        acquire(other.release());
        return;
    }
    ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
}
// Reads a single 3A value from a result frame into *value.
// NOTE(review): this is a member template — the template header declaring
// `Src` (storage width selector) and `T` (output type) sits outside this
// excerpt; confirm against the declaration.
// Returns false when value is NULL, the tag is missing from the frame, or
// Src is neither a u8- nor i32-sized type.
bool FrameProcessor::get3aResult(const CameraMetadata& result, int32_t tag,
        T* value, int32_t frameNumber, int cameraId) {
    camera_metadata_ro_entry_t entry;
    if (value == NULL) {
        ALOGE("%s: Camera %d: Value to write to is NULL",
                __FUNCTION__, cameraId);
        return false;
    }

    entry = result.find(tag);
    if (entry.count == 0) {
        ALOGE("%s: Camera %d: No %s provided by HAL for frame %d!",
                __FUNCTION__, cameraId,
                get_camera_metadata_tag_name(tag), frameNumber);
        return false;
    } else {
        // Dispatch on the storage width of Src to pick the matching union
        // member of the metadata entry.
        switch(sizeof(Src)){
            case sizeof(uint8_t):
                *value = static_cast<T>(entry.data.u8[0]);
                break;
            case sizeof(int32_t):
                *value = static_cast<T>(entry.data.i32[0]);
                break;
            default:
                ALOGE("%s: Camera %d: Unsupported source",
                        __FUNCTION__, cameraId);
                return false;
        }
    }
    return true;
}
// Returns the static camera characteristics for this client's camera.
// -- Caller owns the newly allocated metadata.
status_t ProCamera2Client::getCameraInfo(int cameraId,
                                         /*out*/
                                         camera_metadata** info) {
    if (cameraId != mCameraId) {
        return INVALID_OPERATION;
    }

    // Guard the out-parameter; the original dereferenced `info`
    // unconditionally below, crashing for a NULL caller.
    if (info == NULL) {
        return BAD_VALUE;
    }

    Mutex::Autolock icl(mBinderSerializationLock);

    if (!mDevice.get()) return DEAD_OBJECT;

    CameraMetadata deviceInfo = mDevice->info();
    // Transfer ownership of the underlying buffer to the caller.
    *info = deviceInfo.release();

    return OK;
}
// Proxy-side callback: marshals a capture result (request id + metadata)
// to the remote client as a one-way binder transaction.
void onResultReceived(int32_t requestId, const CameraMetadata& result) {
    Parcel data, reply;
    data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
    data.writeInt32(requestId);
    data.writeInt32(1); // to mark presence of metadata object
    result.writeToParcel(&data);
    // FLAG_ONEWAY: fire-and-forget; `reply` is never populated or read.
    remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
    // NOTE(review): this writes to `data` AFTER the transaction was sent,
    // so it cannot affect the wire format — looks vestigial; confirm before
    // removing (the sibling overload has the same pattern).
    data.writeNoException();
}
// JNI: returns whether the native CameraMetadata backing this Java object
// has no entries. Returns JNI_TRUE (with a pending Java exception) when the
// native pointer cannot be resolved.
static jboolean CameraMetadata_isEmpty(JNIEnv *env, jobject thiz) {
    ALOGV("%s", __FUNCTION__);

    CameraMetadata* nativePtr = CameraMetadata_getPointerThrow(env, thiz);

    if (nativePtr == NULL) {
        ALOGW("%s: Returning early due to exception being thrown",
               __FUNCTION__);
        return JNI_TRUE; // actually throws java exc.
    }

    jboolean result = nativePtr->isEmpty();

    ALOGV("%s: Empty returned %d, entry count was %zu",
          __FUNCTION__, result, nativePtr->entryCount());

    return result;
}
// JNI: serializes the native CameraMetadata into the given Java Parcel.
// Throws NullPointerException for an unresolvable parcel and
// IllegalStateException when serialization fails.
static void CameraMetadata_writeToParcel(JNIEnv *env, jobject thiz, jobject parcel) {
    ALOGV("%s", __FUNCTION__);

    CameraMetadata* nativePtr = CameraMetadata_getPointerThrow(env, thiz);
    if (nativePtr == NULL) {
        return;
    }

    Parcel* dest = parcelForJavaObject(env, parcel);
    if (dest == NULL) {
        jniThrowNullPointerException(env, "parcel");
        return;
    }

    status_t status = nativePtr->writeToParcel(dest);
    if (status != OK) {
        jniThrowExceptionFmt(env, "java/lang/IllegalStateException",
                "Failed to write to parcel (error code %d)", status);
    }
}
// Proxy-side: fetches this device's static info over binder. `info` may be
// NULL, in which case the reply metadata is discarded.
virtual status_t getCameraInfo(CameraMetadata* info) {
    Parcel data, reply;
    data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
    remote()->transact(GET_CAMERA_INFO, data, &reply);

    // Reply layout: exception code, status, presence flag, then metadata.
    reply.readExceptionCode();
    status_t status = reply.readInt32();

    CameraMetadata received;
    if (reply.readInt32() != 0) {
        received.readFromParcel(&reply);
    }

    if (info != NULL) {
        info->swap(received);
    }

    return status;
}
// Proxy-side callback: marshals a capture result (metadata + extras) to the
// remote client as a one-way binder transaction.
void onResultReceived(const CameraMetadata& metadata,
        const CaptureResultExtras& resultExtras) {
    ALOGV("onResultReceived");
    Parcel data, reply;
    data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
    data.writeInt32(1); // to mark presence of metadata object
    metadata.writeToParcel(&data);
    data.writeInt32(1); // to mark presence of CaptureResult object
    resultExtras.writeToParcel(&data);
    // FLAG_ONEWAY: fire-and-forget; `reply` is never populated or read.
    remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
    // NOTE(review): this writes to `data` AFTER the transaction was sent,
    // so it cannot affect the wire format — looks vestigial; confirm before
    // removing (the sibling overload has the same pattern).
    data.writeNoException();
}
// get camera characteristics (static metadata) virtual status_t getCameraCharacteristics(int cameraId, CameraMetadata* cameraInfo) { Parcel data, reply; data.writeInterfaceToken(ICameraService::getInterfaceDescriptor()); data.writeInt32(cameraId); remote()->transact(BnCameraService::GET_CAMERA_CHARACTERISTICS, data, &reply); if (readExceptionCode(reply)) return -EPROTO; status_t result = reply.readInt32(); CameraMetadata out; if (reply.readInt32() != 0) { out.readFromParcel(&reply); } if (cameraInfo != NULL) { cameraInfo->swap(out); } return result; }
// Returns camera_info for `cameraId`. For API2+ modules the static
// characteristics are augmented with derived keys and the whole struct is
// cached, so repeat calls are served from mCameraInfoMap.
int CameraModule::getCameraInfo(int cameraId, struct camera_info *info) {
    Mutex::Autolock lock(mCameraInfoLock);
    if (cameraId < 0) {
        ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
        return -EINVAL;
    }

    // Only override static_camera_characteristics for API2 devices
    int apiVersion = mModule->common.module_api_version;
    if (apiVersion < CAMERA_MODULE_API_VERSION_2_0) {
        // Legacy module: pass straight through, nothing to derive or cache.
        return mModule->get_camera_info(cameraId, info);
    }

    ssize_t index = mCameraInfoMap.indexOfKey(cameraId);
    if (index == NAME_NOT_FOUND) {
        // Get camera info from raw module and cache it
        camera_info rawInfo, cameraInfo;
        int ret = mModule->get_camera_info(cameraId, &rawInfo);
        if (ret != 0) {
            return ret;
        }
        int deviceVersion = rawInfo.device_version;
        if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
            // static_camera_characteristics is invalid
            *info = rawInfo;
            return ret;
        }
        CameraMetadata m;
        m = rawInfo.static_camera_characteristics;
        deriveCameraCharacteristicsKeys(rawInfo.device_version, m);
        cameraInfo = rawInfo;
        // release() transfers buffer ownership to the cache entry; it lives
        // for the lifetime of this CameraModule.
        cameraInfo.static_camera_characteristics = m.release();
        index = mCameraInfoMap.add(cameraId, cameraInfo);
    }

    assert(index != NAME_NOT_FOUND);
    // return the cached camera info
    *info = mCameraInfoMap[index];
    return OK;
}
// Copies key post-processing settings from the default still-capture template
// into `request`, so a reprocessed ZSL frame gets still-quality processing.
status_t ZslProcessor::updateRequestWithDefaultStillRequest(CameraMetadata &request) const {
    sp<Camera2Client> client = mClient.promote();
    if (client == 0) {
        ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }
    sp<Camera3Device> device =
        static_cast<Camera3Device*>(client->getCameraDevice().get());
    if (device == 0) {
        ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
        return INVALID_OPERATION;
    }

    CameraMetadata stillTemplate;
    // Fix: the original ignored this status. On failure the template stayed
    // empty, the loop below silently became a no-op, and OK was returned —
    // hiding the error from the caller.
    status_t res = device->createDefaultRequest(
            CAMERA3_TEMPLATE_STILL_CAPTURE, &stillTemplate);
    if (res != OK) {
        ALOGE("%s: Camera %d: Unable to create default still capture template: %s (%d)",
                __FUNCTION__, mId, strerror(-res), res);
        return res;
    }

    // Find some of the post-processing tags, and assign the value from template to the request.
    // Only check the aberration mode and noise reduction mode for now, as they are very important
    // for image quality.
    uint32_t postProcessingTags[] = {
            ANDROID_NOISE_REDUCTION_MODE,
            ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
            ANDROID_COLOR_CORRECTION_MODE,
            ANDROID_TONEMAP_MODE,
            ANDROID_SHADING_MODE,
            ANDROID_HOT_PIXEL_MODE,
            ANDROID_EDGE_MODE
    };

    camera_metadata_entry_t entry;
    for (size_t i = 0; i < sizeof(postProcessingTags) / sizeof(uint32_t); i++) {
        entry = stillTemplate.find(postProcessingTags[i]);
        if (entry.count > 0) {
            // All of these tags are single u8 enum values.
            request.update(postProcessingTags[i], entry.data.u8, 1);
        }
    }

    return OK;
}
// Create a request object from a template. virtual status_t createDefaultRequest(int templateId, /*out*/ CameraMetadata* request) { Parcel data, reply; data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor()); data.writeInt32(templateId); remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply); reply.readExceptionCode(); status_t result = reply.readInt32(); CameraMetadata out; if (reply.readInt32() != 0) { out.readFromParcel(&reply); } if (request != NULL) { request->swap(out); } return result; }
// JNI: reads the raw bytes of the metadata entry for `tag` into a new Java
// byte[]. Returns NULL (no exception) when the tag has no entry at all, a
// 0-length array when the entry exists but holds no data, and throws
// IllegalArgumentException for an unknown tag.
static jbyteArray CameraMetadata_readValues(JNIEnv *env, jobject thiz, jint tag) {
    ALOGV("%s (tag = %d)", __FUNCTION__, tag);

    CameraMetadata* metadata = CameraMetadata_getPointerThrow(env, thiz);
    if (metadata == NULL) return NULL;

    int tagType = get_camera_metadata_tag_type(tag);
    if (tagType == -1) {
        jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException",
                             "Tag (%d) did not have a type", tag);
        return NULL;
    }
    // Bytes per element for this tag's type; used to size the Java array.
    size_t tagSize = Helpers::getTypeSize(tagType);

    camera_metadata_entry entry = metadata->find(tag);
    if (entry.count == 0) {
         if (!metadata->exists(tag)) {
            ALOGV("%s: Tag %d does not have any entries", __FUNCTION__, tag);
            return NULL;
        } else {
            // OK: we will return a 0-sized array.
            ALOGV("%s: Tag %d had an entry, but it had 0 data", __FUNCTION__,
                  tag);
        }
    }

    jsize byteCount = entry.count * tagSize;
    jbyteArray byteArray = env->NewByteArray(byteCount);
    if (env->ExceptionCheck()) return NULL;

    // Copy into java array from native array
    // (entry.data.u8 aliases the entry's raw storage regardless of type)
    ScopedByteArrayRW arrayWriter(env, byteArray);
    memcpy(arrayWriter.get(), entry.data.u8, byteCount);

    return byteArray;
}
// Drains all result frames currently queued on the device, dispatching each
// through processSingleFrame() and retaining the last non-empty frame for
// dump(). NOT_ENOUGH_DATA is the expected "queue empty" terminator.
void FrameProcessorBase::processNewFrames(const sp<CameraDeviceBase> &device) {
    status_t res;
    ATRACE_CALL();
    CameraMetadata frame;

    ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId());

    while ( (res = device->getNextFrame(&frame)) == OK) {

        camera_metadata_entry_t entry;

        entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
        if (entry.count == 0) {
            ALOGE("%s: Camera %d: Error reading frame number",
                    __FUNCTION__, device->getId());
            break;
        }
        ATRACE_INT("cam2_frame", entry.data.i32[0]);

        if (!processSingleFrame(frame, device)) {
            break;
        }

        if (!frame.isEmpty()) {
            // Keep the latest frame for dump(); acquire() empties `frame`
            // so the next getNextFrame() starts clean.
            Mutex::Autolock al(mLastFrameMutex);
            mLastFrame.acquire(frame);
        }
    }
    // NOT_ENOUGH_DATA just means the queue is drained; anything else is a
    // real error worth logging.
    if (res != NOT_ENOUGH_DATA) {
        ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
                __FUNCTION__, device->getId(), strerror(-res), res);
        return;
    }

    return;
}