/*
 * Iteratively set the client (camera sensor) format.
 *
 * First the requested format is applied directly via the subdev s_fmt op.
 * If the sensor picked a different geometry and the CEU can scale
 * (@ceu_can_scale), the sensor window is repeatedly doubled - capped at the
 * sensor crop bounds and the CEU 2560x1920 input limit - until it covers the
 * originally requested size.
 *
 * Returns 0 on success or a negative error code from a subdev call.
 */
static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f,
			bool ceu_can_scale)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct device *dev = icd->dev.parent;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	/* Remember the size the caller asked for - s_fmt may modify *pix */
	unsigned int width = pix->width, height = pix->height, tmp_w, tmp_h;
	unsigned int max_width, max_height;
	struct v4l2_cropcap cap;
	int ret;

	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
	if (ret < 0)
		return ret;

	/* Iteration cap: sensor bounds, further limited by the CEU input max */
	max_width = min(cap.bounds.width, 2560);
	max_height = min(cap.bounds.height, 1920);

	ret = v4l2_subdev_call(sd, video, s_fmt, f);
	if (ret < 0)
		return ret;

	dev_geo(dev, "camera scaled to %ux%u\n", pix->width, pix->height);

	/* Exact match, or no host scaling available - nothing more to do */
	if ((width == pix->width && height == pix->height) || !ceu_can_scale)
		return 0;

	/* Camera set a format, but geometry is not precise, try to improve */
	tmp_w = pix->width;
	tmp_h = pix->height;

	/* Double the sensor window until it covers the request or hits max */
	while ((width > tmp_w || height > tmp_h) &&
	       tmp_w < max_width && tmp_h < max_height) {
		tmp_w = min(2 * tmp_w, max_width);
		tmp_h = min(2 * tmp_h, max_height);
		pix->width = tmp_w;
		pix->height = tmp_h;
		ret = v4l2_subdev_call(sd, video, s_fmt, f);
		dev_geo(dev, "Camera scaled to %ux%u\n",
			pix->width, pix->height);
		if (ret < 0) {
			/* This shouldn't happen - the size was accepted before */
			dev_err(dev, "Client failed to set format: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
static int get_camera_subwin(struct soc_camera_device *icd, struct v4l2_rect *cam_subrect, unsigned int cam_hscale, unsigned int cam_vscale) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_rect *ceu_rect = &cam->ceu_rect; if (!ceu_rect->width) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; struct v4l2_format f; struct v4l2_pix_format *pix = &f.fmt.pix; int ret; f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, g_fmt, &f); if (ret < 0) return ret; dev_geo(dev, "camera fmt %ux%u\n", pix->width, pix->height); if (pix->width > 2560) { ceu_rect->width = 2560; ceu_rect->left = (pix->width - 2560) / 2; } else { ceu_rect->width = pix->width; ceu_rect->left = 0; } if (pix->height > 1920) { ceu_rect->height = 1920; ceu_rect->top = (pix->height - 1920) / 2; } else { ceu_rect->height = pix->height; ceu_rect->top = 0; } dev_geo(dev, "initialised CEU rect %ux%u@%u:%u\n", ceu_rect->width, ceu_rect->height, ceu_rect->left, ceu_rect->top); } cam_subrect->width = scale_up(ceu_rect->width, cam_hscale); cam_subrect->left = scale_up(ceu_rect->left, cam_hscale); cam_subrect->height = scale_up(ceu_rect->height, cam_vscale); cam_subrect->top = scale_up(ceu_rect->top, cam_vscale); return 0; }
/*
 * Steps 5-8 of the geometry negotiation: apply the iterative client S_FMT,
 * re-read the resulting camera scales, cache the new camera output size and
 * recalculate the CEU crop rectangle by scaling the sub-window down with the
 * new camera scales.
 *
 * Returns 0 on success or a negative error from a subdev call.
 */
static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
			struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect,
			struct v4l2_format *f, bool ceu_can_scale)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct device *dev = icd->dev.parent;
	/* Work on a copy - *f is only updated on success below */
	struct v4l2_format f_tmp = *f;
	struct v4l2_pix_format *pix_tmp = &f_tmp.fmt.pix;
	unsigned int scale_h, scale_v;
	int ret;

	/* 5. Apply iterative camera S_FMT for the camera user window. */
	ret = client_s_fmt(icd, &f_tmp, ceu_can_scale);
	if (ret < 0)
		return ret;

	dev_geo(dev, "5: camera scaled to %ux%u\n",
		pix_tmp->width, pix_tmp->height);

	/* 6.-7. Retrieve the new camera scales. */
	ret = get_camera_scales(sd, rect, &scale_h, &scale_v);
	if (ret < 0)
		return ret;

	dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v);

	/* Cache the camera output window and propagate it to the caller */
	cam->cam_width = pix_tmp->width;
	cam->cam_height = pix_tmp->height;
	f->fmt.pix.width = pix_tmp->width;
	f->fmt.pix.height = pix_tmp->height;

	/* 8. New CEU crop - sub-window scaled down by the camera scales. */
	ceu_rect->left = scale_down(sub_rect->left, scale_h);
	ceu_rect->width = scale_down(sub_rect->width, scale_h);
	ceu_rect->top = scale_down(sub_rect->top, scale_v);
	ceu_rect->height = scale_down(sub_rect->height, scale_v);

	dev_geo(dev, "8: new CEU rect %ux%u@%u:%u\n",
		ceu_rect->width, ceu_rect->height,
		ceu_rect->left, ceu_rect->top);

	return 0;
}
/**
 * soc_camera_client_scale - set client format iteratively, update host scales
 * @icd:	soc-camera device
 * @rect:	camera cropping window
 * @subrect:	part of rect, sent to the user
 * @mf:		in- / output camera output window
 * @width:	on input: max host input width;
 *		on output: user width, mapped back to input
 * @height:	on input: max host input height;
 *		on output: user height, mapped back to input
 * @host_can_scale: host can scale this pixel format
 * @shift:	shift, used for scaling
 */
int soc_camera_client_scale(struct soc_camera_device *icd,
			struct v4l2_rect *rect, struct v4l2_rect *subrect,
			struct v4l2_mbus_framefmt *mf,
			unsigned int *width, unsigned int *height,
			bool host_can_scale, unsigned int shift)
{
	struct device *dev = icd->parent;
	/* Work on a copy - *mf is only updated after a successful s_fmt */
	struct v4l2_mbus_framefmt mf_tmp = *mf;
	unsigned int scale_h, scale_v;
	int ret;

	/*
	 * 5. Apply iterative camera S_FMT for camera user window (also updates
	 *    client crop cache and the imaginary sub-rectangle).
	 */
	ret = client_s_fmt(icd, rect, subrect, *width, *height,
			   &mf_tmp, host_can_scale);
	if (ret < 0)
		return ret;

	dev_geo(dev, "5: camera scaled to %ux%u\n",
		mf_tmp.width, mf_tmp.height);

	/* 6. Retrieve camera output window (g_fmt) */

	/* unneeded - it is already in "mf_tmp" */

	/* 7. Calculate new client scales. */
	scale_h = soc_camera_calc_scale(rect->width, shift, mf_tmp.width);
	scale_v = soc_camera_calc_scale(rect->height, shift, mf_tmp.height);

	mf->width = mf_tmp.width;
	mf->height = mf_tmp.height;
	mf->colorspace = mf_tmp.colorspace;

	/*
	 * 8. Calculate new host crop - apply camera scales to previously
	 *    updated "effective" crop.
	 */
	*width = soc_camera_shift_scale(subrect->width, shift, scale_h);
	*height = soc_camera_shift_scale(subrect->height, shift, scale_v);

	dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);

	return 0;
}
/* * Calculate real client output window by applying new scales to the current * client crop. New scales are calculated from the requested output format and * host crop, mapped backed onto the client input (subrect). */ void soc_camera_calc_client_output(struct soc_camera_device *icd, struct v4l2_rect *rect, struct v4l2_rect *subrect, const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf, unsigned int shift) { struct device *dev = icd->parent; unsigned int scale_v, scale_h; if (subrect->width == rect->width && subrect->height == rect->height) { /* No sub-cropping */ mf->width = pix->width; mf->height = pix->height; return; } /* 1.-2. Current camera scales and subwin - cached. */ dev_geo(dev, "2: subwin %ux%u@%u:%u\n", subrect->width, subrect->height, subrect->left, subrect->top); /* * 3. Calculate new combined scales from input sub-window to requested * user window. */ /* * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF * (128x96) or larger than VGA. This and similar limitations have to be * taken into account here. */ scale_h = soc_camera_calc_scale(subrect->width, shift, pix->width); scale_v = soc_camera_calc_scale(subrect->height, shift, pix->height); dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v); /* * 4. Calculate desired client output window by applying combined scales * to client (real) input window. */ mf->width = soc_camera_shift_scale(rect->width, shift, scale_h); mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); }
/*
 * The common for both scaling and cropping iterative approach is:
 * 1. try if the client can produce exactly what requested by the user
 * 2. if (1) failed, try to double the client image until we get one big enough
 * 3. if (2) failed, try to request the maximum image
 */
int soc_camera_client_s_selection(struct v4l2_subdev *sd,
		struct v4l2_selection *sel, struct v4l2_selection *cam_sel,
		struct v4l2_rect *target_rect, struct v4l2_rect *subrect)
{
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
		.flags = sel->flags,
		.r = sel->r,
	};
	struct v4l2_subdev_selection bounds = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = V4L2_SEL_TGT_CROP_BOUNDS,
	};
	struct v4l2_rect *rect = &sel->r, *cam_rect = &cam_sel->r;
	struct device *dev = sd->v4l2_dev->dev;
	int ret;
	unsigned int width, height;

	/* First attempt: ask the client directly, then read back the result */
	v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
	sel->r = sdsel.r;
	ret = soc_camera_client_g_rect(sd, cam_rect);
	if (ret < 0)
		return ret;

	/*
	 * Now cam_crop contains the current camera input rectangle, and it must
	 * be within camera cropcap bounds
	 */
	if (!memcmp(rect, cam_rect, sizeof(*rect))) {
		/* Even if camera S_SELECTION failed, but camera rectangle matches */
		dev_dbg(dev, "Camera S_SELECTION successful for %dx%d@%d:%d\n",
			rect->width, rect->height, rect->left, rect->top);
		*target_rect = *cam_rect;
		return 0;
	}

	/* Try to fix cropping, that camera hasn't managed to set */
	dev_geo(dev, "Fix camera S_SELECTION for %dx%d@%d:%d to %dx%d@%d:%d\n",
		cam_rect->width, cam_rect->height,
		cam_rect->left, cam_rect->top,
		rect->width, rect->height, rect->left, rect->top);

	/* We need sensor maximum rectangle */
	ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &bounds);
	if (ret < 0)
		return ret;

	/* Put user requested rectangle within sensor bounds */
	soc_camera_limit_side(&rect->left, &rect->width, sdsel.r.left, 2,
			      bounds.r.width);
	soc_camera_limit_side(&rect->top, &rect->height, sdsel.r.top, 4,
			      bounds.r.height);

	/*
	 * Popular special case - some cameras can only handle fixed sizes like
	 * QVGA, VGA,... Take care to avoid infinite loop.
	 */
	width = max_t(unsigned int, cam_rect->width, 2);
	height = max_t(unsigned int, cam_rect->height, 2);

	/*
	 * Loop as long as sensor is not covering the requested rectangle and
	 * is still within its bounds
	 */
	while (!ret && (is_smaller(cam_rect, rect) ||
			is_inside(cam_rect, rect)) &&
	       (bounds.r.width > width || bounds.r.height > height)) {

		width *= 2;
		height *= 2;

		cam_rect->width = width;
		cam_rect->height = height;

		/*
		 * We do not know what capabilities the camera has to set up
		 * left and top borders. We could try to be smarter in iterating
		 * them, e.g., if camera current left is to the right of the
		 * target left, set it to the middle point between the current
		 * left and minimum left. But that would add too much
		 * complexity: we would have to iterate each border separately.
		 * Instead we just drop to the left and top bounds.
		 */
		if (cam_rect->left > rect->left)
			cam_rect->left = bounds.r.left;

		if (cam_rect->left + cam_rect->width <
		    rect->left + rect->width)
			cam_rect->width = rect->left + rect->width -
				cam_rect->left;

		if (cam_rect->top > rect->top)
			cam_rect->top = bounds.r.top;

		if (cam_rect->top + cam_rect->height <
		    rect->top + rect->height)
			cam_rect->height = rect->top + rect->height -
				cam_rect->top;

		sdsel.r = *cam_rect;
		v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
		*cam_rect = sdsel.r;
		ret = soc_camera_client_g_rect(sd, cam_rect);
		dev_geo(dev, "Camera S_SELECTION %d for %dx%d@%d:%d\n", ret,
			cam_rect->width, cam_rect->height,
			cam_rect->left, cam_rect->top);
	}

	/* S_SELECTION must not modify the rectangle */
	if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
		/*
		 * The camera failed to configure a suitable cropping,
		 * we cannot use the current rectangle, set to max
		 */
		sdsel.r = bounds.r;
		v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel);
		*cam_rect = sdsel.r;
		ret = soc_camera_client_g_rect(sd, cam_rect);
		dev_geo(dev, "Camera S_SELECTION %d for max %dx%d@%d:%d\n",
			ret, cam_rect->width, cam_rect->height,
			cam_rect->left, cam_rect->top);
	}

	if (!ret) {
		/* Cache the achieved client rectangle, adjust the sub-window */
		*target_rect = *cam_rect;
		move_and_crop_subrect(target_rect, subrect);
	}

	return ret;
}
EXPORT_SYMBOL(soc_camera_client_s_selection);

/* Iterative set_fmt, also updates cached client crop on success */
static int client_set_fmt(struct soc_camera_device *icd,
			struct v4l2_rect *rect, struct v4l2_rect *subrect,
			unsigned int max_width, unsigned int max_height,
			struct v4l2_subdev_format *format, bool host_can_scale)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct device *dev = icd->parent;
	struct v4l2_mbus_framefmt *mf = &format->format;
	/* Remember the size the caller asked for - set_fmt may modify *mf */
	unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = V4L2_SEL_TGT_CROP_BOUNDS,
	};
	bool host_1to1;
	int ret;

	ret = v4l2_device_call_until_err(sd->v4l2_dev,
					 soc_camera_grp_id(icd), pad,
					 set_fmt, NULL, format);
	if (ret < 0)
		return ret;

	dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);

	if (width == mf->width && height == mf->height) {
		/* Perfect! The client has done it all. */
		host_1to1 = true;
		goto update_cache;
	}

	host_1to1 = false;
	if (!host_can_scale)
		goto update_cache;

	/* Limit the doubling iteration to the client crop bounds */
	ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel);
	if (ret < 0)
		return ret;

	if (max_width > sdsel.r.width)
		max_width = sdsel.r.width;
	if (max_height > sdsel.r.height)
		max_height = sdsel.r.height;

	/* Camera set a format, but geometry is not precise, try to improve */
	tmp_w = mf->width;
	tmp_h = mf->height;

	/* width <= max_width && height <= max_height - guaranteed by try_fmt */
	while ((width > tmp_w || height > tmp_h) &&
	       tmp_w < max_width && tmp_h < max_height) {
		tmp_w = min(2 * tmp_w, max_width);
		tmp_h = min(2 * tmp_h, max_height);
		mf->width = tmp_w;
		mf->height = tmp_h;
		ret = v4l2_device_call_until_err(sd->v4l2_dev,
					soc_camera_grp_id(icd), pad,
					set_fmt, NULL, format);
		dev_geo(dev, "Camera scaled to %ux%u\n",
			mf->width, mf->height);
		if (ret < 0) {
			/* This shouldn't happen */
			dev_err(dev, "Client failed to set format: %d\n", ret);
			return ret;
		}
	}

update_cache:
	/* Update cache */
	ret = soc_camera_client_g_rect(sd, rect);
	if (ret < 0)
		return ret;

	if (host_1to1)
		*subrect = *rect;
	else
		move_and_crop_subrect(rect, subrect);

	return 0;
}

/**
 * soc_camera_client_scale
 * @icd: soc-camera device
 * @rect: camera cropping window
 * @subrect: part of rect, sent to the user
 * @mf: in- / output camera output window
 * @width: on input: max host input width;
 *	   on output: user width, mapped back to input
 * @height: on input: max host input height;
 *	    on output: user height, mapped back to input
 * @host_can_scale: host can scale this pixel format
 * @shift: shift, used for scaling
 */
int soc_camera_client_scale(struct soc_camera_device *icd,
			struct v4l2_rect *rect, struct v4l2_rect *subrect,
			struct v4l2_mbus_framefmt *mf,
			unsigned int *width, unsigned int *height,
			bool host_can_scale, unsigned int shift)
{
	struct device *dev = icd->parent;
	struct v4l2_subdev_format fmt_tmp = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.format = *mf,
	};
	struct v4l2_mbus_framefmt *mf_tmp = &fmt_tmp.format;
	unsigned int scale_h, scale_v;
	int ret;

	/*
	 * 5. Apply iterative camera S_FMT for camera user window (also updates
	 *    client crop cache and the imaginary sub-rectangle).
	 */
	ret = client_set_fmt(icd, rect, subrect, *width, *height,
			     &fmt_tmp, host_can_scale);
	if (ret < 0)
		return ret;

	dev_geo(dev, "5: camera scaled to %ux%u\n",
		mf_tmp->width, mf_tmp->height);

	/* 6. Retrieve camera output window (g_fmt) */

	/* unneeded - it is already in "mf_tmp" */

	/* 7. Calculate new client scales. */
	scale_h = soc_camera_calc_scale(rect->width, shift, mf_tmp->width);
	scale_v = soc_camera_calc_scale(rect->height, shift, mf_tmp->height);

	mf->width = mf_tmp->width;
	mf->height = mf_tmp->height;
	mf->colorspace = mf_tmp->colorspace;

	/*
	 * 8. Calculate new host crop - apply camera scales to previously
	 *    updated "effective" crop.
	 */
	*width = soc_camera_shift_scale(subrect->width, shift, scale_h);
	*height = soc_camera_shift_scale(subrect->height, shift, scale_v);

	dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height);

	return 0;
}
EXPORT_SYMBOL(soc_camera_client_scale);

/*
 * Calculate real client output window by applying new scales to the current
 * client crop. New scales are calculated from the requested output format and
 * host crop, mapped backed onto the client input (subrect).
 */
void soc_camera_calc_client_output(struct soc_camera_device *icd,
		struct v4l2_rect *rect, struct v4l2_rect *subrect,
		const struct v4l2_pix_format *pix,
		struct v4l2_mbus_framefmt *mf, unsigned int shift)
{
	struct device *dev = icd->parent;
	unsigned int scale_v, scale_h;

	if (subrect->width == rect->width &&
	    subrect->height == rect->height) {
		/* No sub-cropping */
		mf->width = pix->width;
		mf->height = pix->height;
		return;
	}

	/* 1.-2. Current camera scales and subwin - cached. */

	dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
		subrect->width, subrect->height,
		subrect->left, subrect->top);

	/*
	 * 3. Calculate new combined scales from input sub-window to requested
	 *    user window.
	 */

	/*
	 * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF
	 * (128x96) or larger than VGA. This and similar limitations have to be
	 * taken into account here.
	 */
	scale_h = soc_camera_calc_scale(subrect->width, shift, pix->width);
	scale_v = soc_camera_calc_scale(subrect->height, shift, pix->height);

	dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);

	/*
	 * 4. Calculate desired client output window by applying combined
	 *    scales to client (real) input window.
	 */
	mf->width = soc_camera_shift_scale(rect->width, shift, scale_h);
	mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
}
EXPORT_SYMBOL(soc_camera_calc_client_output);

MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
MODULE_AUTHOR("Guennadi Liakhovetski <*****@*****.**>");
MODULE_LICENSE("GPL");
/*
 * Iteratively apply the requested crop on the client (camera sensor): set the
 * crop, and if the sensor did not take it exactly, clamp the request to the
 * sensor bounds and keep doubling the sensor window until it covers the
 * requested rectangle, finally falling back to the maximum rectangle.
 *
 * On return @cam_crop holds the rectangle the sensor actually configured.
 */
static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
			 struct v4l2_crop *cam_crop)
{
	struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
	struct device *dev = sd->v4l2_dev->dev;
	struct v4l2_cropcap cap;
	int ret;
	unsigned int width, height;

	/* First attempt: ask the client directly, then read back the result */
	v4l2_subdev_call(sd, video, s_crop, crop);
	ret = client_g_rect(sd, cam_rect);
	if (ret < 0)
		return ret;

	/* Done if the client applied the crop exactly as requested */
	if (!memcmp(rect, cam_rect, sizeof(*rect))) {
		dev_dbg(dev, "Camera S_CROP successful for %ux%u@%u:%u\n",
			rect->width, rect->height, rect->left, rect->top);
		return 0;
	}

	/* Try to fix cropping, that camera hasn't managed to set */
	dev_geo(dev, "Fix camera S_CROP for %ux%u@%u:%u to %ux%u@%u:%u\n",
		cam_rect->width, cam_rect->height,
		cam_rect->left, cam_rect->top,
		rect->width, rect->height, rect->left, rect->top);

	/* We need the sensor maximum rectangle */
	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
	if (ret < 0)
		return ret;

	/* Put the user requested rectangle within sensor bounds */
	soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
			      cap.bounds.width);
	soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
			      cap.bounds.height);

	/* Start doubling from at least 2x2 to avoid an infinite loop */
	width = max(cam_rect->width, 2);
	height = max(cam_rect->height, 2);

	/*
	 * Loop as long as the sensor is not covering the requested rectangle
	 * and is still within its bounds
	 */
	while (!ret && (is_smaller(cam_rect, rect) ||
			is_inside(cam_rect, rect)) &&
	       (cap.bounds.width > width || cap.bounds.height > height)) {

		width *= 2;
		height *= 2;

		cam_rect->width = width;
		cam_rect->height = height;

		/*
		 * Left and top are simply dropped to the bounds when the
		 * camera rectangle lies to the right of / below the target -
		 * iterating each border separately would add too much
		 * complexity.
		 */
		if (cam_rect->left > rect->left)
			cam_rect->left = cap.bounds.left;

		if (cam_rect->left + cam_rect->width <
		    rect->left + rect->width)
			cam_rect->width = rect->left + rect->width -
				cam_rect->left;

		if (cam_rect->top > rect->top)
			cam_rect->top = cap.bounds.top;

		if (cam_rect->top + cam_rect->height <
		    rect->top + rect->height)
			cam_rect->height = rect->top + rect->height -
				cam_rect->top;

		v4l2_subdev_call(sd, video, s_crop, cam_crop);
		ret = client_g_rect(sd, cam_rect);
		dev_geo(dev, "Camera S_CROP %d for %ux%u@%u:%u\n", ret,
			cam_rect->width, cam_rect->height,
			cam_rect->left, cam_rect->top);
	}

	/* Still not covering the request: fall back to the maximum rectangle */
	if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
		*cam_rect = cap.bounds;
		v4l2_subdev_call(sd, video, s_crop, cam_crop);
		ret = client_g_rect(sd, cam_rect);
		dev_geo(dev, "Camera S_CROP %d for max %ux%u@%u:%u\n", ret,
			cam_rect->width, cam_rect->height,
			cam_rect->left, cam_rect->top);
	}

	return ret;
}
/*
 * Program the CEU capture geometry registers (CAMOR, CAPWR, CFSZR, CDWDR)
 * from the cached CEU crop rectangle and the requested output size.
 */
static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
		unsigned int out_width, unsigned int out_height)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct v4l2_rect *rect = &cam->ceu_rect;
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	unsigned int height, width, cdwdr_width, in_width, in_height;
	unsigned int left_offset, top_offset;
	u32 camor;

	dev_dbg(icd->dev.parent, "Crop %ux%u@%u:%u\n",
		rect->width, rect->height, rect->left, rect->top);

	left_offset = rect->left;
	top_offset = rect->top;

	if (pcdev->image_mode) {
		in_width = rect->width;
		if (!pcdev->is_16bit) {
			/*
			 * 8-bit bus: input width and left offset are doubled.
			 * NOTE(review): presumably because each pixel takes
			 * two bus samples - confirm against the CEU manual.
			 */
			in_width *= 2;
			left_offset *= 2;
		}
		width = cdwdr_width = out_width;
	} else {
		/* Data-fetch mode: widths derived from bytes per pixel */
		unsigned int w_factor = (icd->current_fmt->depth + 7) >> 3;

		width = out_width * w_factor / 2;

		if (!pcdev->is_16bit)
			w_factor *= 2;

		in_width = rect->width * w_factor / 2;
		left_offset = left_offset * w_factor / 2;

		cdwdr_width = width * 2;
	}

	height = out_height;
	in_height = rect->height;
	if (pcdev->is_interlaced) {
		/* Each field carries half the lines; memory stride doubles */
		height /= 2;
		in_height /= 2;
		top_offset /= 2;
		cdwdr_width *= 2;
	}

	/* CAMOR packs left offset in the low half, top offset in the high */
	camor = left_offset | (top_offset << 16);
	dev_geo(icd->dev.parent,
		"CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
		(in_height << 16) | in_width, (height << 16) | width,
		cdwdr_width);
	ceu_write(pcdev, CAMOR, camor);
	ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
	ceu_write(pcdev, CFSZR, (height << 16) | width);
	ceu_write(pcdev, CDWDR, cdwdr_width);
}
/*
 * Negotiate the capture format between the user request, the camera client
 * and the CEU: compute camera scales and the client sub-window, let the
 * client scale iteratively, then program the remaining scaling into the CEU
 * (cached in pcdev->cflcr) and update the host/camera format caches.
 */
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
				 struct v4l2_format *f)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_format cam_f = *f;
	struct v4l2_pix_format *cam_pix = &cam_f.fmt.pix;
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct device *dev = icd->dev.parent;
	__u32 pixfmt = pix->pixelformat;
	const struct soc_camera_format_xlate *xlate;
	struct v4l2_crop cam_crop;
	struct v4l2_rect *cam_rect = &cam_crop.c, cam_subrect, ceu_rect;
	unsigned int scale_cam_h, scale_cam_v;
	u16 scale_v, scale_h;
	int ret;
	bool is_interlaced, image_mode;

	switch (pix->field) {
	case V4L2_FIELD_INTERLACED:
		is_interlaced = true;
		break;
	case V4L2_FIELD_ANY:
	default:
		pix->field = V4L2_FIELD_NONE;
		/* fall through */
	case V4L2_FIELD_NONE:
		is_interlaced = false;
		break;
	}

	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
	if (!xlate) {
		dev_warn(dev, "Format %x not found\n", pixfmt);
		return -EINVAL;
	}

	/* 1. Calculate current camera scales. */
	cam_crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ret = client_g_rect(sd, cam_rect);
	if (ret < 0)
		return ret;

	ret = get_camera_scales(sd, cam_rect, &scale_cam_h, &scale_cam_v);
	if (ret < 0)
		return ret;

	dev_geo(dev, "1: camera scales %u:%u\n", scale_cam_h, scale_cam_v);

	/* 2. Retrieve the current client sub-window. */
	ret = get_camera_subwin(icd, &cam_subrect, scale_cam_h, scale_cam_v);
	if (ret < 0)
		return ret;

	dev_geo(dev, "2: subwin %ux%u@%u:%u\n",
		cam_subrect.width, cam_subrect.height,
		cam_subrect.left, cam_subrect.top);

	/* 3. Combined scales: sub-window to the requested user window. */
	scale_h = calc_generic_scale(cam_subrect.width, pix->width);
	scale_v = calc_generic_scale(cam_subrect.height, pix->height);

	dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v);

	/* 4. Desired camera output window. */
	cam_pix->width = scale_down(cam_rect->width, scale_h);
	cam_pix->height = scale_down(cam_rect->height, scale_v);
	cam_pix->pixelformat = xlate->cam_fmt->fourcc;

	/* NV formats select the CEU image-capture mode */
	switch (pixfmt) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		image_mode = true;
		break;
	default:
		image_mode = false;
	}

	dev_geo(dev, "4: camera output %ux%u\n",
		cam_pix->width, cam_pix->height);

	/* 5.-9. Client scaling; also recalculates the CEU crop rectangle. */
	ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &cam_f,
			   image_mode && !is_interlaced);

	dev_geo(dev, "5-9: client scale %d\n", ret);

	dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
		ret, cam_pix->width, cam_pix->height,
		pix->width, pix->height);

	if (ret < 0)
		return ret;

	/* Output must not exceed the camera output or the CEU crop */
	if (pix->width > cam_pix->width)
		pix->width = cam_pix->width;
	if (pix->width > ceu_rect.width)
		pix->width = ceu_rect.width;
	if (pix->height > cam_pix->height)
		pix->height = cam_pix->height;
	if (pix->height > ceu_rect.height)
		pix->height = ceu_rect.height;

	/* 10. CEU scales from the CEU crop to the (adjusted) output. */
	scale_h = calc_scale(ceu_rect.width, &pix->width);
	scale_v = calc_scale(ceu_rect.height, &pix->height);

	dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
		ceu_rect.width, scale_h, pix->width,
		ceu_rect.height, scale_v, pix->height);

	/* CFLCR cache: horizontal scale in the low half, vertical in high */
	pcdev->cflcr = scale_h | (scale_v << 16);

	/* Update the host and camera format / crop caches */
	icd->buswidth = xlate->buswidth;
	icd->current_fmt = xlate->host_fmt;
	cam->camera_fmt = xlate->cam_fmt;
	cam->ceu_rect = ceu_rect;

	pcdev->is_interlaced = is_interlaced;
	pcdev->image_mode = image_mode;

	return 0;
}
/*
 * Apply a cropping rectangle: negotiate the crop with the client, stop the
 * capture, reprogram the CEU cropping and scaling registers, then restore
 * the capture state and update the user-visible geometry.
 */
static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
				  struct v4l2_crop *a)
{
	struct v4l2_rect *rect = &a->c;
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct v4l2_crop cam_crop;
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect;
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct device *dev = icd->dev.parent;
	struct v4l2_format f;
	struct v4l2_pix_format *pix = &f.fmt.pix;
	unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v,
		out_width, out_height;
	u32 capsr, cflcr;
	int ret;

	/* 1. Calculate current combined (camera * CEU) scales. */
	ret = get_scales(icd, &scale_comb_h, &scale_comb_v);
	if (ret < 0)
		return ret;

	dev_geo(dev, "1: combined scales %u:%u\n",
		scale_comb_h, scale_comb_v);

	/* 2. Apply iterative camera S_CROP for the new input window. */
	ret = client_s_crop(sd, a, &cam_crop);
	if (ret < 0)
		return ret;

	dev_geo(dev, "2: camera cropped to %ux%u@%u:%u\n",
		cam_rect->width, cam_rect->height,
		cam_rect->left, cam_rect->top);

	/* 3. New output window, clamped to the CEU min/max limits. */
	out_width = scale_down(rect->width, scale_comb_h);
	out_height = scale_down(rect->height, scale_comb_v);

	if (out_width > 2560)
		out_width = 2560;
	else if (out_width < 2)
		out_width = 2;

	if (out_height > 1920)
		out_height = 1920;
	else if (out_height < 4)
		out_height = 4;

	dev_geo(dev, "3: Adjusted output %ux%u\n", out_width, out_height);

	/* 5. Camera target window. */
	pix->width = scale_down(cam_rect->width, scale_comb_h);
	pix->height = scale_down(cam_rect->height, scale_comb_v);

	dev_geo(dev, "5: camera target %ux%u\n", pix->width, pix->height);

	pix->pixelformat = cam->camera_fmt->fourcc;
	pix->colorspace = cam->camera_fmt->colorspace;

	/* Stop capture while reprogramming; state is restored below */
	capsr = capture_save_reset(pcdev);
	dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);

	/* Make the requested crop relative to the camera rectangle */
	rect->left -= cam_rect->left;
	rect->top -= cam_rect->top;

	f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* 6.-9. Client scaling; also recalculates the CEU crop rectangle. */
	ret = client_scale(icd, cam_rect, rect, ceu_rect, &f,
			   pcdev->image_mode && !pcdev->is_interlaced);

	dev_geo(dev, "6-9: %d\n", ret);

	/* 10. Use CEU cropping to crop to the new window. */
	sh_mobile_ceu_set_rect(icd, out_width, out_height);

	dev_geo(dev, "10: CEU cropped to %ux%u@%u:%u\n",
		ceu_rect->width, ceu_rect->height,
		ceu_rect->left, ceu_rect->top);

	/* 11. Calculate and set up the new CEU scaling. */
	scale_ceu_h = calc_scale(ceu_rect->width, &out_width);
	scale_ceu_v = calc_scale(ceu_rect->height, &out_height);

	dev_geo(dev, "11: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);

	cflcr = scale_ceu_h | (scale_ceu_v << 16);
	if (cflcr != pcdev->cflcr) {
		pcdev->cflcr = cflcr;
		ceu_write(pcdev, CFLCR, cflcr);
	}

	/* Restore capture; restart it if it was active before */
	if (pcdev->active)
		capsr |= 1;
	capture_restore(pcdev, capsr);

	icd->user_width = out_width;
	icd->user_height = out_height;

	return ret;
}
/*
 * The common for both scaling and cropping iterative approach is:
 * 1. try if the client can produce exactly what requested by the user
 * 2. if (1) failed, try to double the client image until we get one big enough
 * 3. if (2) failed, try to request the maximum image
 */
int soc_camera_client_s_crop(struct v4l2_subdev *sd,
			struct v4l2_crop *crop, struct v4l2_crop *cam_crop,
			struct v4l2_rect *target_rect,
			struct v4l2_rect *subrect)
{
	struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c;
	struct device *dev = sd->v4l2_dev->dev;
	struct v4l2_cropcap cap;
	int ret;
	unsigned int width, height;

	/* First attempt: ask the client directly, then read back the result */
	v4l2_subdev_call(sd, video, s_crop, crop);
	ret = soc_camera_client_g_rect(sd, cam_rect);
	if (ret < 0)
		return ret;

	/*
	 * Now cam_crop contains the current camera input rectangle, and it must
	 * be within camera cropcap bounds
	 */
	if (!memcmp(rect, cam_rect, sizeof(*rect))) {
		/* Even if camera S_CROP failed, but camera rectangle matches */
		dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n",
			rect->width, rect->height, rect->left, rect->top);
		*target_rect = *cam_rect;
		return 0;
	}

	/* Try to fix cropping, that camera hasn't managed to set */
	dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n",
		cam_rect->width, cam_rect->height,
		cam_rect->left, cam_rect->top,
		rect->width, rect->height, rect->left, rect->top);

	/* We need sensor maximum rectangle */
	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
	if (ret < 0)
		return ret;

	/* Put user requested rectangle within sensor bounds */
	soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2,
			      cap.bounds.width);
	soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4,
			      cap.bounds.height);

	/*
	 * Popular special case - some cameras can only handle fixed sizes like
	 * QVGA, VGA,... Take care to avoid infinite loop.
	 */
	width = max_t(unsigned int, cam_rect->width, 2);
	height = max_t(unsigned int, cam_rect->height, 2);

	/*
	 * Loop as long as sensor is not covering the requested rectangle and
	 * is still within its bounds
	 */
	while (!ret && (is_smaller(cam_rect, rect) ||
			is_inside(cam_rect, rect)) &&
	       (cap.bounds.width > width || cap.bounds.height > height)) {

		width *= 2;
		height *= 2;

		cam_rect->width = width;
		cam_rect->height = height;

		/*
		 * We do not know what capabilities the camera has to set up
		 * left and top borders. We could try to be smarter in iterating
		 * them, e.g., if camera current left is to the right of the
		 * target left, set it to the middle point between the current
		 * left and minimum left. But that would add too much
		 * complexity: we would have to iterate each border separately.
		 * Instead we just drop to the left and top bounds.
		 */
		if (cam_rect->left > rect->left)
			cam_rect->left = cap.bounds.left;

		if (cam_rect->left + cam_rect->width <
		    rect->left + rect->width)
			cam_rect->width = rect->left + rect->width -
				cam_rect->left;

		if (cam_rect->top > rect->top)
			cam_rect->top = cap.bounds.top;

		if (cam_rect->top + cam_rect->height <
		    rect->top + rect->height)
			cam_rect->height = rect->top + rect->height -
				cam_rect->top;

		v4l2_subdev_call(sd, video, s_crop, cam_crop);
		ret = soc_camera_client_g_rect(sd, cam_rect);
		dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret,
			cam_rect->width, cam_rect->height,
			cam_rect->left, cam_rect->top);
	}

	/* S_CROP must not modify the rectangle */
	if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) {
		/*
		 * The camera failed to configure a suitable cropping,
		 * we cannot use the current rectangle, set to max
		 */
		*cam_rect = cap.bounds;
		v4l2_subdev_call(sd, video, s_crop, cam_crop);
		ret = soc_camera_client_g_rect(sd, cam_rect);
		dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret,
			cam_rect->width, cam_rect->height,
			cam_rect->left, cam_rect->top);
	}

	if (!ret) {
		/* Cache the achieved client rectangle, adjust the sub-window */
		*target_rect = *cam_rect;
		update_subrect(target_rect, subrect);
	}

	return ret;
}
/* Iterative s_mbus_fmt, also updates cached client crop on success */
static int client_s_fmt(struct soc_camera_device *icd,
			struct v4l2_rect *rect, struct v4l2_rect *subrect,
			unsigned int max_width, unsigned int max_height,
			struct v4l2_mbus_framefmt *mf, bool host_can_scale)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct device *dev = icd->parent;
	/* Remember the size the caller asked for - s_mbus_fmt may modify *mf */
	unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
	struct v4l2_cropcap cap;
	bool host_1to1;
	int ret;

	ret = v4l2_device_call_until_err(sd->v4l2_dev,
					 soc_camera_grp_id(icd), video,
					 s_mbus_fmt, mf);
	if (ret < 0)
		return ret;

	dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);

	if (width == mf->width && height == mf->height) {
		/* Perfect! The client has done it all. */
		host_1to1 = true;
		goto update_cache;
	}

	host_1to1 = false;
	if (!host_can_scale)
		goto update_cache;

	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* Limit the doubling iteration to the sensor crop bounds */
	ret = v4l2_subdev_call(sd, video, cropcap, &cap);
	if (ret < 0)
		return ret;

	if (max_width > cap.bounds.width)
		max_width = cap.bounds.width;
	if (max_height > cap.bounds.height)
		max_height = cap.bounds.height;

	/* Camera set a format, but geometry is not precise, try to improve */
	tmp_w = mf->width;
	tmp_h = mf->height;

	/* width <= max_width && height <= max_height - guaranteed by try_fmt */
	while ((width > tmp_w || height > tmp_h) &&
	       tmp_w < max_width && tmp_h < max_height) {
		tmp_w = min(2 * tmp_w, max_width);
		tmp_h = min(2 * tmp_h, max_height);
		mf->width = tmp_w;
		mf->height = tmp_h;
		ret = v4l2_device_call_until_err(sd->v4l2_dev,
					soc_camera_grp_id(icd), video,
					s_mbus_fmt, mf);
		dev_geo(dev, "Camera scaled to %ux%u\n",
			mf->width, mf->height);
		if (ret < 0) {
			/* This shouldn't happen */
			dev_err(dev, "Client failed to set format: %d\n", ret);
			return ret;
		}
	}

update_cache:
	/* Update cache */
	ret = soc_camera_client_g_rect(sd, rect);
	if (ret < 0)
		return ret;

	if (host_1to1)
		*subrect = *rect;
	else
		update_subrect(rect, subrect);

	return 0;
}