Esempio n. 1
0
/*
 * Main ioctl dispatcher of the mplane plugin: translates the ioctls of
 * the single-plane API issued by the application into their multi-plane
 * counterparts understood by the driver.
 *
 * dev_ops_priv is the struct mplane_plugin allocated at plugin init time.
 * Returns the ioctl result: 0 on success, -1 with errno set on error.
 */
static int plugin_ioctl(void *dev_ops_priv, int fd,
			unsigned long int cmd, void *arg)
{
	struct mplane_plugin *plugin = dev_ops_priv;
	switch (cmd) {
	case VIDIOC_QUERYCAP:
		/* Needs capability translation (mplane -> single-plane) */
		return querycap_ioctl(fd, cmd, arg);
	case VIDIOC_TRY_FMT:
	case VIDIOC_S_FMT:
		return try_set_fmt_ioctl(fd, cmd, arg);
	case VIDIOC_G_FMT:
		return get_fmt_ioctl(fd, cmd, arg);
	case VIDIOC_ENUM_FMT:
		/* These ioctls only need their buffer type field converted */
		return SIMPLE_CONVERT_IOCTL(fd, cmd, arg, v4l2_fmtdesc);
	case VIDIOC_S_PARM:
	case VIDIOC_G_PARM:
		return SIMPLE_CONVERT_IOCTL(fd, cmd, arg, v4l2_streamparm);
	case VIDIOC_CROPCAP:
		return SIMPLE_CONVERT_IOCTL(fd, cmd, arg, v4l2_cropcap);
	case VIDIOC_S_CROP:
	case VIDIOC_G_CROP:
		return SIMPLE_CONVERT_IOCTL(fd, cmd, arg, v4l2_crop);
	case VIDIOC_QBUF:
	case VIDIOC_DQBUF:
	case VIDIOC_QUERYBUF:
	case VIDIOC_PREPARE_BUF:
		/* Buffer ioctls need a single v4l2_plane inserted/removed */
		return buf_ioctl(fd, cmd, arg);
	case VIDIOC_CREATE_BUFS:
		return create_bufs_ioctl(fd, cmd, arg);
	case VIDIOC_REQBUFS:
		return SIMPLE_CONVERT_IOCTL(fd, cmd, arg, v4l2_requestbuffers);
	case VIDIOC_STREAMON:
	case VIDIOC_STREAMOFF:
	{
		int type, ret;

		/*
		 * If the device has both capture and output, weird things
		 * could happen. For now, let's not consider this case. If
		 * this ever happens in practice, the logic should be changed
		 * to track reqbufs, in order to identify what's required.
		 */
		if (plugin->mplane_capture) {
			type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
			ret = SYS_IOCTL(fd, cmd, &type);
		} else if (plugin->mplane_output) {
			type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
			ret = SYS_IOCTL(fd, cmd, &type);
		} else {
			/* Device is neither mplane capture nor mplane output */
			ret = -1;
			errno = EINVAL;
		}

		return ret;
	}
	/* CASE VIDIOC_EXPBUF: */
	default:
		/* Everything else can go to the driver untranslated */
		return SYS_IOCTL(fd, cmd, arg);
	}
}
Esempio n. 2
0
/*
 * Convert QBUF/DQBUF/QUERYBUF/PREPARE_BUF between the single-plane
 * v4l2_buffer the application uses and the multi-plane variant the
 * driver expects (carrying exactly one v4l2_plane).
 */
static int buf_ioctl(int fd, unsigned long int cmd, struct v4l2_buffer *arg)
{
	struct v4l2_buffer buf = *arg;
	struct v4l2_plane plane = { 0 };
	int ret;

	buf.type = convert_type(arg->type);

	/* Type did not need conversion: pass the buffer through untouched */
	if (buf.type == arg->type)
		return SYS_IOCTL(fd, cmd, &buf);

	/* Move the memory descriptor and the sizes into the single plane.
	   NOTE(review): this raw memcpy relies on the 'm' unions of
	   v4l2_buffer and v4l2_plane having compatible layouts for the
	   offset/userptr/fd members - verify against the UAPI headers. */
	memcpy(&plane.m, &arg->m, sizeof(plane.m));
	plane.length = arg->length;
	plane.bytesused = arg->bytesused;
	
	buf.m.planes = &plane;
	buf.length = 1;

	ret = SYS_IOCTL(fd, cmd, &buf);

	/* Copy back the fields the driver may have updated.
	   NOTE(review): this copy-back happens even when ret != 0, and
	   plane.m is never copied back into arg->m - confirm callers do
	   not rely on e.g. m.offset after QUERYBUF. */
	arg->index = buf.index;
	arg->flags = buf.flags;
	arg->field = buf.field;
	arg->timestamp = buf.timestamp;
	arg->timecode = buf.timecode;
	arg->sequence = buf.sequence;

	arg->length = plane.length;
	arg->bytesused = plane.bytesused;

	return ret;
}
Esempio n. 3
0
/*
 * Enumerate the frame sizes available for a destination pixel format.
 * Formats we do not emulate are forwarded to the driver (unless only
 * emulated formats are exposed); emulated ones are answered from the
 * framesizes table gathered at probe time.
 */
int v4lconvert_enum_framesizes(struct v4lconvert_data *data,
		struct v4l2_frmsizeenum *frmsize)
{
	unsigned int idx = frmsize->index;

	if (!v4lconvert_supported_dst_format(frmsize->pixel_format)) {
		/* Not one of our emulated formats */
		if (!v4lconvert_supported_dst_fmt_only(data))
			return SYS_IOCTL(data->fd, VIDIOC_ENUM_FRAMESIZES,
					frmsize);
		errno = EINVAL;
		return -1;
	}

	if (idx >= data->no_framesizes) {
		errno = EINVAL;
		return -1;
	}

	frmsize->type = data->framesizes[idx].type;
	switch (frmsize->type) {
	case V4L2_FRMSIZE_TYPE_DISCRETE:
		frmsize->discrete = data->framesizes[idx].discrete;
		/* Apply the same rounding algorithm as v4lconvert_try_format */
		frmsize->discrete.width &= ~7;
		frmsize->discrete.height &= ~1;
		break;
	case V4L2_FRMSIZE_TYPE_CONTINUOUS:
	case V4L2_FRMSIZE_TYPE_STEPWISE:
		frmsize->stepwise = data->framesizes[idx].stepwise;
		break;
	}

	return 0;
}
Esempio n. 4
0
/*
 * VIDIOC_QUERYCAP handler: make an mplane-only device look like a
 * regular single-plane one by translating its capability flags.
 */
static int querycap_ioctl(int fd, unsigned long int cmd,
			  struct v4l2_capability *arg)
{
	int ret = SYS_IOCTL(fd, cmd, arg);

	if (ret)
		return ret;

	/* Report the mplane capabilities as their single-plane equivalents */
	if (arg->capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE)
		arg->capabilities |= V4L2_CAP_VIDEO_CAPTURE;
	if (arg->capabilities & V4L2_CAP_VIDEO_OUTPUT_MPLANE)
		arg->capabilities |= V4L2_CAP_VIDEO_OUTPUT;

	/* Hide the mplane caps themselves: this plugin handles them */
	arg->capabilities &= ~(V4L2_CAP_VIDEO_CAPTURE_MPLANE |
			       V4L2_CAP_VIDEO_OUTPUT_MPLANE);

	return 0;
}
Esempio n. 5
0
/*
 * (Re-)request the driver mmap buffers that are used to emulate read().
 * Returns 0 on success, or the VIDIOC_REQBUFS result (with errno
 * preserved) on failure.
 */
static int v4l2_request_read_buffers(int index)
{
	int result;
	struct v4l2_requestbuffers req;

	/* Note we re-request the buffers if they are already requested as the format
	   and thus the needed buffersize may have changed. */
	memset(&req, 0, sizeof(req));	/* reserved[] must be zeroed for the kernel */
	req.count = (devices[index].no_frames) ? devices[index].no_frames :
		devices[index].nreadbuffers;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	result = SYS_IOCTL(devices[index].fd, VIDIOC_REQBUFS, &req);
	if (result < 0) {
		int saved_err = errno;

		/* Log, but hand the original errno back to the caller */
		V4L2_LOG("warning reqbuf (%u) failed: %s\n", req.count, strerror(errno));
		errno = saved_err;
		return result;
	}

	/* Remember that read() (not the app) requested these buffers, so
	   they can be released again by v4l2_unrequest_read_buffers() */
	if (!devices[index].no_frames && req.count)
		devices[index].flags |= V4L2_BUFFERS_REQUESTED_BY_READ;

	devices[index].no_frames = MIN(req.count, V4L2_MAX_NO_FRAMES);
	return 0;
}
Esempio n. 6
0
/*
 * Check whether any of the (real or conversion) buffers are still
 * mmap()ed by the application.
 * Returns non-zero when at least one buffer is still mapped, 0 otherwise.
 */
static int v4l2_buffers_mapped(int index)
{
	unsigned int i;

	if (!v4l2_needs_conversion(index)) {
		/* Normal (no conversion) mode: ask the kernel */
		struct v4l2_buffer buf;

		for (i = 0; i < devices[index].no_frames; i++) {
			/* Zero buf: the kernel expects the reserved fields
			   to be cleared, and buf was previously passed to
			   the ioctl uninitialized. */
			memset(&buf, 0, sizeof(buf));
			buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			buf.memory = V4L2_MEMORY_MMAP;
			buf.index = i;
			if (SYS_IOCTL(devices[index].fd, VIDIOC_QUERYBUF, &buf)) {
				int saved_err = errno;

				V4L2_LOG_ERR("querying buffer %u: %s\n", i, strerror(errno));
				errno = saved_err;
				break;
			}
			if (buf.flags & V4L2_BUF_FLAG_MAPPED)
				break;
		}
	} else {
		/* Conversion mode: we track the map counts ourselves */
		for (i = 0; i < devices[index].no_frames; i++)
			if (devices[index].frame_map_count[i])
				break;
	}

	/* The loops above break out early iff a mapped buffer was found */
	if (i != devices[index].no_frames)
		V4L2_LOG("v4l2_buffers_mapped(): buffers still mapped\n");

	return i != devices[index].no_frames;
}
Esempio n. 7
0
/*
 * Enumerate the driver's frame sizes for pixelformat (the index-th
 * supported source format) and merge them into data->framesizes,
 * skipping duplicates. The stored pixel_format member is reused as a
 * bitmask of the src format indices that support each size.
 */
static void v4lconvert_get_framesizes(struct v4lconvert_data *data,
		unsigned int pixelformat, int index)
{
	int i, j, match;
	struct v4l2_frmsizeenum frmsize = { .pixel_format = pixelformat };

	/* Walk the driver's enumeration until it reports no more entries */
	for (i = 0; ; i++) {
		frmsize.index = i;
		if (SYS_IOCTL(data->fd, VIDIOC_ENUM_FRAMESIZES, &frmsize))
			break;

		/* We got a framesize, check we don't have the same one already */
		match = 0;
		for (j = 0; j < data->no_framesizes; j++) {
			if (frmsize.type != data->framesizes[j].type)
				continue;

			switch (frmsize.type) {
			case V4L2_FRMSIZE_TYPE_DISCRETE:
				if (!memcmp(&frmsize.discrete, &data->framesizes[j].discrete,
							sizeof(frmsize.discrete)))
					match = 1;
				break;
			case V4L2_FRMSIZE_TYPE_CONTINUOUS:
			case V4L2_FRMSIZE_TYPE_STEPWISE:
				/* Both variants compare via the stepwise member */
				if (!memcmp(&frmsize.stepwise, &data->framesizes[j].stepwise,
							sizeof(frmsize.stepwise)))
					match = 1;
				break;
			}
			if (match)
				break;
		}
		/* Add this framesize if it is not already in our list */
		if (!match) {
			if (data->no_framesizes == V4LCONVERT_MAX_FRAMESIZES) {
				fprintf(stderr, "libv4lconvert: warning more framesizes then I can handle!\n");
				return;
			}
			data->framesizes[data->no_framesizes].type = frmsize.type;
			/* We use the pixel_format member to store a bitmask of all
			   supported src_formats which can do this size */
			data->framesizes[data->no_framesizes].pixel_format = 1 << index;

			switch (frmsize.type) {
			case V4L2_FRMSIZE_TYPE_DISCRETE:
				data->framesizes[data->no_framesizes].discrete = frmsize.discrete;
				break;
			case V4L2_FRMSIZE_TYPE_CONTINUOUS:
			case V4L2_FRMSIZE_TYPE_STEPWISE:
				data->framesizes[data->no_framesizes].stepwise = frmsize.stepwise;
				break;
			}
			data->no_framesizes++;
		} else {
			/* Known size: just record that this src format has it too */
			data->framesizes[j].pixel_format |= 1 << index;
		}
	}
}
Esempio n. 8
0
static int create_bufs_ioctl(int fd, unsigned long int cmd,
			     struct v4l2_create_buffers *arg)
{
	struct v4l2_format fmt = { 0 };
	struct v4l2_format *org = &arg->format;
	int ret;

	switch (arg->format.type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		break;
	default:
		return SYS_IOCTL(fd, cmd, &fmt);
	}

	fmt.fmt.pix_mp.width = org->fmt.pix.width;
	fmt.fmt.pix_mp.height = org->fmt.pix.height;
	fmt.fmt.pix_mp.pixelformat = org->fmt.pix.pixelformat;
	fmt.fmt.pix_mp.field = org->fmt.pix.field;
	fmt.fmt.pix_mp.colorspace = org->fmt.pix.colorspace;
	fmt.fmt.pix_mp.num_planes = 1;
	fmt.fmt.pix_mp.plane_fmt[0].bytesperline = org->fmt.pix.bytesperline;
	fmt.fmt.pix_mp.plane_fmt[0].sizeimage = org->fmt.pix.sizeimage;

	ret = SYS_IOCTL(fd, cmd, &arg);
	if (ret)
		return ret;

	org->fmt.pix.width = fmt.fmt.pix_mp.width;
	org->fmt.pix.height = fmt.fmt.pix_mp.height;
	org->fmt.pix.pixelformat = fmt.fmt.pix_mp.pixelformat;
	org->fmt.pix.field = fmt.fmt.pix_mp.field;
	org->fmt.pix.colorspace = fmt.fmt.pix_mp.colorspace;
	org->fmt.pix.bytesperline = fmt.fmt.pix_mp.plane_fmt[0].bytesperline;
	org->fmt.pix.sizeimage = fmt.fmt.pix_mp.plane_fmt[0].sizeimage;

	return 0;
}
Esempio n. 9
0
/*
 * VIDIOC_G_FMT handler: query the driver using the multi-plane format
 * and convert the result back to the single-plane layout the caller
 * expects. Only num_planes == 1 formats can be represented; anything
 * else is reported as an error without touching the caller's struct.
 *
 * Bug fixes vs. the previous version:
 *  - the default (untranslated) case now forwards the caller's
 *    argument; the old code passed a zeroed local, so the caller never
 *    saw the driver's answer;
 *  - errno is set to EINVAL, not -EINVAL (errno takes positive codes).
 */
static int get_fmt_ioctl(int fd, unsigned long int cmd, struct v4l2_format *arg)
{
	struct v4l2_format fmt = { 0 };
	struct v4l2_format *org = arg;
	int ret;

	switch (arg->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		break;
	default:
		/* Not a type we translate: hand the request through */
		return SYS_IOCTL(fd, cmd, arg);
	}

	ret = SYS_IOCTL(fd, cmd, &fmt);
	if (ret)
		return ret;

	/*
	 * If the device doesn't support just one plane, there's
	 * nothing we can do, except return an error condition.
	 */
	if (fmt.fmt.pix_mp.num_planes > 1) {
		errno = EINVAL;
		return -1;
	}

	org->fmt.pix.width = fmt.fmt.pix_mp.width;
	org->fmt.pix.height = fmt.fmt.pix_mp.height;
	org->fmt.pix.pixelformat = fmt.fmt.pix_mp.pixelformat;
	org->fmt.pix.field = fmt.fmt.pix_mp.field;
	org->fmt.pix.colorspace = fmt.fmt.pix_mp.colorspace;
	org->fmt.pix.bytesperline = fmt.fmt.pix_mp.plane_fmt[0].bytesperline;
	org->fmt.pix.sizeimage = fmt.fmt.pix_mp.plane_fmt[0].sizeimage;

	return 0;
}
Esempio n. 10
0
int v4lconvert_enum_frameintervals(struct v4lconvert_data *data,
		struct v4l2_frmivalenum *frmival)
{
	int res;
	struct v4l2_format src_fmt, dest_fmt;

	if (!v4lconvert_supported_dst_format(frmival->pixel_format)) {
		if (v4lconvert_supported_dst_fmt_only(data)) {
			errno = EINVAL;
			return -1;
		}
		res = SYS_IOCTL(data->fd, VIDIOC_ENUM_FRAMEINTERVALS, frmival);
		if (res)
			V4LCONVERT_ERR("%s\n", strerror(errno));
		return res;
	}

	/* Check which format we will be using to convert to frmival->pixel_format */
	memset(&dest_fmt, 0, sizeof(dest_fmt));
	dest_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dest_fmt.fmt.pix.pixelformat = frmival->pixel_format;
	dest_fmt.fmt.pix.width = frmival->width;
	dest_fmt.fmt.pix.height = frmival->height;
	res = v4lconvert_try_format(data, &dest_fmt, &src_fmt);
	if (res) {
		V4LCONVERT_ERR("trying format: %s\n", strerror(errno));
		return res;
	}

	/* Check the requested format is supported exactly as requested */
	if (dest_fmt.fmt.pix.pixelformat != frmival->pixel_format ||
			dest_fmt.fmt.pix.width  != frmival->width ||
			dest_fmt.fmt.pix.height != frmival->height) {
		int frmival_pixformat = frmival->pixel_format;
		int dest_pixformat = dest_fmt.fmt.pix.pixelformat;

		V4LCONVERT_ERR("Could not find matching framesize for: %c%c%c%c %dx%d "
				"closest match: %c%c%c%c %dx%d\n",
				frmival_pixformat & 0xff,
				(frmival_pixformat >> 8) & 0xff,
				(frmival_pixformat >> 16) & 0xff,
				frmival_pixformat >> 24,
				frmival->width, frmival->height,
				dest_pixformat & 0xff,
				(dest_pixformat >> 8) & 0xff,
				(dest_pixformat >> 16) & 0xff,
				dest_pixformat >> 24,
				dest_fmt.fmt.pix.width , dest_fmt.fmt.pix.height);
		errno = EINVAL;
		return -1;
	}
Esempio n. 11
0
/*
 * VIDIOC_G_CTRL handler: fake (software emulated) controls are served
 * from the shared-memory value table; everything else goes to the
 * driver.
 */
int v4lcontrol_vidioc_g_ctrl(struct v4lcontrol_data *data, void *arg)
{
	struct v4l2_control *ctrl = arg;
	int i;

	for (i = 0; i < V4LCONTROL_COUNT; i++) {
		if (!(data->controls & (1 << i)))
			continue;
		if (ctrl->id != fake_controls[i].id)
			continue;

		ctrl->value = data->shm_values[i];
		return 0;
	}

	/* Not one of ours: forward to the driver */
	return SYS_IOCTL(data->fd, VIDIOC_G_CTRL, arg);
}
Esempio n. 12
0
/*
 * Wrapper around open(2): detects video4linux2 device nodes and
 * transparently registers them with libv4l2, so subsequent calls on
 * the fd go through the emulation layer. Non-v4l paths are opened
 * exactly like a plain open().
 */
LIBV4L_PUBLIC int open(const char *file, int oflag, ...)
{
	int fd;
	struct v4l2_capability cap;
	int v4l_device = 0;

	/* check if we're opening a video4linux2 device */
	if (!strncmp(file, "/dev/video", 10) || !strncmp(file, "/dev/v4l/", 9)) {
		/* Some apps open the device read only, but we need rw rights as the
		   buffers *MUST* be mapped rw */
		oflag = (oflag & ~O_ACCMODE) | O_RDWR;
		v4l_device = 1;
	}

	/* original open code */
	if (oflag & O_CREAT) {
		va_list ap;
		mode_t mode;

		va_start(ap, oflag);
		/* NOTE(review): if mode_t is narrower than int it is
		   promoted through varargs; va_arg(ap, mode_t) may need a
		   promoted type on some ABIs - confirm against the libc's
		   own open() implementation. */
		mode = va_arg(ap, mode_t);

		fd = SYS_OPEN(file, oflag, mode);

		va_end(ap);
	} else {
		fd = SYS_OPEN(file, oflag, 0);
	}
	/* end of original open code */

	/* Failed opens and non-v4l fds are passed straight through */
	if (fd == -1 || !v4l_device)
		return fd;

	/* check that this is an v4l2 device, libv4l2 only supports v4l2 devices */
	if (SYS_IOCTL(fd, VIDIOC_QUERYCAP, &cap))
		return fd;

	/* libv4l2 only adds functionality to capture capable devices */
	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
		return fd;

	/* Try to Register with libv4l2 (in case of failure pass the fd to the
	   application as is) */
	v4l2_fd_open(fd, 0);

	return fd;
}
Esempio n. 13
0
/*
 * Probe whether the device needs the mplane emulation (i.e. it exposes
 * only the multi-plane API) and allocate the per-device plugin state.
 * Returns NULL when the plugin should stay disabled for this device.
 */
static void *plugin_init(int fd)
{
	struct mplane_plugin plugin, *ret_plugin;
	struct v4l2_capability cap;
	int ret;

	memset(&plugin, 0, sizeof(plugin));
	memset(&cap, 0, sizeof(cap));

	/* Check if device needs mplane plugin */
	ret = SYS_IOCTL(fd, VIDIOC_QUERYCAP, &cap);
	if (ret) {
		perror("Failed to query video capabilities");
		return NULL;
	}

	/* Emulation is only wanted when a multi-plane capability exists
	   without its single-plane counterpart. */
	if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) &&
	    !(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
		plugin.mplane_capture = 1;

	if ((cap.capabilities & V4L2_CAP_VIDEO_OUTPUT_MPLANE) &&
	    !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT))
		plugin.mplane_output = 1;

	/* Device doesn't need it. return NULL to disable the plugin */
	if (!plugin.mplane)
		return NULL;

	/* Allocate and initialize private data */
	ret_plugin = calloc(1, sizeof(*ret_plugin));
	if (!ret_plugin) {
		perror("Couldn't allocate memory for plugin");
		return NULL;
	}
	*ret_plugin = plugin;

	printf("Using mplane plugin for %s%s\n",
	       plugin.mplane_capture ? "capture " : "",
	       plugin.mplane_output ? "output " : "");

	return ret_plugin;
}
Esempio n. 14
0
/*
 * VIDIOC_S_CTRL handler: values for fake (software emulated) controls
 * are range-checked and stored in shared memory; everything else goes
 * to the driver.
 */
int v4lcontrol_vidioc_s_ctrl(struct v4lcontrol_data *data, void *arg)
{
	struct v4l2_control *ctrl = arg;
	int i;

	for (i = 0; i < V4LCONTROL_COUNT; i++) {
		if (!(data->controls & (1 << i)) ||
				ctrl->id != fake_controls[i].id)
			continue;

		/* Reject values outside the fake control's range */
		if (ctrl->value < fake_controls[i].minimum ||
				ctrl->value > fake_controls[i].maximum) {
			errno = EINVAL;
			return -1;
		}

		data->shm_values[i] = ctrl->value;
		return 0;
	}

	/* Not one of ours: forward to the driver */
	return SYS_IOCTL(data->fd, VIDIOC_S_CTRL, arg);
}
Esempio n. 15
0
/*
 * Release the mmap buffers that were requested on behalf of read()
 * (and only those), by issuing a zero-count VIDIOC_REQBUFS.
 */
static void v4l2_unrequest_read_buffers(int index)
{
	struct v4l2_requestbuffers req;

	if (!(devices[index].flags & V4L2_BUFFERS_REQUESTED_BY_READ) ||
			devices[index].no_frames == 0)
		return;

	/* (Un)Request buffers, note not all driver support this, and those
	   who do not support it don't need it. */
	memset(&req, 0, sizeof(req));	/* reserved[] must be zeroed for the kernel */
	req.count = 0;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (SYS_IOCTL(devices[index].fd, VIDIOC_REQBUFS, &req) < 0)
		return;

	devices[index].no_frames = MIN(req.count, V4L2_MAX_NO_FRAMES);
	if (devices[index].no_frames == 0)
		devices[index].flags &= ~V4L2_BUFFERS_REQUESTED_BY_READ;
}
Esempio n. 16
0
/*
 * Start streaming on the capture device if it isn't streaming already.
 * On success the first-frame error grace counter is (re)armed.
 */
static int v4l2_streamon(int index)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	int result;

	if (devices[index].flags & V4L2_STREAMON)
		return 0;

	result = SYS_IOCTL(devices[index].fd, VIDIOC_STREAMON, &type);
	if (result) {
		int saved_err = errno;

		V4L2_LOG_ERR("turning on stream: %s\n", strerror(errno));
		errno = saved_err;
		return result;
	}

	devices[index].flags |= V4L2_STREAMON;
	devices[index].first_frame = V4L2_IGNORE_FIRST_FRAME_ERRORS;

	return 0;
}
Esempio n. 17
0
/*
 * mmap() any kernel buffers that are not mapped yet. Returns 0 on
 * success, or the first failing QUERYBUF/mmap result with errno
 * preserved.
 */
static int v4l2_map_buffers(int index)
{
	int result = 0;
	unsigned int i;
	struct v4l2_buffer buf;

	for (i = 0; i < devices[index].no_frames; i++) {
		/* Skip buffers already mapped by an earlier call */
		if (devices[index].frame_pointers[i] != MAP_FAILED)
			continue;

		/* Zero buf: the kernel expects the reserved fields to be
		   cleared, and buf was previously used uninitialized. */
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		result = SYS_IOCTL(devices[index].fd, VIDIOC_QUERYBUF, &buf);
		if (result) {
			int saved_err = errno;

			V4L2_LOG_ERR("querying buffer %u: %s\n", i, strerror(errno));
			errno = saved_err;
			break;
		}

		devices[index].frame_pointers[i] = (void *)SYS_MMAP(NULL,
				(size_t)buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, devices[index].fd,
				buf.m.offset);
		if (devices[index].frame_pointers[i] == MAP_FAILED) {
			int saved_err = errno;

			V4L2_LOG_ERR("mmapping buffer %u: %s\n", i, strerror(errno));
			errno = saved_err;
			result = -1;
			break;
		}
		V4L2_LOG("mapped buffer %u at %p\n", i,
				devices[index].frame_pointers[i]);

		devices[index].frame_sizes[i] = buf.length;
	}

	return result;
}
Esempio n. 18
0
/*
 * VIDIOC_QUERYCTRL handler: serve our fake (software emulated)
 * controls and merge them into the driver's enumeration, including
 * V4L2_CTRL_FLAG_NEXT_CTRL based iteration.
 */
int v4lcontrol_vidioc_queryctrl(struct v4lcontrol_data *data, void *arg)
{
	int i;
	struct v4l2_queryctrl *ctrl = arg;
	int retval;
	uint32_t orig_id = ctrl->id;

	/* if we have an exact match return it */
	for (i = 0; i < V4LCONTROL_COUNT; i++)
		if ((data->controls & (1 << i)) &&
				ctrl->id == fake_controls[i].id) {
			v4lcontrol_copy_queryctrl(data, ctrl, i);
			return 0;
		}

	/* find out what the kernel driver would respond. */
	retval = SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, arg);

	if ((data->priv_flags & V4LCONTROL_SUPPORTS_NEXT_CTRL) &&
			(orig_id & V4L2_CTRL_FLAG_NEXT_CTRL)) {
		/* If the hardware has no more controls check if we still have any
		   fake controls with a higher id then the hardware's highest */
		if (retval)
			ctrl->id = V4L2_CTRL_ID_MASK;	/* sentinel: maximum id */

		/* If any of our controls have an id > orig_id but less than
		   ctrl->id then return that control instead. Note we do not
		   break when we have a match, but keep iterating, so that
		   we end up with the fake ctrl with the lowest CID > orig_id. */
		for (i = 0; i < V4LCONTROL_COUNT; i++)
			if ((data->controls & (1 << i)) &&
					(fake_controls[i].id > (orig_id & ~V4L2_CTRL_FLAG_NEXT_CTRL)) &&
					(fake_controls[i].id <= ctrl->id)) {
				v4lcontrol_copy_queryctrl(data, ctrl, i);
				retval = 0;
			}
	}

	return retval;
}
Esempio n. 19
0
/*
 * Stop streaming on the capture device if it is currently streaming.
 * A successful stream-off implicitly dequeues all of our buffers.
 */
static int v4l2_streamoff(int index)
{
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	int result;

	if (!(devices[index].flags & V4L2_STREAMON))
		return 0;

	result = SYS_IOCTL(devices[index].fd, VIDIOC_STREAMOFF, &type);
	if (result) {
		int saved_err = errno;

		V4L2_LOG_ERR("turning off stream: %s\n", strerror(errno));
		errno = saved_err;
		return result;
	}

	devices[index].flags &= ~V4L2_STREAMON;

	/* Stream off also unqueues all our buffers! */
	devices[index].frame_queued = 0;

	return 0;
}
Esempio n. 20
0
/*
 * Enumerate capture pixel formats, appending the formats libv4lconvert
 * can emulate after the driver's native ones (or exposing only the
 * emulated ones when so configured).
 * See libv4lconvert.h for description of in / out parameters.
 */
int v4lconvert_enum_fmt(struct v4lconvert_data *data, struct v4l2_fmtdesc *fmt)
{
	unsigned int faked_fmts[ARRAY_SIZE(supported_dst_pixfmts)];
	int no_faked_fmts = 0;
	int i;

	/* Indices into the driver's own format list are answered by it */
	if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
			(!v4lconvert_supported_dst_fmt_only(data) &&
			 fmt->index < data->no_formats))
		return SYS_IOCTL(data->fd, VIDIOC_ENUM_FMT, fmt);

	/* Build the list of formats we fake on top of the driver's */
	for (i = 0; i < ARRAY_SIZE(supported_dst_pixfmts); i++) {
		if (!v4lconvert_supported_dst_fmt_only(data) &&
				(data->supported_src_formats & (1 << i)))
			continue;
		faked_fmts[no_faked_fmts++] = supported_dst_pixfmts[i].fmt;
	}

	i = fmt->index;
	if (!v4lconvert_supported_dst_fmt_only(data))
		i -= data->no_formats;

	if (i >= no_faked_fmts) {
		errno = EINVAL;
		return -1;
	}

	fmt->flags = V4L2_FMT_FLAG_EMULATED;
	fmt->pixelformat = faked_fmts[i];
	/* Use the fourcc characters as a human readable description */
	fmt->description[0] = faked_fmts[i] & 0xff;
	fmt->description[1] = (faked_fmts[i] >> 8) & 0xff;
	fmt->description[2] = (faked_fmts[i] >> 16) & 0xff;
	fmt->description[3] = faked_fmts[i] >> 24;
	fmt->description[4] = '\0';
	memset(fmt->reserved, 0, sizeof(fmt->reserved));

	return 0;
}
Esempio n. 21
0
/*
 * Queue buffer buffer_index for capture, unless our bookkeeping says
 * it is queued already. Returns 0 on success or the QBUF result with
 * errno preserved.
 */
static int v4l2_queue_read_buffer(int index, int buffer_index)
{
	const int queued_mask = 1 << buffer_index;
	struct v4l2_buffer buf;
	int result;

	if (devices[index].frame_queued & queued_mask)
		return 0;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = buffer_index;
	result = SYS_IOCTL(devices[index].fd, VIDIOC_QBUF, &buf);
	if (result != 0) {
		int saved_err = errno;

		V4L2_LOG_ERR("queuing buf %d: %s\n", buffer_index, strerror(errno));
		errno = saved_err;
		return result;
	}

	devices[index].frame_queued |= queued_mask;
	return 0;
}
Esempio n. 22
0
/*
 * Find the best source format + resolution the driver can deliver for
 * the requested destination format. On success dest_fmt holds the
 * chosen resolution with the desired pixelformat and src_fmt the
 * matching driver-native format. Returns 0 on success, -1 when no
 * supported source format works.
 */
static int v4lconvert_do_try_format(struct v4lconvert_data *data,
		struct v4l2_format *dest_fmt, struct v4l2_format *src_fmt)
{
	int i, size_x_diff, size_y_diff, rank, best_rank = 0;
	unsigned int size_diff, closest_fmt_size_diff = -1;	/* starts at UINT_MAX */
	unsigned int desired_pixfmt = dest_fmt->fmt.pix.pixelformat;
	struct v4l2_format try_fmt, closest_fmt = { .type = 0 };

	/* UVC devices have their own specialized try_format path */
	if (data->flags & V4LCONVERT_IS_UVC)
		return v4lconvert_do_try_format_uvc(data, dest_fmt, src_fmt);

	for (i = 0; i < ARRAY_SIZE(supported_src_pixfmts); i++) {
		/* is this format supported? */
		if (!(data->supported_src_formats & (1 << i)))
			continue;

		try_fmt = *dest_fmt;
		try_fmt.fmt.pix.pixelformat = supported_src_pixfmts[i].fmt;
		if (SYS_IOCTL(data->fd, VIDIOC_TRY_FMT, &try_fmt))
			continue;

		/* Skip formats the driver silently substituted */
		if (try_fmt.fmt.pix.pixelformat !=
		    supported_src_pixfmts[i].fmt)
			continue;

		/* Did we get a better match then before? Distance is the
		   squared euclidean distance in resolution space. */
		size_x_diff = (int)try_fmt.fmt.pix.width -
			      (int)dest_fmt->fmt.pix.width;
		size_y_diff = (int)try_fmt.fmt.pix.height -
			      (int)dest_fmt->fmt.pix.height;
		size_diff = size_x_diff * size_x_diff +
			    size_y_diff * size_y_diff;

		rank = v4lconvert_get_rank(data, i,
					   try_fmt.fmt.pix.width,
					   try_fmt.fmt.pix.height,
					   desired_pixfmt);
		/* Closer resolution wins; rank (lower is better) breaks ties */
		if (size_diff < closest_fmt_size_diff ||
		    (size_diff == closest_fmt_size_diff && rank < best_rank)) {
			closest_fmt = try_fmt;
			closest_fmt_size_diff = size_diff;
			best_rank = rank;
		}
	}

	/* type == 0 means no candidate was ever accepted */
	if (closest_fmt.type == 0)
		return -1;

	*dest_fmt = closest_fmt;
	if (closest_fmt.fmt.pix.pixelformat != desired_pixfmt)
		dest_fmt->fmt.pix.pixelformat = desired_pixfmt;
	*src_fmt = closest_fmt;

	return 0;
}

/*
 * Patch up bytesperline and sizeimage for the pixel formats that
 * libv4lconvert produces itself, since the values negotiated with the
 * driver describe the native (source) format instead.
 */
void v4lconvert_fixup_fmt(struct v4l2_format *fmt)
{
	unsigned int w = fmt->fmt.pix.width;
	unsigned int h = fmt->fmt.pix.height;

	switch (fmt->fmt.pix.pixelformat) {
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_BGR24:
		/* Packed: 3 bytes per pixel, no padding */
		fmt->fmt.pix.bytesperline = w * 3;
		fmt->fmt.pix.sizeimage = w * h * 3;
		break;
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
		/* Planar 4:2:0: full-res luma plus half-sized chroma planes */
		fmt->fmt.pix.bytesperline = w;
		fmt->fmt.pix.sizeimage = w * h * 3 / 2;
		break;
	}
}

/*
 * Negotiate dest_fmt with the hardware, falling back to emulation when
 * the driver cannot provide the requested format/resolution directly.
 * See libv4lconvert.h for description of in / out parameters.
 */
int v4lconvert_try_format(struct v4lconvert_data *data,
		struct v4l2_format *dest_fmt, struct v4l2_format *src_fmt)
{
	int i, result;
	unsigned int desired_width = dest_fmt->fmt.pix.width;
	unsigned int desired_height = dest_fmt->fmt.pix.height;
	struct v4l2_format try_src, try_dest, try2_src, try2_dest;

	/* When only emulated formats are exposed, map unsupported requests
	   to RGB24 instead of failing */
	if (dest_fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
			v4lconvert_supported_dst_fmt_only(data) &&
			!v4lconvert_supported_dst_format(dest_fmt->fmt.pix.pixelformat))
		dest_fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;

	try_dest = *dest_fmt;

	/* Can we do conversion to the requested format & type? */
	if (!v4lconvert_supported_dst_format(dest_fmt->fmt.pix.pixelformat) ||
			dest_fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
			v4lconvert_do_try_format(data, &try_dest, &try_src)) {
		/* No: let the driver negotiate directly */
		result = SYS_IOCTL(data->fd, VIDIOC_TRY_FMT, dest_fmt);
		if (src_fmt)
			*src_fmt = *dest_fmt;
		return result;
	}

	/* In case of a non exact resolution match, try again with a slightly larger
	   resolution as some weird devices are not able to crop of the number of
	   extra (border) pixels most sensors have compared to standard resolutions,
	   which we will then just crop of in software */
	if (try_dest.fmt.pix.width != desired_width ||
			try_dest.fmt.pix.height != desired_height) {
		try2_dest = *dest_fmt;
		try2_dest.fmt.pix.width  = desired_width + 7;
		try2_dest.fmt.pix.height = desired_height + 1;
		result = v4lconvert_do_try_format(data, &try2_dest, &try2_src);
		if (result == 0 &&
				try2_dest.fmt.pix.width >= desired_width &&
				try2_dest.fmt.pix.width <= desired_width + 7 &&
				try2_dest.fmt.pix.height >= desired_height &&
				try2_dest.fmt.pix.height <= desired_height + 1) {
			/* Success! */
			try2_dest.fmt.pix.width = desired_width;
			try2_dest.fmt.pix.height = desired_height;
			try_dest = try2_dest;
			try_src = try2_src;
		}
	}

	/* In case of a non exact resolution match, see if this is a well known
	   resolution some apps are hardcoded too and try to give the app what it
	   asked for by cropping a slightly larger resolution or adding a small
	   black border to a slightly smaller resolution */
	if (try_dest.fmt.pix.width != desired_width ||
			try_dest.fmt.pix.height != desired_height) {
		for (i = 0; i < ARRAY_SIZE(v4lconvert_crop_res); i++) {
			if (v4lconvert_crop_res[i][0] == desired_width &&
					v4lconvert_crop_res[i][1] == desired_height) {
				try2_dest = *dest_fmt;

				/* Note these are chosen so that cropping to vga res just works for
				   vv6410 sensor cams, which have 356x292 and 180x148 */
				try2_dest.fmt.pix.width = desired_width * 113 / 100;
				try2_dest.fmt.pix.height = desired_height * 124 / 100;
				result = v4lconvert_do_try_format(data, &try2_dest, &try2_src);
				if (result == 0 &&
						(/* Add a small black border of max 16 pixels */
						 (try2_dest.fmt.pix.width >= desired_width - 16 &&
						  try2_dest.fmt.pix.width <= desired_width &&
						  try2_dest.fmt.pix.height >= desired_height - 16 &&
						  try2_dest.fmt.pix.height <= desired_height) ||
						 /* Standard cropping to max 80% of actual width / height */
						 (try2_dest.fmt.pix.width >= desired_width &&
						  try2_dest.fmt.pix.width <= desired_width * 5 / 4 &&
						  try2_dest.fmt.pix.height >= desired_height &&
						  try2_dest.fmt.pix.height <= desired_height * 5 / 4) ||
						 /* Downscale 2x + cropping to max 80% of actual width / height */
						 (try2_dest.fmt.pix.width >= desired_width * 2 &&
						  try2_dest.fmt.pix.width <= desired_width * 5 / 2 &&
						  try2_dest.fmt.pix.height >= desired_height * 2 &&
						  try2_dest.fmt.pix.height <= desired_height * 5 / 2))) {
					/* Success! */
					try2_dest.fmt.pix.width = desired_width;
					try2_dest.fmt.pix.height = desired_height;
					try_dest = try2_dest;
					try_src = try2_src;
				}
				break;
			}
		}
	}

	/* Some applications / libs (*cough* gstreamer *cough*) will not work
	   correctly with planar YUV formats when the width is not a multiple of 8
	   or the height is not a multiple of 2. With RGB formats these apps require
	   the width to be a multiple of 4. We apply the same rounding to all
	   formats to not end up with 2 close but different resolutions. */
	try_dest.fmt.pix.width &= ~7;
	try_dest.fmt.pix.height &= ~1;

	/* Are we converting / cropping ? */
	if (try_src.fmt.pix.width != try_dest.fmt.pix.width ||
			try_src.fmt.pix.height != try_dest.fmt.pix.height ||
			try_src.fmt.pix.pixelformat != try_dest.fmt.pix.pixelformat)
		v4lconvert_fixup_fmt(&try_dest);

	*dest_fmt = try_dest;
	if (src_fmt)
		*src_fmt = try_src;

	return 0;
}
Esempio n. 23
0
/* auto gain and exposure algorithm based on the knee algorithm described here:
   http://ytse.tricolour.net/docs/LowLightOptimization.html */
/*
 * Measure the average luminance of the center quarter of the frame and
 * nudge the device's exposure and gain controls one knee-step at a time
 * towards the configured autogain target. Returns 0 in all cases (the
 * lookup tables themselves are not modified here).
 */
static int autogain_calculate_lookup_tables(
  struct v4lprocessing_data *data,
  unsigned char *buf, const struct v4l2_format *fmt)
{
  int x, y, target, steps, avg_lum = 0;
  int gain, exposure, orig_gain, orig_exposure, exposure_low;
  struct v4l2_control ctrl;
  struct v4l2_queryctrl gainctrl, expoctrl;
  const int deadzone = 6;

  /* Read the current exposure control; bail out if the device lacks it */
  ctrl.id = V4L2_CID_EXPOSURE;
  expoctrl.id = V4L2_CID_EXPOSURE;
  if (SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &expoctrl) ||
      SYS_IOCTL(data->fd, VIDIOC_G_CTRL, &ctrl))
    return 0;
  exposure = orig_exposure = ctrl.value;
  /* Determine a value below which we try to not lower the exposure,
     as most exposure controls tend to jump with big steps in the low
     range, causing oscilation, so we prefer to use gain when exposure
     has hit this value */
  exposure_low = expoctrl.maximum / 10;
  /* If we have a fine grained exposure control only avoid the last 10 steps */
  if (exposure_low > 10)
    exposure_low = 10;
  exposure_low += expoctrl.minimum;

  /* Read the current gain control; bail out if the device lacks it */
  ctrl.id = V4L2_CID_GAIN;
  gainctrl.id = V4L2_CID_GAIN;
  if (SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &gainctrl) ||
      SYS_IOCTL(data->fd, VIDIOC_G_CTRL, &ctrl))
    return 0;
  gain = orig_gain = ctrl.value;

  /* Average the luminance over the center quarter of the frame */
  switch (fmt->fmt.pix.pixelformat) {
    case V4L2_PIX_FMT_SGBRG8:
    case V4L2_PIX_FMT_SGRBG8:
    case V4L2_PIX_FMT_SBGGR8:
    case V4L2_PIX_FMT_SRGGB8:
      /* Raw bayer: 1 byte per pixel */
      buf += fmt->fmt.pix.height * fmt->fmt.pix.bytesperline / 4 +
	     fmt->fmt.pix.width / 4;

      for (y = 0; y < fmt->fmt.pix.height / 2; y++) {
	for (x = 0; x < fmt->fmt.pix.width / 2; x++) {
	  avg_lum += *buf++;
	}
	buf += fmt->fmt.pix.bytesperline - fmt->fmt.pix.width / 2;
      }
      avg_lum /= fmt->fmt.pix.height * fmt->fmt.pix.width / 4;
      break;

    case V4L2_PIX_FMT_RGB24:
    case V4L2_PIX_FMT_BGR24:
      /* Packed RGB: sum all 3 components, 3 bytes per pixel */
      buf += fmt->fmt.pix.height * fmt->fmt.pix.bytesperline / 4 +
	     fmt->fmt.pix.width * 3 / 4;

      for (y = 0; y < fmt->fmt.pix.height / 2; y++) {
	for (x = 0; x < fmt->fmt.pix.width / 2; x++) {
	  avg_lum += *buf++;
	  avg_lum += *buf++;
	  avg_lum += *buf++;
	}
	buf += fmt->fmt.pix.bytesperline - fmt->fmt.pix.width * 3 / 2;
      }
      avg_lum /= fmt->fmt.pix.height * fmt->fmt.pix.width * 3 / 4;
      break;
  }

  /* If we are off a multiple of deadzone, do multiple steps to reach the
     desired lumination fast (with the risc of a slight overshoot) */
  target = v4lcontrol_get_ctrl(data->control, V4LCONTROL_AUTOGAIN_TARGET);
  steps = (target - avg_lum) / deadzone;

  /* If we were decreasing and are now increasing, or vica versa, half the
     number of steps to avoid overshooting and oscilating */
  if ((steps > 0 && data->last_gain_correction < 0) ||
      (steps < 0 && data->last_gain_correction > 0))
    steps /= 2;

  /* Walk the knee ladder one step at a time, preferring exposure in the
     mid range and gain near the extremes */
  for (x = 0; x < abs(steps); x++) {
    if (avg_lum > target) {
      if (exposure > expoctrl.default_value)
	exposure--;
      else if (gain > gainctrl.default_value)
	gain--;
      else if (exposure > exposure_low)
	exposure--;
      else if (gain > gainctrl.minimum)
	gain--;
      else if (exposure > expoctrl.minimum)
	exposure--;
      else
	break;
    } else {
      if (exposure < exposure_low)
	exposure++;
      else if (gain < gainctrl.default_value)
	gain++;
      else if (exposure < expoctrl.default_value)
	exposure++;
      else if (gain < gainctrl.maximum)
	gain++;
      else if (exposure < expoctrl.maximum)
	exposure++;
      else
	break;
    }
  }

  /* Remember the correction direction for the oscillation damping above */
  if (steps)
    data->last_gain_correction = steps;

  /* Push the new settings to the hardware (only when they changed) */
  if (gain != orig_gain) {
    ctrl.id = V4L2_CID_GAIN;
    ctrl.value = gain;
    SYS_IOCTL(data->fd, VIDIOC_S_CTRL, &ctrl);
  }
  if (exposure != orig_exposure) {
    ctrl.id = V4L2_CID_EXPOSURE;
    ctrl.value = exposure;
    SYS_IOCTL(data->fd, VIDIOC_S_CTRL, &ctrl);
  }

  return 0;
}
Esempio n. 24
0
/*
 * Dequeue the next filled buffer from the real device and convert it
 * from src_fmt to dest_fmt.  The converted frame is written to dest
 * when dest is non-NULL, otherwise to this buffer's slot inside
 * convert_mmap_buf (offset buf->index * V4L2_FRAME_BUF_SIZE).
 *
 * Returns the converted frame size in bytes (the return value of
 * v4lconvert_convert()) on success, or a negative value with errno set
 * on failure.  A frame that fails conversion is re-queued and the
 * dequeue+convert is retried, up to max_tries times in total; during
 * the first few frames of a stream conversion errors are always
 * downgraded to EAGAIN (see comment below).
 */
static int v4l2_dequeue_and_convert(int index, struct v4l2_buffer *buf,
		unsigned char *dest, int dest_size)
{
	const int max_tries = 10;
	int result, tries = max_tries;

	/* Make sure we have the real v4l2 buffers mapped */
	result = v4l2_map_buffers(index);
	if (result)
		return result;

	do {
		result = SYS_IOCTL(devices[index].fd, VIDIOC_DQBUF, buf);
		if (result) {
			/* EAGAIN (non-blocking fd, no frame ready) is normal,
			   don't spam the log for it */
			if (errno != EAGAIN) {
				int saved_err = errno;

				V4L2_LOG_ERR("dequeuing buf: %s\n", strerror(errno));
				errno = saved_err;
			}
			return result;
		}

		/* The buffer is now in our hands, clear its queued bit */
		devices[index].frame_queued &= ~(1 << buf->index);

		result = v4lconvert_convert(devices[index].convert,
				&devices[index].src_fmt, &devices[index].dest_fmt,
				devices[index].frame_pointers[buf->index],
				buf->bytesused, dest ? dest : (devices[index].convert_mmap_buf +
					buf->index * V4L2_FRAME_BUF_SIZE), dest_size);

		if (devices[index].first_frame) {
			/* Always treat convert errors as EAGAIN during the first few frames, as
			   some cams produce bad frames at the start of the stream
			   (hsync and vsync still syncing ??). */
			if (result < 0)
				errno = EAGAIN;
			devices[index].first_frame--;
		}

		if (result < 0) {
			int saved_err = errno;

			if (errno == EAGAIN)
				V4L2_LOG("warning error while converting frame data: %s",
						v4lconvert_get_error_message(devices[index].convert));
			else
				V4L2_LOG_ERR("converting / decoding frame data: %s",
						v4lconvert_get_error_message(devices[index].convert));

			/* Give the buffer back to the driver so the retry (or the
			   app's next dequeue) has something to work with */
			v4l2_queue_read_buffer(index, buf->index);
			errno = saved_err;
		}
		tries--;
	} while (result < 0 && errno == EAGAIN && tries);

	/* Retries exhausted while still getting decode errors */
	if (result < 0 && errno == EAGAIN) {
		V4L2_LOG_ERR("got %d consecutive frame decode errors, last error: %s",
				max_tries, v4lconvert_get_error_message(devices[index].convert));
		errno = EAGAIN;
	}

	return result;
}
Esempio n. 25
0
/* auto gain and exposure algorithm based on the knee algorithm described here:
   http://ytse.tricolour.net/docs/LowLightOptimization.html

   Measures the average luminosity of the center quarter of the frame in
   buf and nudges the device's exposure and gain controls towards the
   configured autogain target, preferring exposure or gain in "knee"
   order (see the if/else ladders below).  Always returns 0, also when
   the required controls are missing or the pixelformat is unhandled. */
static int autogain_calculate_lookup_tables(
  struct v4lprocessing_data *data,
  unsigned char *buf, const struct v4l2_format *fmt)
{
  int x, y, target, steps, avg_lum = 0;
  int gain, exposure, orig_gain, orig_exposure, exposure_low;
  struct v4l2_control ctrl;
  struct v4l2_queryctrl gainctrl, expoctrl;
  const int deadzone = 6;  /* luminosity delta considered "close enough" */

  /* Query the exposure control's range and current value; without an
     exposure control software autogain cannot work, so bail out. */
  ctrl.id = V4L2_CID_EXPOSURE;
  expoctrl.id = V4L2_CID_EXPOSURE;
  if (SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &expoctrl) ||
      SYS_IOCTL(data->fd, VIDIOC_G_CTRL, &ctrl))
    return 0;
  exposure = orig_exposure = ctrl.value;
  /* Determine a value below which we try to not lower the exposure,
     as most exposure controls tend to jump with big steps in the low
     range, causing oscilation, so we prefer to use gain when exposure
     has hit this value */
  exposure_low = expoctrl.maximum / 10;
  /* If we have a fine grained exposure control only avoid the last 10 steps */
  steps = exposure_low / expoctrl.step;
  if (steps > 10)
    steps = 10;
  exposure_low = steps * expoctrl.step + expoctrl.minimum;

  /* Same for the gain control */
  ctrl.id = V4L2_CID_GAIN;
  gainctrl.id = V4L2_CID_GAIN;
  if (SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &gainctrl) ||
      SYS_IOCTL(data->fd, VIDIOC_G_CTRL, &ctrl))
    return 0;
  gain = orig_gain = ctrl.value;

  /* Average the luminosity over the center width/2 x height/2 region:
     skip the first height/4 lines and width/4 pixels, then walk half
     the lines / half the pixels, skipping to the next line with the
     bytesperline remainder.  NOTE: for pixelformats not listed below
     avg_lum stays 0 -- presumably those never reach this path, but
     a default case would be safer; TODO confirm with callers. */
  switch (fmt->fmt.pix.pixelformat) {
    case V4L2_PIX_FMT_SGBRG8:
    case V4L2_PIX_FMT_SGRBG8:
    case V4L2_PIX_FMT_SBGGR8:
    case V4L2_PIX_FMT_SRGGB8:
      /* Bayer: 1 byte per pixel */
      buf += fmt->fmt.pix.height * fmt->fmt.pix.bytesperline / 4 +
	     fmt->fmt.pix.width / 4;

      for (y = 0; y < fmt->fmt.pix.height / 2; y++) {
	for (x = 0; x < fmt->fmt.pix.width / 2; x++) {
	  avg_lum += *buf++;
	}
	buf += fmt->fmt.pix.bytesperline - fmt->fmt.pix.width / 2;
      }
      avg_lum /= fmt->fmt.pix.height * fmt->fmt.pix.width / 4;
      break;

    case V4L2_PIX_FMT_RGB24:
    case V4L2_PIX_FMT_BGR24:
      /* 3 bytes per pixel; all three color components are summed */
      buf += fmt->fmt.pix.height * fmt->fmt.pix.bytesperline / 4 +
	     fmt->fmt.pix.width * 3 / 4;

      for (y = 0; y < fmt->fmt.pix.height / 2; y++) {
	for (x = 0; x < fmt->fmt.pix.width / 2; x++) {
	  avg_lum += *buf++;
	  avg_lum += *buf++;
	  avg_lum += *buf++;
	}
	buf += fmt->fmt.pix.bytesperline - fmt->fmt.pix.width * 3 / 2;
      }
      avg_lum /= fmt->fmt.pix.height * fmt->fmt.pix.width * 3 / 4;
      break;
  }

  /* If we are off a multiple of deadzone, do multiple steps to reach the
     desired lumination fast (with the risc of a slight overshoot) */
  target = v4lcontrol_get_ctrl(data->control, V4LCONTROL_AUTOGAIN_TARGET);
  steps = (target - avg_lum) / deadzone;

  /* If we were decreasing and are now increasing, or vica versa, half the
     number of steps to avoid overshooting and oscilating */
  if ((steps > 0 && data->last_gain_correction < 0) ||
      (steps < 0 && data->last_gain_correction > 0))
    steps /= 2;

  if (steps == 0)
    return 0; /* Nothing to do */

  /* Knee ordering for darkening: exposure down to its default, then
     gain down to its default, then exposure down to exposure_low, then
     gain down to its minimum, finally exposure down to its minimum.
     autogain_adjust() presumably applies steps clamped to the given
     limit -- defined elsewhere in this file, confirm there. */
  if (steps < 0) {
    if (exposure > expoctrl.default_value)
      autogain_adjust(&expoctrl, &exposure, steps, expoctrl.default_value);
    else if (gain > gainctrl.default_value)
      autogain_adjust(&gainctrl, &gain, steps, gainctrl.default_value);
    else if (exposure > exposure_low)
      autogain_adjust(&expoctrl, &exposure, steps, exposure_low);
    else if (gain > gainctrl.minimum)
      autogain_adjust(&gainctrl, &gain, steps, gainctrl.minimum);
    else if (exposure > expoctrl.minimum)
      autogain_adjust(&expoctrl, &exposure, steps, expoctrl.minimum);
    else
      steps = 0;
  } else {
    /* Brightening walks the same knees in the opposite direction */
    if (exposure < exposure_low)
      autogain_adjust(&expoctrl, &exposure, steps, exposure_low);
    else if (gain < gainctrl.default_value)
      autogain_adjust(&gainctrl, &gain, steps, gainctrl.default_value);
    else if (exposure < expoctrl.default_value)
      autogain_adjust(&expoctrl, &exposure, steps, expoctrl.default_value);
    else if (gain < gainctrl.maximum)
      autogain_adjust(&gainctrl, &gain, steps, gainctrl.maximum);
    else if (exposure < expoctrl.maximum)
      autogain_adjust(&expoctrl, &exposure, steps, expoctrl.maximum);
    else
      steps = 0;
  }

  if (steps) {
    data->last_gain_correction = steps;
    /* We are still settling down, force the next update sooner. Note we
       skip the next frame as that is still captured with the old settings,
       and another one just to be sure (because if we re-adjust based
       on the old settings we might overshoot). */
    data->lookup_table_update_counter = V4L2PROCESSING_UPDATE_RATE - 2;
  }

  /* Push the changed control values back to the driver (best effort,
     errors from S_CTRL are ignored) */
  if (gain != orig_gain) {
    ctrl.id = V4L2_CID_GAIN;
    ctrl.value = gain;
    SYS_IOCTL(data->fd, VIDIOC_S_CTRL, &ctrl);
  }
  if (exposure != orig_exposure) {
    ctrl.id = V4L2_CID_EXPOSURE;
    ctrl.value = exposure;
    SYS_IOCTL(data->fd, VIDIOC_S_CTRL, &ctrl);
  }

  return 0;
}
Esempio n. 26
0
struct v4lconvert_data *v4lconvert_create(int fd)
{
	int i, j;
	struct v4lconvert_data *data = calloc(1, sizeof(struct v4lconvert_data));
	struct v4l2_capability cap;
	/* This keeps tracks of devices which have only formats for which apps
	   most likely will need conversion and we can thus safely add software
	   processing controls without a performance impact. */
	int always_needs_conversion = 1;

	if (!data) {
		fprintf(stderr, "libv4lconvert: error: out of memory!\n");
		return NULL;
	}

	data->fd = fd;
	data->decompress_pid = -1;
	data->fps = 30;

	/* Check supported formats */
	for (i = 0; ; i++) {
		struct v4l2_fmtdesc fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

		fmt.index = i;

		if (SYS_IOCTL(data->fd, VIDIOC_ENUM_FMT, &fmt))
			break;

		for (j = 0; j < ARRAY_SIZE(supported_src_pixfmts); j++)
			if (fmt.pixelformat == supported_src_pixfmts[j].fmt)
				break;

		if (j < ARRAY_SIZE(supported_src_pixfmts)) {
			data->supported_src_formats |= 1 << j;
			v4lconvert_get_framesizes(data, fmt.pixelformat, j);
			if (!supported_src_pixfmts[j].needs_conversion)
				always_needs_conversion = 0;
		} else
			always_needs_conversion = 0;
	}

	data->no_formats = i;

	/* Check if this cam has any special flags */
	if (SYS_IOCTL(data->fd, VIDIOC_QUERYCAP, &cap) == 0) {
		if (!strcmp((char *)cap.driver, "uvcvideo"))
			data->flags |= V4LCONVERT_IS_UVC;

		if ((cap.capabilities & 0xff) & ~V4L2_CAP_VIDEO_CAPTURE)
			always_needs_conversion = 0;
	}

	data->control = v4lcontrol_create(fd, always_needs_conversion);
	if (!data->control) {
		free(data);
		return NULL;
	}
	data->bandwidth = v4lcontrol_get_bandwidth(data->control);
	data->control_flags = v4lcontrol_get_flags(data->control);
	if (data->control_flags & V4LCONTROL_FORCE_TINYJPEG)
		data->flags |= V4LCONVERT_USE_TINYJPEG;

	data->processing = v4lprocessing_create(fd, data->control);
	if (!data->processing) {
		v4lcontrol_destroy(data->control);
		free(data);
		return NULL;
	}

	return data;
}
Esempio n. 27
0
				frmival_pixformat >> 24,
				frmival->width, frmival->height,
				dest_pixformat & 0xff,
				(dest_pixformat >> 8) & 0xff,
				(dest_pixformat >> 16) & 0xff,
				dest_pixformat >> 24,
				dest_fmt.fmt.pix.width , dest_fmt.fmt.pix.height);
		errno = EINVAL;
		return -1;
	}

	/* Enumerate the frameintervals of the source format we will be using */
	frmival->pixel_format = src_fmt.fmt.pix.pixelformat;
	frmival->width = src_fmt.fmt.pix.width;
	frmival->height = src_fmt.fmt.pix.height;
	res = SYS_IOCTL(data->fd, VIDIOC_ENUM_FRAMEINTERVALS, frmival);
	if (res) {
		int dest_pixfmt = dest_fmt.fmt.pix.pixelformat;
		int src_pixfmt  = src_fmt.fmt.pix.pixelformat;

		V4LCONVERT_ERR("Could not enum frameival index: %d for: %c%c%c%c %dx%d "
				"using src: %c%c%c%c %dx%d, error: %s\n",
				frmival->index,
				dest_pixfmt & 0xff,
				(dest_pixfmt >> 8) & 0xff,
				(dest_pixfmt >> 16) & 0xff,
				dest_pixfmt >> 24,
				dest_fmt.fmt.pix.width , dest_fmt.fmt.pix.height,
				src_pixfmt & 0xff,
				(src_pixfmt >> 8) & 0xff,
				(src_pixfmt >> 16) & 0xff,
Esempio n. 28
0
/*
 * Register an already-open v4l2 device fd with libv4l2.
 *
 * For video capture devices supporting streaming and/or read() this
 * claims a slot in the devices[] array and sets up a libv4lconvert
 * context, so that subsequent v4l2_ioctl()/v4l2_read() calls can do
 * transparent format conversion.  Devices that are not capture devices
 * (or support neither streaming nor read) are returned unwrapped.
 *
 * Returns fd on success, or -1 with errno set on failure.
 */
int v4l2_fd_open(int fd, int v4l2_flags)
{
	int i, index;
	char *lfname;
	struct v4l2_capability cap;
	struct v4l2_format fmt;
	struct v4lconvert_data *convert;

	/* If no log file was set by the app, see if one was specified through the
	   environment */
	if (!v4l2_log_file) {
		lfname = getenv("LIBV4L2_LOG_FILENAME");
		if (lfname)
			v4l2_log_file = fopen(lfname, "w");
	}

	/* check that this is an v4l2 device */
	if (SYS_IOCTL(fd, VIDIOC_QUERYCAP, &cap)) {
		int saved_err = errno;

		V4L2_LOG_ERR("getting capabilities: %s\n", strerror(errno));
		errno = saved_err;
		return -1;
	}

	/* we only add functionality for video capture devices */
	if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) ||
			!(cap.capabilities & (V4L2_CAP_STREAMING | V4L2_CAP_READWRITE)))
		return fd;

	/* Get current cam format */
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (SYS_IOCTL(fd, VIDIOC_G_FMT, &fmt)) {
		int saved_err = errno;

		V4L2_LOG_ERR("getting pixformat: %s\n", strerror(errno));
		errno = saved_err;
		return -1;
	}

	/* init libv4lconvert */
	convert = v4lconvert_create(fd);
	if (!convert)
		return -1;

	/* So we have a v4l2 capture device, register it in our devices array */
	pthread_mutex_lock(&v4l2_open_mutex);
	for (index = 0; index < V4L2_MAX_DEVICES; index++)
		if (devices[index].fd == -1) {
			devices[index].fd = fd;
			break;
		}
	pthread_mutex_unlock(&v4l2_open_mutex);

	if (index == V4L2_MAX_DEVICES) {
		V4L2_LOG_ERR("attempting to open more then %d video devices\n",
				V4L2_MAX_DEVICES);
		/* Bug fix: destroy the convert context created above, it was
		   leaked on this error path before */
		v4lconvert_destroy(convert);
		errno = EBUSY;
		return -1;
	}

	devices[index].flags = v4l2_flags;
	if (cap.capabilities & V4L2_CAP_READWRITE)
		devices[index].flags |= V4L2_SUPPORTS_READ;
	if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
		devices[index].flags |= V4L2_USE_READ_FOR_READ;
		/* This device only supports read so the stream gets started by the
		   driver on the first read */
		devices[index].first_frame = V4L2_IGNORE_FIRST_FRAME_ERRORS;
	}
	if (!strcmp((char *)cap.driver, "uvcvideo"))
		devices[index].flags |= V4L2_IS_UVC;
	devices[index].open_count = 1;
	devices[index].src_fmt = fmt;
	devices[index].dest_fmt = fmt;

	/* When a user does a try_fmt with the current dest_fmt and the dest_fmt
	   is a supported one we will align the resolution (see try_fmt for why).
	   Do the same here now, so that a try_fmt on the result of a get_fmt done
	   immediately after open leaves the fmt unchanged. */
	if (v4lconvert_supported_dst_format(
				devices[index].dest_fmt.fmt.pix.pixelformat)) {
		devices[index].dest_fmt.fmt.pix.width &= ~7;
		devices[index].dest_fmt.fmt.pix.height &= ~1;
	}

	pthread_mutex_init(&devices[index].stream_lock, NULL);

	/* Initialize the rest of the per-device state */
	devices[index].no_frames = 0;
	devices[index].nreadbuffers = V4L2_DEFAULT_NREADBUFFERS;
	devices[index].convert = convert;
	devices[index].convert_mmap_buf = MAP_FAILED;
	for (i = 0; i < V4L2_MAX_NO_FRAMES; i++) {
		devices[index].frame_pointers[i] = MAP_FAILED;
		devices[index].frame_map_count[i] = 0;
	}
	devices[index].frame_queued = 0;
	devices[index].readbuf = NULL;
	devices[index].readbuf_size = 0;

	if (index >= devices_used)
		devices_used = index + 1;

	V4L2_LOG("open: %d\n", fd);

	return fd;
}
Esempio n. 29
0
int v4l2_ioctl(int fd, unsigned long int request, ...)
{
	void *arg;
	va_list ap;
	int result, index, saved_err;
	int is_capture_request = 0, stream_needs_locking = 0;

	va_start(ap, request);
	arg = va_arg(ap, void *);
	va_end(ap);

	index = v4l2_get_index(fd);
	if (index == -1)
		return SYS_IOCTL(fd, request, arg);

	/* Appearantly the kernel and / or glibc ignore the 32 most significant bits
	   when long = 64 bits, and some applications pass an int holding the req to
	   ioctl, causing it to get sign extended, depending upon this behavior */
	request = (unsigned int)request;

	/* Is this a capture request and do we need to take the stream lock? */
	switch (request) {
	case VIDIOC_QUERYCTRL:
	case VIDIOC_G_CTRL:
	case VIDIOC_S_CTRL:
		if (!(devices[index].flags & V4L2_DISABLE_CONVERSION))
			is_capture_request = 1;
		break;
	case VIDIOC_QUERYCAP:
		is_capture_request = 1;
		break;
	case VIDIOC_ENUM_FMT:
		if (((struct v4l2_fmtdesc *)arg)->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
				!(devices[index].flags & V4L2_DISABLE_CONVERSION))
			is_capture_request = 1;
		break;
	case VIDIOC_ENUM_FRAMESIZES:
	case VIDIOC_ENUM_FRAMEINTERVALS:
		if (!(devices[index].flags & V4L2_DISABLE_CONVERSION))
			is_capture_request = 1;
		break;
	case VIDIOC_TRY_FMT:
		if (((struct v4l2_format *)arg)->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
				!(devices[index].flags & V4L2_DISABLE_CONVERSION))
			is_capture_request = 1;
		break;
	case VIDIOC_S_FMT:
	case VIDIOC_G_FMT:
		if (((struct v4l2_format *)arg)->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			stream_needs_locking = 1;
		}
		break;
	case VIDIOC_REQBUFS:
		if (((struct v4l2_requestbuffers *)arg)->type ==
				V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			stream_needs_locking = 1;
		}
		break;
	case VIDIOC_QUERYBUF:
	case VIDIOC_QBUF:
	case VIDIOC_DQBUF:
		if (((struct v4l2_buffer *)arg)->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			stream_needs_locking = 1;
		}
		break;
	case VIDIOC_STREAMON:
	case VIDIOC_STREAMOFF:
		if (*((enum v4l2_buf_type *)arg) == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
			is_capture_request = 1;
			stream_needs_locking = 1;
		}
	}

	if (!is_capture_request) {
		result = SYS_IOCTL(fd, request, arg);
		saved_err = errno;
		v4l2_log_ioctl(request, arg, result);
		errno = saved_err;
		return result;
	}


	if (stream_needs_locking) {
		pthread_mutex_lock(&devices[index].stream_lock);
		/* If this is the first stream related ioctl, and we should only allow
		   libv4lconvert supported destination formats (so that it can do flipping,
		   processing, etc.) and the current destination format is not supported,
		   try setting the format to RGB24 (which is a supported dest. format). */
		if (!(devices[index].flags & V4L2_STREAM_TOUCHED) &&
				!(devices[index].flags & V4L2_DISABLE_CONVERSION) &&
				v4lconvert_supported_dst_fmt_only(devices[index].convert) &&
				!v4lconvert_supported_dst_format(
					devices[index].dest_fmt.fmt.pix.pixelformat)) {
			struct v4l2_format fmt = devices[index].dest_fmt;

			V4L2_LOG("Setting pixelformat to RGB24 (supported_dst_fmt_only)");
			devices[index].flags |= V4L2_STREAM_TOUCHED;
			fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;
			pthread_mutex_unlock(&devices[index].stream_lock);
			v4l2_ioctl(fd, VIDIOC_S_FMT, &fmt);
			pthread_mutex_lock(&devices[index].stream_lock);
			V4L2_LOG("Done setting pixelformat (supported_dst_fmt_only)");
		}
		devices[index].flags |= V4L2_STREAM_TOUCHED;
	}

	switch (request) {
	case VIDIOC_QUERYCTRL:
		result = v4lconvert_vidioc_queryctrl(devices[index].convert, arg);
		break;

	case VIDIOC_G_CTRL:
		result = v4lconvert_vidioc_g_ctrl(devices[index].convert, arg);
		break;

	case VIDIOC_S_CTRL:
		result = v4lconvert_vidioc_s_ctrl(devices[index].convert, arg);
		break;

	case VIDIOC_QUERYCAP: {
		struct v4l2_capability *cap = arg;

		result = SYS_IOCTL(devices[index].fd, VIDIOC_QUERYCAP, cap);
		if (result == 0)
			/* We always support read() as we fake it using mmap mode */
			cap->capabilities |= V4L2_CAP_READWRITE;
		break;
	}

	case VIDIOC_ENUM_FMT:
		result = v4lconvert_enum_fmt(devices[index].convert, arg);
		break;

	case VIDIOC_ENUM_FRAMESIZES:
		result = v4lconvert_enum_framesizes(devices[index].convert, arg);
		break;

	case VIDIOC_ENUM_FRAMEINTERVALS:
		result = v4lconvert_enum_frameintervals(devices[index].convert, arg);
		if (result)
			V4L2_LOG("ENUM_FRAMEINTERVALS Error: %s",
					v4lconvert_get_error_message(devices[index].convert));
		break;

	case VIDIOC_TRY_FMT:
		result = v4lconvert_try_format(devices[index].convert, arg, NULL);
		break;

	case VIDIOC_S_FMT: {
		struct v4l2_format src_fmt, *dest_fmt = arg;
		struct v4l2_pix_format req_pix_fmt;

		/* Don't be lazy on uvc cams, as this triggers a bug in the uvcvideo
		   driver in kernel <= 2.6.28 (with certain cams) */
		if (!(devices[index].flags & V4L2_IS_UVC) &&
				v4l2_pix_fmt_identical(&devices[index].dest_fmt, dest_fmt)) {
			*dest_fmt = devices[index].dest_fmt;
			result = 0;
			break;
		}

		if (v4l2_log_file) {
			int pixfmt = dest_fmt->fmt.pix.pixelformat;

			fprintf(v4l2_log_file, "VIDIOC_S_FMT app requesting: %c%c%c%c\n",
					pixfmt & 0xff,
					(pixfmt >> 8) & 0xff,
					(pixfmt >> 16) & 0xff,
					pixfmt >> 24);
		}

		if (devices[index].flags & V4L2_DISABLE_CONVERSION) {
			result = SYS_IOCTL(devices[index].fd, VIDIOC_TRY_FMT,
					dest_fmt);
			src_fmt = *dest_fmt;
		} else {
			result = v4lconvert_try_format(devices[index].convert, dest_fmt,
					&src_fmt);
		}

		if (result) {
			saved_err = errno;
			V4L2_LOG("S_FMT error trying format: %s\n", strerror(errno));
			errno = saved_err;
			break;
		}

		if (src_fmt.fmt.pix.pixelformat != dest_fmt->fmt.pix.pixelformat &&
				v4l2_log_file) {
			int pixfmt = src_fmt.fmt.pix.pixelformat;

			fprintf(v4l2_log_file, "VIDIOC_S_FMT converting from: %c%c%c%c\n",
					pixfmt & 0xff,
					(pixfmt >> 8) & 0xff,
					(pixfmt >> 16) & 0xff,
					pixfmt >> 24);
		}

		/* Maybe after try format has adjusted width/height etc, to whats
		   available nothing has changed (on the cam side) ? */
		if (!(devices[index].flags & V4L2_IS_UVC) &&
				v4l2_pix_fmt_identical(&devices[index].src_fmt, &src_fmt)) {
			v4l2_set_src_and_dest_format(index, &devices[index].src_fmt,
					dest_fmt);
			result = 0;
			break;
		}

		result = v4l2_check_buffer_change_ok(index);
		if (result)
			break;

		req_pix_fmt = src_fmt.fmt.pix;
		result = SYS_IOCTL(devices[index].fd, VIDIOC_S_FMT, &src_fmt);
		if (result) {
			saved_err = errno;
			V4L2_LOG_ERR("setting pixformat: %s\n", strerror(errno));
			/* Report to the app dest_fmt has not changed */
			*dest_fmt = devices[index].dest_fmt;
			errno = saved_err;
			break;
		}
		/* See if we've gotten what try_fmt promised us
		   (this check should never fail) */
		if (src_fmt.fmt.pix.width != req_pix_fmt.width ||
				src_fmt.fmt.pix.height != req_pix_fmt.height ||
				src_fmt.fmt.pix.pixelformat != req_pix_fmt.pixelformat) {
			V4L2_LOG_ERR("set_fmt gave us a different result then try_fmt!\n");
			/* Not what we expected / wanted, disable conversion */
			*dest_fmt = src_fmt;
		}

		v4l2_set_src_and_dest_format(index, &src_fmt, dest_fmt);
		break;
	}

	case VIDIOC_G_FMT: {
		struct v4l2_format *fmt = arg;

		*fmt = devices[index].dest_fmt;
		result = 0;
		break;
	}

	case VIDIOC_REQBUFS: {
		struct v4l2_requestbuffers *req = arg;

		/* IMPROVEME (maybe?) add support for userptr's? */
		if (req->memory != V4L2_MEMORY_MMAP) {
			errno = EINVAL;
			result = -1;
			break;
		}

		result = v4l2_check_buffer_change_ok(index);
		if (result)
			break;

		/* No more buffers then we can manage please */
		if (req->count > V4L2_MAX_NO_FRAMES)
			req->count = V4L2_MAX_NO_FRAMES;

		result = SYS_IOCTL(devices[index].fd, VIDIOC_REQBUFS, req);
		if (result < 0)
			break;
		result = 0; /* some drivers return the number of buffers on success */

		devices[index].no_frames = MIN(req->count, V4L2_MAX_NO_FRAMES);
		devices[index].flags &= ~V4L2_BUFFERS_REQUESTED_BY_READ;
		break;
	}

	case VIDIOC_QUERYBUF: {
		struct v4l2_buffer *buf = arg;

		if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
			result = v4l2_deactivate_read_stream(index);
			if (result)
				break;
		}

		/* Do a real query even when converting to let the driver fill in
		   things like buf->field */
		result = SYS_IOCTL(devices[index].fd, VIDIOC_QUERYBUF, buf);
		if (result || !v4l2_needs_conversion(index))
			break;

		buf->m.offset = V4L2_MMAP_OFFSET_MAGIC | buf->index;
		buf->length = V4L2_FRAME_BUF_SIZE;
		if (devices[index].frame_map_count[buf->index])
			buf->flags |= V4L2_BUF_FLAG_MAPPED;
		else
			buf->flags &= ~V4L2_BUF_FLAG_MAPPED;
		break;
	}

	case VIDIOC_QBUF:
		if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
			result = v4l2_deactivate_read_stream(index);
			if (result)
				break;
		}

		/* With some drivers the buffers must be mapped before queuing */
		if (v4l2_needs_conversion(index)) {
			result = v4l2_map_buffers(index);
			if (result)
				break;
		}

		result = SYS_IOCTL(devices[index].fd, VIDIOC_QBUF, arg);
		break;

	case VIDIOC_DQBUF: {
		struct v4l2_buffer *buf = arg;

		if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
			result = v4l2_deactivate_read_stream(index);
			if (result)
				break;
		}

		if (!v4l2_needs_conversion(index)) {
			result = SYS_IOCTL(devices[index].fd, VIDIOC_DQBUF, buf);
			if (result) {
				int saved_err = errno;

				V4L2_LOG_ERR("dequeuing buf: %s\n", strerror(errno));
				errno = saved_err;
			}
			break;
		}

		/* An application can do a DQBUF before mmap-ing in the buffer,
		   but we need the buffer _now_ to write our converted data
		   to it! */
		if (devices[index].convert_mmap_buf == MAP_FAILED) {
			devices[index].convert_mmap_buf = (void *)SYS_MMAP(NULL,
				(size_t)(devices[index].no_frames * V4L2_FRAME_BUF_SIZE),
				PROT_READ | PROT_WRITE,
				MAP_ANONYMOUS | MAP_PRIVATE,
				-1, 0);
			if (devices[index].convert_mmap_buf == MAP_FAILED) {
				saved_err = errno;
				V4L2_LOG_ERR("allocating conversion buffer\n");
				errno = saved_err;
				result = -1;
				break;
			}
		}

		result = v4l2_dequeue_and_convert(index, buf, 0, V4L2_FRAME_BUF_SIZE);
		if (result < 0)
			break;

		buf->bytesused = result;
		buf->m.offset = V4L2_MMAP_OFFSET_MAGIC | buf->index;
		buf->length = V4L2_FRAME_BUF_SIZE;
		if (devices[index].frame_map_count[buf->index])
			buf->flags |= V4L2_BUF_FLAG_MAPPED;
		else
			buf->flags &= ~V4L2_BUF_FLAG_MAPPED;

		result = 0;
		break;
	}

	case VIDIOC_STREAMON:
	case VIDIOC_STREAMOFF:
		if (devices[index].flags & V4L2_STREAM_CONTROLLED_BY_READ) {
			result = v4l2_deactivate_read_stream(index);
			if (result)
				break;
		}

		if (request == VIDIOC_STREAMON)
			result = v4l2_streamon(index);
		else
			result = v4l2_streamoff(index);
		break;

	default:
		result = SYS_IOCTL(fd, request, arg);
		break;
	}
Esempio n. 30
0
/*
 * Create a v4lcontrol context for the given device fd.
 *
 * Besides allocating the context this:
 * - reads flip hints from the current input's status flags
 * - looks up device specific quirk flags by USB vendor/product id
 * - decides which software ("fake") controls should be emulated
 * - sets up a per-user, per-device shared memory segment holding the
 *   emulated control values (shared between processes using the same
 *   device), falling back to plain malloc-ed memory when shm fails
 *
 * Returns the new context, or NULL on error (error is logged).
 */
struct v4lcontrol_data *v4lcontrol_create(int fd, int always_needs_conversion)
{
	int shm_fd;
	int i, rc, got_usb_ids, init = 0;
	char *s, shm_name[256], pwd_buf[1024];
	struct v4l2_capability cap;
	struct v4l2_queryctrl ctrl;
	struct passwd pwd, *pwd_p;
	unsigned short vendor_id = 0;
	unsigned short product_id = 0;
	struct v4l2_input input;

	struct v4lcontrol_data *data = calloc(1, sizeof(struct v4lcontrol_data));

	if (!data) {
		fprintf(stderr, "libv4lcontrol: error: out of memory!\n");
		return NULL;
	}

	data->fd = fd;

	/* Check if the driver has indicated some form of flipping is needed */
	/* (G_INPUT fills in the current input index, ENUMINPUT then gives
	   us that input's status flags) */
	if ((SYS_IOCTL(data->fd, VIDIOC_G_INPUT, &input.index) == 0) &&
			(SYS_IOCTL(data->fd, VIDIOC_ENUMINPUT, &input) == 0)) {
		if (input.status & V4L2_IN_ST_HFLIP)
			data->flags |= V4LCONTROL_HFLIPPED;
		if (input.status & V4L2_IN_ST_VFLIP)
			data->flags |= V4LCONTROL_VFLIPPED;
	}

	/* Pick up per-device quirk flags from the built-in USB id database */
	got_usb_ids = v4lcontrol_get_usb_ids(data, &vendor_id, &product_id);
	if (got_usb_ids)
		v4lcontrol_get_flags_from_db(data, vendor_id, product_id);

	/* Allow overriding through environment */
	s = getenv("LIBV4LCONTROL_FLAGS");
	if (s)
		data->flags = strtol(s, NULL, 0);

	/* Probe whether the driver supports the NEXT_CTRL enumeration flag */
	ctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL;
	if (SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &ctrl) == 0)
		data->priv_flags |= V4LCONTROL_SUPPORTS_NEXT_CTRL;

	/* If the device always needs conversion, we can add fake controls at no cost
	   (no cost when not activated by the user that is) */
	if (always_needs_conversion || v4lcontrol_needs_conversion(data)) {
		for (i = 0; i < V4LCONTROL_AUTO_ENABLE_COUNT; i++) {
			/* Only fake a control the hardware doesn't provide itself */
			ctrl.id = fake_controls[i].id;
			rc = SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &ctrl);
			if (rc == -1 || (rc == 0 && (ctrl.flags & V4L2_CTRL_FLAG_DISABLED)))
				data->controls |= 1 << i;
		}
	}

	/* Check if a camera does not have hardware autogain and has the necessary
	   controls, before enabling sw autogain, even if this is requested by flags.
	   This is necessary because some cameras share a USB-ID, but can have
	   different sensors with / without autogain or the necessary controls. */
	/* (the while is used as a breakable block: every path breaks, so the
	   body runs at most once) */
	while (data->flags & V4LCONTROL_WANTS_AUTOGAIN) {
		ctrl.id = V4L2_CID_AUTOGAIN;
		rc = SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &ctrl);
		if (rc == 0 && !(ctrl.flags & V4L2_CTRL_FLAG_DISABLED))
			break;

		ctrl.id = V4L2_CID_EXPOSURE;
		rc = SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &ctrl);
		if (rc != 0 || (ctrl.flags & V4L2_CTRL_FLAG_DISABLED))
			break;

		ctrl.id = V4L2_CID_GAIN;
		rc = SYS_IOCTL(data->fd, VIDIOC_QUERYCTRL, &ctrl);
		if (rc != 0 || (ctrl.flags & V4L2_CTRL_FLAG_DISABLED))
			break;

		data->controls |= 1 << V4LCONTROL_AUTOGAIN |
			1 << V4LCONTROL_AUTOGAIN_TARGET;
		break;
	}

	/* Allow overriding through environment */
	s = getenv("LIBV4LCONTROL_CONTROLS");
	if (s)
		data->controls = strtol(s, NULL, 0);

	if (data->controls == 0)
		return data; /* No need to create a shared memory segment */

	if (SYS_IOCTL(fd, VIDIOC_QUERYCAP, &cap)) {
		perror("libv4lcontrol: error querying device capabilities");
		goto error;
	}

	/* Build a shm name unique per user and per physical device */
	if (getpwuid_r(geteuid(), &pwd, pwd_buf, sizeof(pwd_buf), &pwd_p) == 0) {
		if (got_usb_ids)
			snprintf(shm_name, 256, "/libv4l-%s:%s:%04x:%04x:%s", pwd.pw_name,
					cap.bus_info, (int)vendor_id, (int)product_id, cap.card);
		else
			snprintf(shm_name, 256, "/libv4l-%s:%s:%s", pwd.pw_name,
					cap.bus_info, cap.card);
	} else {
		perror("libv4lcontrol: error getting username using uid instead");
		if (got_usb_ids)
			snprintf(shm_name, 256, "/libv4l-%lu:%s:%04x:%04x:%s",
					(unsigned long)geteuid(), cap.bus_info,
					(int)vendor_id, (int)product_id, cap.card);
		else
			snprintf(shm_name, 256, "/libv4l-%lu:%s:%s", (unsigned long)geteuid(),
					cap.bus_info, cap.card);
	}

	/* / is not allowed inside shm names */
	/* (start at 1 to keep the mandatory leading '/') */
	for (i = 1; shm_name[i]; i++)
		if (shm_name[i] == '/')
			shm_name[i] = '-';

	/* Open the shared memory object identified by shm_name; if we are the
	   one to create it (O_EXCL succeeds) we must initialize it below */
	shm_fd = shm_open(shm_name, (O_CREAT | O_EXCL | O_RDWR), (S_IREAD | S_IWRITE));
	if (shm_fd >= 0)
		init = 1;
	else
		shm_fd = shm_open(shm_name, O_RDWR, (S_IREAD | S_IWRITE));

	if (shm_fd >= 0) {
		/* Set the shared memory size */
		/* NOTE(review): ftruncate return value is ignored; if it fails
		   the mmap below may still succeed and later accesses could
		   fault -- consider checking it */
		ftruncate(shm_fd, V4LCONTROL_SHM_SIZE);

		/* Retreive a pointer to the shm object */
		data->shm_values = mmap(NULL, V4LCONTROL_SHM_SIZE, (PROT_READ | PROT_WRITE),
				MAP_SHARED, shm_fd, 0);
		close(shm_fd);

		if (data->shm_values == MAP_FAILED) {
			perror("libv4lcontrol: error shm mmap failed");
			data->shm_values = NULL;
		}
	} else
		perror("libv4lcontrol: error creating shm segment failed");

	/* Fall back to malloc */
	if (data->shm_values == NULL) {
		fprintf(stderr,
				"libv4lcontrol: falling back to malloc-ed memory for controls\n");
		data->shm_values = malloc(V4LCONTROL_SHM_SIZE);
		if (!data->shm_values) {
			fprintf(stderr, "libv4lcontrol: error: out of memory!\n");
			goto error;
		}
		init = 1;
		data->priv_flags |= V4LCONTROL_MEMORY_IS_MALLOCED;
	}

	if (init) {
		/* Initialize the new shm object we created */
		memset(data->shm_values, 0, V4LCONTROL_SHM_SIZE);

		for (i = 0; i < V4LCONTROL_COUNT; i++)
			data->shm_values[i] = fake_controls[i].default_value;

		if (data->flags & V4LCONTROL_WANTS_WB)
			data->shm_values[V4LCONTROL_WHITEBALANCE] = 1;

		if (data->flags_info && data->flags_info->default_gamma)
			data->shm_values[V4LCONTROL_GAMMA] = data->flags_info->default_gamma;
	}

	return data;

error:
	free(data);
	return NULL;
}