/* Enumerates camera devices connected to the host (Linux implementation).
 * Probes /dev/video0, /dev/video1, ... in order, filling in one CameraInfo
 * entry per device that opens and reports its properties successfully.
 * Enumeration stops at the first device node that fails to open, since video
 * device nodes are assumed to be numbered contiguously.
 * Param:
 *  cis - Array where to save information about the found camera devices.
 *  max - Maximum number of entries the 'cis' array can hold.
 * Return: Number of entries in the 'cis' array that have been filled in.
 */
int
enumerate_camera_devices(CameraInfo* cis, int max)
{
    char dev_name[24];
    int found = 0;
    int n;

    for (n = 0; n < max; n++) {
        CameraDevice* cd;

        /* snprintf guarantees NUL-termination and cannot overflow the
         * buffer, unlike the unbounded sprintf. */
        snprintf(dev_name, sizeof(dev_name), "/dev/video%d", n);
        cd = camera_device_open(dev_name, 0);
        if (cd != NULL) {
            LinuxCameraDevice* lcd = (LinuxCameraDevice*)cd->opaque;
            /* _camera_device_get_info returns 0 on success. */
            if (!_camera_device_get_info(lcd, cis + found)) {
                char user_name[24];
                snprintf(user_name, sizeof(user_name), "webcam%d", found);
                cis[found].display_name = ASTRDUP(user_name);
                cis[found].in_use = 0;
                found++;
            }
            camera_device_close(cd);
        } else {
            /* First device node that fails to open ends the enumeration. */
            break;
        }
    }

    return found;
}
/* Client has queried connection to the camera.
 * Opens the underlying camera device for this client; a repeated connect on
 * an already connected camera is reported as a warning but still succeeds.
 * Param:
 *  cc - Queried camera client descriptor.
 *  qc - Qemu client for the emulated camera.
 *  param - Query parameters. There are no parameters expected for this query.
 */
static void
_camera_client_query_connect(CameraClient* cc, QemudClient* qc, const char* param)
{
    CameraDevice* opened;

    /* Connecting twice is harmless: report success without reopening. */
    if (cc->camera != NULL) {
        W("%s: Camera '%s' is already connected", __FUNCTION__, cc->device_name);
        _qemu_client_reply_ok(qc, "Camera is already connected");
        return;
    }

    /* Attempt to open the camera device on the configured input channel. */
    opened = camera_device_open(cc->device_name, cc->inp_channel);
    if (opened == NULL) {
        E("%s: Unable to open camera device '%s'", __FUNCTION__, cc->device_name);
        _qemu_client_reply_ko(qc, "Unable to open camera device.");
        return;
    }
    cc->camera = opened;

    D("%s: Camera device '%s' is now connected", __FUNCTION__, cc->device_name);
    _qemu_client_reply_ok(qc, NULL);
}
/* Enumerates camera devices connected to the host (Windows implementation).
 * Tries capture windows on input channels 0..9, filling in one CameraInfo
 * entry per device that opens and starts capturing successfully. Enumeration
 * stops at the first channel where the device fails to open or fails to start
 * capturing (treated as "no more cameras").
 * Param:
 *  cis - Array where to save information about the found camera devices.
 *  max - Maximum number of entries the 'cis' array can hold.
 * Return: Number of entries in the 'cis' array that have been filled in.
 */
int
enumerate_camera_devices(CameraInfo* cis, int max)
{
    /* Array containing emulated webcam frame dimensions.
     * capXxx API provides device independent frame dimensions, by scaling frames
     * received from the device to whatever dimensions were requested by the user.
     * So, we can just use a small set of frame dimensions to emulate. */
    static const CameraFrameDim _emulate_dims[] =
    {
        /* Emulates 640x480 frame. */
        {640, 480},
        /* Emulates 352x288 frame (required by camera framework). */
        {352, 288},
        /* Emulates 320x240 frame (required by camera framework). */
        {320, 240},
        /* Emulates 176x144 frame (required by camera framework). */
        {176, 144}
    };
    int inp_channel, found = 0;

    for (inp_channel = 0; inp_channel < 10 && found < max; inp_channel++) {
        char name[256];
        CameraDevice* cd;

        snprintf(name, sizeof(name), "%s%d", _default_window_name, found);
        cd = camera_device_open(name, inp_channel);
        if (cd == NULL) {
            /* No more cameras. */
            break;
        }

        /* Unfortunately, on Windows we have to start capturing in order to get
         * the actual frame properties. camera_device_start_capturing returns 0
         * on success; a failure here is treated as "no more cameras". */
        if (camera_device_start_capturing(cd, V4L2_PIX_FMT_RGB32, 640, 480)) {
            camera_device_close(cd);
            break;
        }

        cis[found].frame_sizes = (CameraFrameDim*)malloc(sizeof(_emulate_dims));
        if (cis[found].frame_sizes != NULL) {
            WndCameraDevice* wcd = (WndCameraDevice*)cd->opaque;
            char disp_name[24];
            /* snprintf guards against buffer overflow, unlike sprintf. */
            snprintf(disp_name, sizeof(disp_name), "webcam%d", found);
            cis[found].display_name = ASTRDUP(disp_name);
            cis[found].device_name = ASTRDUP(name);
            cis[found].direction = ASTRDUP("front");
            cis[found].inp_channel = inp_channel;
            cis[found].frame_sizes_num = sizeof(_emulate_dims) / sizeof(*_emulate_dims);
            memcpy(cis[found].frame_sizes, _emulate_dims, sizeof(_emulate_dims));
            cis[found].pixel_format = wcd->pixel_format;
            cis[found].in_use = 0;
            found++;
        } else {
            /* Allocation failure: skip this entry but keep probing. */
            E("%s: Unable to allocate dimensions", __FUNCTION__);
        }
        camera_device_stop_capturing(cd);
        camera_device_close(cd);
    }

    return found;
}