Example #1
/* Client has queried the service to stop capturing video.
 * Param:
 *  cc - Queried camera client descriptor.
 *  qc - Qemu client for the emulated camera.
 *  param - Query parameters. There are no parameters expected for this query.
 */
static void
_camera_client_query_stop(CameraClient* cc, QemudClient* qc, const char* param)
{
    if (cc->video_frame == NULL) {
        /* Not started. */
        W("%s: Camera '%s' is not started", __FUNCTION__, cc->device_name);
        _qemu_client_reply_ok(qc, "Camera is not started");
        return;
    }

    /* Stop the camera. */
    if (camera_device_stop_capturing(cc->camera)) {
        E("%s: Cannot stop camera device '%s': %s",
          __FUNCTION__, cc->device_name, strerror(errno));
        _qemu_client_reply_ko(qc, "Cannot stop camera device");
        return;
    }

    free(cc->video_frame);
    cc->video_frame = NULL;

    D("%s: Camera device '%s' is now stopped.", __FUNCTION__, cc->device_name);
    _qemu_client_reply_ok(qc, NULL);
}
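
For context, the stop handler above uses a non-NULL cc->video_frame as the "capturing is active" marker. The sketch below shows how a matching start handler could set that marker. It is an illustration only, not the service's actual start handler; the fields cc->pixel_format, cc->width, cc->height and cc->video_frame_size are assumed purely for this example, and the real handler would also parse 'param' for the requested dimensions and pixel format.

/* Sketch of a corresponding "start" handler (illustration only; the real
 * handler also parses 'param' for the requested dimensions and pixel format).
 * The fields pixel_format, width, height and video_frame_size on CameraClient
 * are assumed for this example. */
static void
_camera_client_query_start_sketch(CameraClient* cc, QemudClient* qc, const char* param)
{
    if (cc->video_frame != NULL) {
        /* Already started. */
        _qemu_client_reply_ok(qc, "Camera is already started");
        return;
    }

    if (camera_device_start_capturing(cc->camera, cc->pixel_format,
                                      cc->width, cc->height)) {
        E("%s: Cannot start camera device '%s': %s",
          __FUNCTION__, cc->device_name, strerror(errno));
        _qemu_client_reply_ko(qc, "Cannot start camera device");
        return;
    }

    /* Allocate the frame buffer that _camera_client_query_stop() later frees.
     * A non-NULL video_frame is what marks the client as "started". */
    cc->video_frame = malloc(cc->video_frame_size);
    if (cc->video_frame == NULL) {
        E("%s: Unable to allocate video frame buffer", __FUNCTION__);
        camera_device_stop_capturing(cc->camera);
        _qemu_client_reply_ko(qc, "Out of memory");
        return;
    }

    _qemu_client_reply_ok(qc, NULL);
}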

/* Array containing emulated webcam frame dimensions.
 * capXxx API provides device independent frame dimensions, by scaling frames
 * received from the device to whatever dimensions were requested by the user.
 * So, we can just use a small set of frame dimensions to emulate.
 */
static const CameraFrameDim _emulate_dims[] =
{
    /* Emulates 640x480 frame. */
    {640, 480},
    /* Emulates 352x288 frame (required by camera framework). */
    {352, 288},
    /* Emulates 320x240 frame (required by camera framework). */
    {320, 240},
    /* Emulates 176x144 frame (required by camera framework). */
    {176, 144}
};

int
enumerate_camera_devices(CameraInfo* cis, int max)
{
    int inp_channel, found = 0;

    for (inp_channel = 0; inp_channel < 10 && found < max; inp_channel++) {
        char name[256];
        CameraDevice* cd;

        snprintf(name, sizeof(name), "%s%d", _default_window_name, found);
        cd = camera_device_open(name, inp_channel);
        if (cd != NULL) {
            WndCameraDevice* wcd = (WndCameraDevice*)cd->opaque;

            /* Unfortunately, on Windows we have to start capturing in order to get the
             * actual frame properties. */
            if (!camera_device_start_capturing(cd, V4L2_PIX_FMT_RGB32, 640, 480)) {
                cis[found].frame_sizes = (CameraFrameDim*)malloc(sizeof(_emulate_dims));
                if (cis[found].frame_sizes != NULL) {
                    char disp_name[24];
                    snprintf(disp_name, sizeof(disp_name), "webcam%d", found);
                    cis[found].display_name = ASTRDUP(disp_name);
                    cis[found].device_name = ASTRDUP(name);
                    cis[found].direction = ASTRDUP("front");
                    cis[found].inp_channel = inp_channel;
                    cis[found].frame_sizes_num = sizeof(_emulate_dims) / sizeof(*_emulate_dims);
                    memcpy(cis[found].frame_sizes, _emulate_dims, sizeof(_emulate_dims));
                    cis[found].pixel_format = wcd->pixel_format;
                    cis[found].in_use = 0;
                    found++;
                } else {
                    E("%s: Unable to allocate dimensions", __FUNCTION__);
                }
                camera_device_stop_capturing(cd);
            } else {
                /* Capturing couldn't be started on this input channel, so
                 * assume there are no more cameras to enumerate. */
                camera_device_close(cd);
                break;
            }
            camera_device_close(cd);
        } else {
            /* No more cameras. */
            break;
        }
    }

    return found;
}
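
A caller-side usage sketch follows (MAX_CAMERA, _camera_info and _camera_count are names assumed for this example). The service would typically enumerate once and keep the returned entries, since enumerate_camera_devices() heap-allocates display_name, device_name, direction and frame_sizes for each entry, and the caller owns those buffers.

/* Usage sketch (illustration only): enumerate up to MAX_CAMERA webcams and
 * log what was found. MAX_CAMERA, _camera_info and _camera_count are assumed
 * names, not part of the code above. */
#define MAX_CAMERA 2

static CameraInfo _camera_info[MAX_CAMERA];
static int        _camera_count = 0;

static void
_enumerate_cameras_sketch(void)
{
    int i;

    _camera_count = enumerate_camera_devices(_camera_info, MAX_CAMERA);
    for (i = 0; i < _camera_count; i++) {
        D("%s: Found camera '%s' ('%s'): %d frame size(s), input channel %d",
          __FUNCTION__, _camera_info[i].device_name,
          _camera_info[i].display_name, _camera_info[i].frame_sizes_num,
          _camera_info[i].inp_channel);
    }
}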