Example #1 — File: app.c, Project: RichardFans/wcamsrv
/*
 * Minimal capture demo: create the app event loop, open a V4L2 device,
 * attach an image-processing callback, and run until app_exec() returns.
 * Returns 0 on success, 1 if any resource could not be created.
 */
int main(int argc, char *argv[])
{
    (void)argc;  /* command-line arguments are not used */
    (void)argv;

    app_t a = app_create(0);
    if (!a)
        return 1;

    v4l2_dev_t v = v4l2_create(a, "/dev/video2", 0, 0);
    if (!v) {
        app_free(a);
        return 1;
    }

    /* img_proc receives the device handle itself as its user context */
    v4l2_set_img_proc(v, img_proc, v);

    /* non-zero return indicates the device could not start streaming */
    if (v4l2_start_capture(v)) {
        v4l2_free(v);
        app_free(a);
        return 1;
    }

    app_exec(a);  /* blocks until the application loop exits */

    v4l2_stop_capture(v);
    v4l2_free(v);
    app_free(a);

    return 0;
}
Example #2
/*
 * Capture demo with JPEG decoding: the V4L2 callback receives the decoder
 * handle as its user context. Uses goto-based cleanup so every partially
 * acquired resource is released on failure.
 * Returns 0 on success, 1 on any creation/start failure.
 */
int main(int argc, char *argv[])
{
    (void)argc;  /* command-line arguments are not used */
    (void)argv;

    int rc = 1;

    app_t a = app_create(0);
    if (!a)
        goto out;

    jpg_dec_t d = jpg_dec_create();
    if (!d)
        goto out_app;

    /* NULL device path: let v4l2_create pick its default device */
    v4l2_dev_t v = v4l2_create(a, NULL, 0, 0);
    if (!v)
        goto out_dec;

    /* the decoder is passed through to img_proc as user context */
    v4l2_set_img_proc(v, img_proc, d);

    /* non-zero return indicates the device could not start streaming */
    if (v4l2_start_capture(v))
        goto out_v4l2;

    app_exec(a);  /* blocks until the application loop exits */

    v4l2_stop_capture(v);
    rc = 0;

out_v4l2:
    v4l2_free(v);
out_dec:
    jpg_dec_free(d);
out_app:
    app_free(a);
out:
    return rc;
}
Example #3
/*
 * Worker thread to get video data
 */
/*
 * Worker thread to get video data.
 *
 * Loops until data->event is signaled: waits (select, 1 s timeout) for the
 * device fd to become readable, dequeues a filled buffer, publishes it as an
 * OBS source frame, and re-queues the buffer. Timestamps are normalized so
 * the first frame starts at 0.
 *
 * vptr: thread argument unpacked by the V4L2_DATA() macro into `data`.
 * Returns NULL; capture is always stopped before exit.
 */
static void *v4l2_thread(void *vptr)
{
	V4L2_DATA(vptr);
	int r;
	fd_set fds;
	uint8_t *start;
	uint64_t frames;
	uint64_t first_ts;
	struct timeval tv;
	struct v4l2_buffer buf;
	struct obs_source_frame out;
	size_t plane_offsets[MAX_AV_PLANES];

	if (v4l2_start_capture(data->dev, &data->buffers) < 0)
		goto exit;

	frames   = 0;
	first_ts = 0;
	v4l2_prep_obs_frame(data, &out, plane_offsets);

	while (os_event_try(data->event) == EAGAIN) {
		FD_ZERO(&fds);
		FD_SET(data->dev, &fds);
		/* select() may modify tv, so re-arm the 1 s timeout each pass */
		tv.tv_sec = 1;
		tv.tv_usec = 0;

		r = select(data->dev + 1, &fds, NULL, NULL, &tv);
		if (r < 0) {
			if (errno == EINTR)
				continue;
			blog(LOG_DEBUG, "select failed");
			break;
		} else if (r == 0) {
			blog(LOG_DEBUG, "select timeout");
			continue;
		}

		/* Re-initialize the whole struct each iteration: VIDIOC_DQBUF
		 * reads more than .type/.memory, and the kernel expects the
		 * reserved fields to be zeroed — previously the remaining
		 * fields were uninitialized stack garbage. */
		buf = (struct v4l2_buffer) {
			.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
		};

		if (v4l2_ioctl(data->dev, VIDIOC_DQBUF, &buf) < 0) {
			if (errno == EAGAIN)
				continue;
			blog(LOG_DEBUG, "failed to dequeue buffer");
			break;
		}

		/* make timestamps relative to the first captured frame */
		out.timestamp = timeval2ns(buf.timestamp);
		if (!frames)
			first_ts = out.timestamp;
		out.timestamp -= first_ts;

		start = (uint8_t *) data->buffers.info[buf.index].start;
		for (uint_fast32_t i = 0; i < MAX_AV_PLANES; ++i)
			out.data[i] = start + plane_offsets[i];
		obs_source_output_video(data->source, &out);

		/* hand the buffer back to the driver for reuse */
		if (v4l2_ioctl(data->dev, VIDIOC_QBUF, &buf) < 0) {
			blog(LOG_DEBUG, "failed to enqueue buffer");
			break;
		}

		frames++;
	}

	blog(LOG_INFO, "Stopped capture after %"PRIu64" frames", frames);

exit:
	v4l2_stop_capture(data->dev);
	return NULL;
}
Example #4
/*
 * Create the video pipeline for the webcam server: open the camera from the
 * configuration, attach a JPEG or YUYV image handler (with the matching
 * decoder/encoder), create the framebuffer display, and start capturing.
 *
 * ws: owning webcam-server context (provides app loop and configuration).
 * Returns the new vid handle, or NULL on failure (all partially acquired
 * resources are released via the goto cleanup chain).
 */
vid_t vid_create(struct wcamsrv *ws)
{
    /* zero-initialized so that if v4l2_get_fmt() fails without filling it,
     * pixelformat is 0 and we fall into the explicit "unsupported" branch
     * instead of matching on stack garbage */
    struct v4l2_fmtdesc fmt = {0};

    struct vid *v = calloc(1, sizeof(struct vid));
    if (!v) {
        perror("vid_create");
        return NULL;
    }

    v->srv = ws;
    v->cam = v4l2_create(v->srv->app, cfg_get_camdev(v->srv->cfg),
                                      cfg_get_cam_fmt_nr(v->srv->cfg),
                                      cfg_get_cam_frm_nr(v->srv->cfg));
    if (v->cam == NULL)
        goto err_mem;

    if (pthread_mutex_init(&v->tran_frm_mutex, NULL)) {
        perror("vid_create: pthread_mutex_init");
        goto err_v4l2;
    }

    v4l2_get_fmt(v->cam, 0, &fmt);

    if (fmt.pixelformat == V4L2_PIX_FMT_JPEG) {
        /* camera delivers JPEG: decode incoming frames */
        v4l2_set_img_proc(v->cam, handle_jpeg_img_proc, v);
        v->dec = jpg_dec_create();
        if (v->dec == NULL)
            goto err_mutex;
    } else if (fmt.pixelformat == V4L2_PIX_FMT_YUYV) {
        /* camera delivers raw YUYV: encode frames to JPEG */
        v4l2_set_img_proc(v->cam, handle_yuyv_img_proc, v);
        v->enc = jpg_enc_create();
        if (v->enc == NULL)
            goto err_mutex;
    } else {
        pr_debug("Capture video format is %s, but now we just "
                 "support JPEG and YUYV.\n",
                 fmt.description);
        goto err_mutex;
    }

    v->fbd = fbd_create(0, cfg_get_fb_bpp(v->srv->cfg), 0, 0,
                           cfg_get_fb_width(v->srv->cfg),
                           cfg_get_fb_height(v->srv->cfg));
    if (v->fbd == NULL)
        goto err_codec;

    if (v4l2_start_capture(v->cam))
        goto err_fbd;

    return v;

err_fbd:
    fbd_free(v->fbd);
err_codec:
    /* exactly one of enc/dec was created above; calloc zeroed both */
    if (v->enc)
        jpg_enc_free(v->enc);
    if (v->dec)
        jpg_dec_free(v->dec);
err_mutex:
    pthread_mutex_destroy(&v->tran_frm_mutex);
err_v4l2:
    v4l2_free(v->cam);
err_mem:
    free(v);
    return NULL;
}
Example #5
/**
 * Handles all the video streaming and saving of the image shots
 * This is a sepereate thread, so it needs to be thread safe!
 */
/**
 * Handles all the video streaming and saving of the image shots.
 * This is a separate thread, so it needs to be thread safe!
 *
 * Fixes over the previous version:
 *  - img_color is only freed when it was actually created (vid->filters);
 *    previously image_free() ran on an uninitialized struct — undefined
 *    behavior.
 *  - the early return on capture failure no longer leaks the images
 *    created above it.
 */
static void *video_thread_function(void *data)
{
  (void)data;  /* configuration comes from the global VIDEO_THREAD_CAMERA */

  struct video_config_t *vid = (struct video_config_t *)&(VIDEO_THREAD_CAMERA);

  struct image_t img_jpeg;
  struct image_t img_color;

  // create the images
  if (vid->filters) {
    // fixme: don't hardcode size, works for bebop front camera for now
#define IMG_FLT_SIZE 272
    image_create(&img_color, IMG_FLT_SIZE, IMG_FLT_SIZE, IMAGE_YUV422);
    image_create(&img_jpeg, IMG_FLT_SIZE, IMG_FLT_SIZE, IMAGE_JPEG);
  }
  else {
    image_create(&img_jpeg, vid->w, vid->h, IMAGE_JPEG);
  }

  // Start the streaming of the V4L2 device
  if (!v4l2_start_capture(video_thread.dev)) {
    printf("[video_thread-thread] Could not start capture of %s.\n", video_thread.dev->name);
    image_free(&img_jpeg);
    if (vid->filters) {
      image_free(&img_color);
    }
    return NULL;
  }

  // be nice to the more important stuff
  set_nice_level(10);

  // Initialize timing
  struct timespec time_now;
  struct timespec time_prev;
  clock_gettime(CLOCK_MONOTONIC, &time_prev);

  // Start streaming
  video_thread.is_running = true;
  while (video_thread.is_running) {

    // get time in us since last run
    clock_gettime(CLOCK_MONOTONIC, &time_now);
    unsigned int dt_us = sys_time_elapsed_us(&time_prev, &time_now);
    time_prev = time_now;

    // sleep remaining time to limit to specified fps
    uint32_t fps_period_us = (uint32_t)(1000000. / (float)video_thread.fps);
    if (dt_us < fps_period_us) {
      usleep(fps_period_us - dt_us);
    }
    else {
      fprintf(stderr, "video_thread: desired %i fps, only managing %.1f fps\n",
              video_thread.fps, 1000000.f / dt_us);
    }

    // Wait for a new frame (blocking)
    struct image_t img;
    v4l2_image_get(video_thread.dev, &img);

    // pointer to the final image to pass for saving and further processing
    struct image_t *img_final = &img;

    // run selected filters
    if (vid->filters) {
      if (vid->filters & VIDEO_FILTER_DEBAYER) {
        BayerToYUV(&img, &img_color, 0, 0);
      }
      // use color image for further processing
      img_final = &img_color;
    }

    // Check if we need to take a shot
    if (video_thread.take_shot) {
      video_thread_save_shot(img_final, &img_jpeg);
      video_thread.take_shot = false;
    }

    // Run processing if required
    cv_run(img_final);

    // Free the image
    v4l2_image_free(video_thread.dev, &img);
  }

  image_free(&img_jpeg);
  // img_color only exists when filters were enabled at creation time
  if (vid->filters) {
    image_free(&img_color);
  }

  return NULL;
}