static
void
ppb_video_decoder_destroy_priv(void *p)
{
    struct pp_video_decoder_s *vd = p;

    if (vd->orig_graphics3d) {
        pp_resource_unref(vd->orig_graphics3d);
        vd->orig_graphics3d = 0;
    }

    if (vd->graphics3d) {
        pp_resource_unref(vd->graphics3d);
        vd->graphics3d = 0;
    }

    if (vd->avparser) {
        av_parser_close(vd->avparser);
        vd->avparser = NULL;
    }

    if (vd->avctx)
        avcodec_free_context(&vd->avctx);

    if (vd->avframe)
        av_frame_free(&vd->avframe);

    if (vd->va_context.context_id) {
        vaDestroyContext(display.va, vd->va_context.context_id);
        vd->va_context.context_id = 0;
    }

    if (vd->va_context.config_id) {
        vaDestroyConfig(display.va, vd->va_context.config_id);
        vd->va_context.config_id = 0;
    }

    vaDestroySurfaces(display.va, vd->surfaces, MAX_VIDEO_SURFACES);
    for (uintptr_t k = 0; k < MAX_VIDEO_SURFACES; k ++) {
        vd->surfaces[k] = VA_INVALID_SURFACE;
        vd->surface_used[k] = 0;
    }

    for (uintptr_t k = 0; k < vd->buffer_count; k ++) {
        vd->ppp_video_decoder_dev->DismissPictureBuffer(vd->instance->id, vd->self_id,
                                                        vd->buffers[k].id);
        pthread_mutex_lock(&display.lock);
        glXDestroyPixmap(display.x, vd->buffers[k].glx_pixmap);
        XFreePixmap(display.x, vd->buffers[k].pixmap);
        pthread_mutex_unlock(&display.lock);
    }

    vd->buffer_count = 0;
    vd->buffers_were_requested = 0;
    free_and_nullify(vd->buffers);
}
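// runs a nested message loop on the current thread on behalf of a Flash message loop
// resource; returns once that loop is asked to quit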
int32_t
ppb_flash_message_loop_run(PP_Resource flash_message_loop)
{
    struct pp_flash_message_loop_s *fml =
                        pp_resource_acquire(flash_message_loop, PP_RESOURCE_FLASH_MESSAGE_LOOP);
    if (!fml) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    PP_Resource message_loop = ppb_message_loop_get_current();
    fml->running = 1;
    fml->message_loop = message_loop;
    fml->depth = ppb_message_loop_get_depth(message_loop) + 1;

    pp_resource_ref(flash_message_loop);        // prevent destroy of running loop
    pp_resource_release(flash_message_loop);

    // run the nested loop without increasing depth, so tasks already posted at the current depth
    // still run here instead of being postponed until the nested loop exits
    ppb_message_loop_run_int(message_loop, ML_NESTED, /* increase_depth = */ 0);

    fml = pp_resource_acquire(flash_message_loop, PP_RESOURCE_FLASH_MESSAGE_LOOP);
    if (fml) {
        fml->running = 0;
        pp_resource_release(flash_message_loop);
    }

    pp_resource_unref(flash_message_loop);
    return PP_OK;
}
static
void
ppb_url_response_info_destroy(void *p)
{
    struct pp_url_response_info_s *ri = p;
    pp_resource_unref(ri->url_loader_id);
}
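// releases the per-resource lock and drops the reference taken in pp_resource_acquire()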
void
pp_resource_release(PP_Resource resource)
{
    pthread_mutex_lock(&res_tbl_lock);
    struct pp_resource_generic_s *gr = g_hash_table_lookup(res_tbl, GINT_TO_POINTER(resource));
    if (gr) {
        pthread_mutex_unlock(&gr->lock);
    }
    pthread_mutex_unlock(&res_tbl_lock);

    // drop the reference taken in pp_resource_acquire()
    pp_resource_unref(resource);
}
void
ppb_core_release_resource(PP_Resource resource)
{
    pp_resource_unref(resource);
}
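// message loop core: moves tasks from the async queue into a time-ordered internal queue
// and runs them when due, honoring nesting depth and teardown requests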
int32_t
ppb_message_loop_run_int(PP_Resource message_loop, int nested, int increase_depth)
{
    if (this_thread_message_loop != message_loop) {
        trace_error("%s, not attached to current thread\n", __func__);
        return PP_ERROR_WRONG_THREAD;
    }

    struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
    if (!ml) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    // a running loop may only be re-entered when explicitly declared as nested
    if (!nested && ml->running) {
        trace_error("%s, attempt to run a nested loop without declaring it as nested\n", __func__);
        pp_resource_release(message_loop);
        return PP_ERROR_INPROGRESS;
    }

    struct {
        int running;
        int teardown;
    } saved_state = {
        .running = ml->running,
        .teardown = ml->teardown,
    };

    ml->running = 1;
    ml->teardown = 0;
    if (increase_depth)
        ml->depth++;

    int teardown = 0;
    int destroy_ml = 0;
    int depth = ml->depth;
    pp_resource_ref(message_loop);
    GAsyncQueue *async_q = ml->async_q;
    GQueue *int_q = ml->int_q;
    pp_resource_release(message_loop);

    while (1) {
        struct timespec now;
        struct message_loop_task_s *task = g_queue_peek_head(int_q);
        gint64 timeout = 1000 * 1000;
        if (task) {
            clock_gettime(CLOCK_REALTIME, &now);
            timeout = (task->when.tv_sec - now.tv_sec) * 1000 * 1000 +
                      (task->when.tv_nsec - now.tv_nsec) / 1000;
            if (timeout <= 0) {
                // remove task from the queue
                g_queue_pop_head(int_q);

                // tasks posted for an outer (shallower) loop must not run in this nested one
                if (task->depth > 0 && task->depth < depth) {
                    // reschedule it a bit later; the outer loop will pick it up
                    task->when = add_ms(now, 10);
                    g_queue_insert_sorted(int_q, task, time_compare_func, NULL);
                    continue;
                }

                if (task->terminate) {
                    if (depth > 1) {
                        // exit immediately; any remaining tasks will be processed by the outer loop
                        g_slice_free(struct message_loop_task_s, task);
                        break;
                    }

                    // this is the outermost loop; wait for all remaining tasks to run
                    ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
                    if (ml) {
                        ml->teardown = 1;
                        teardown = 1;
                        destroy_ml = task->should_destroy_ml;
                        pp_resource_release(message_loop);
                    }

                    g_slice_free(struct message_loop_task_s, task);
                    continue;
                }

                // run task
                const struct PP_CompletionCallback ccb = task->ccb;
                if (ccb.func) {
                    ccb.func(ccb.user_data, task->result_to_pass);
                }

                // free task
                g_slice_free(struct message_loop_task_s, task);
                continue;   // run cycle again
            }
        } else if (teardown) {
            // teardown, no tasks in queue left
            break;
        }

        task = g_async_queue_timeout_pop(async_q, timeout);
        if (task)
            g_queue_insert_sorted(int_q, task, time_compare_func, NULL);
    }

    // mark the loop as no longer running; restore the saved state if this was a nested run
    ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
    if (ml) {
        if (increase_depth)
            ml->depth--;
        ml->running = 0;
        if (nested) {
            ml->running = saved_state.running;
            ml->teardown = saved_state.teardown;
        }
        pp_resource_release(message_loop);
    }
    pp_resource_unref(message_loop);
    if (destroy_ml)
        pp_resource_unref(message_loop);
    return PP_OK;
}

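// schedules a completion callback on the given message loop after delay_ms milliseconds;
// "depth" pins the task to a particular nesting level, "origin" names the caller for diagnostics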
int32_t
ppb_message_loop_post_work_with_result(PP_Resource message_loop,
                                       struct PP_CompletionCallback callback, int64_t delay_ms,
                                       int32_t result_to_pass, int depth, const char *origin)
{
    if (callback.func == NULL) {
        trace_error("%s, callback.func == NULL\n", __func__);
        return PP_ERROR_BADARGUMENT;
    }

    struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
    if (!ml) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    if (ml->running && ml->teardown) {
        // message loop is in a teardown state
        pp_resource_release(message_loop);
        trace_error("%s, quit request received, no additional work could be posted\n", __func__);
        return PP_ERROR_FAILED;
    }

    struct message_loop_task_s *task = g_slice_alloc0(sizeof(*task));

    task->result_to_pass = result_to_pass;
    task->ccb = callback;
    task->depth = depth;

    // calculate absolute time callback should be run at
    clock_gettime(CLOCK_REALTIME, &task->when);
    task->when.tv_sec += delay_ms / 1000;
    task->when.tv_nsec += (delay_ms % 1000) * 1000 * 1000;
    while (task->when.tv_nsec >= 1000 * 1000 * 1000) {
        task->when.tv_sec += 1;
        task->when.tv_nsec -= 1000 * 1000 * 1000;
    }

    g_async_queue_push(ml->async_q, task);
    pp_resource_release(message_loop);
    return PP_OK;
}

int32_t
ppb_message_loop_post_work(PP_Resource message_loop, struct PP_CompletionCallback callback,
                           int64_t delay_ms)
{
    return ppb_message_loop_post_work_with_result(message_loop, callback, delay_ms, PP_OK, 0,
                                                  __func__);
}

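// posts a termination task for the loop at the given nesting depth; the loop resource may
// also be destroyed once it stops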
int32_t
ppb_message_loop_post_quit_depth(PP_Resource message_loop, PP_Bool should_destroy, int depth)
{
    struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP);
    if (!ml) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    struct message_loop_task_s *task = g_slice_alloc0(sizeof(*task));

    task->terminate = 1;
    task->depth = depth;
    task->should_destroy_ml = should_destroy;
    task->result_to_pass = PP_OK;

    clock_gettime(CLOCK_REALTIME, &task->when); // run as early as possible

    g_async_queue_push(ml->async_q, task);
    pp_resource_release(message_loop);
    return PP_OK;
}

int32_t
ppb_message_loop_post_quit(PP_Resource message_loop, PP_Bool should_destroy)
{
    int depth = ppb_message_loop_get_depth(message_loop);
    return ppb_message_loop_post_quit_depth(message_loop, should_destroy, depth);
}
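// browser-thread helper: invalidates the whole instance rectangle and forces a redraw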
static
void
_call_invalidaterect_ptac(void *param)
{
    struct pp_instance_s *pp_i = param;
    NPRect npr = {.top = 0, .left = 0, .bottom = pp_i->height, .right = pp_i->width};

    npn.invalidaterect(pp_i->npp, &npr);
    npn.forceredraw(pp_i->npp);
}

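// applies all queued paint/replace-contents tasks to the backing surface, rescales the result
// into the second buffer, then asks X11 (fullscreen) or the browser to repaint the instance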
int32_t
ppb_graphics2d_flush(PP_Resource graphics_2d, struct PP_CompletionCallback callback)
{
    struct pp_graphics2d_s *g2d = pp_resource_acquire(graphics_2d, PP_RESOURCE_GRAPHICS2D);
    if (!g2d) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    struct pp_instance_s *pp_i = g2d->instance;

    pthread_mutex_lock(&pp_i->lock);

    if (pp_i->graphics != graphics_2d) {
        pp_resource_release(graphics_2d);
        pthread_mutex_unlock(&pp_i->lock);
        return PP_ERROR_FAILED;
    }

    if (pp_i->graphics_in_progress) {
        pp_resource_release(graphics_2d);
        pthread_mutex_unlock(&pp_i->lock);
        return PP_ERROR_INPROGRESS;
    }

    pp_i->graphics_ccb = callback;
    pp_i->graphics_in_progress = 1;
    pthread_mutex_unlock(&pp_i->lock);

    while (g2d->task_list) {
        GList *link = g_list_first(g2d->task_list);
        struct g2d_paint_task_s *pt = link->data;
        struct pp_image_data_s  *id;

        cairo_t *cr;

        g2d->task_list = g_list_delete_link(g2d->task_list, link);
        switch (pt->type) {
        case gpt_paint_id:
            id = pp_resource_acquire(pt->image_data, PP_RESOURCE_IMAGE_DATA);
            if (!id)
                break;

            cairo_surface_mark_dirty(g2d->cairo_surf);
            cr = cairo_create(g2d->cairo_surf);
            cairo_set_source_surface(cr, id->cairo_surf, pt->ofs.x, pt->ofs.y);
            if (pt->src_is_set) {
                cairo_rectangle(cr, pt->src.point.x + pt->ofs.x, pt->src.point.y + pt->ofs.y,
                                pt->src.size.width, pt->src.size.height);
                cairo_fill(cr);
            } else {
                cairo_paint(cr);
            }
            cairo_surface_flush(g2d->cairo_surf);
            cairo_destroy(cr);
            pp_resource_release(pt->image_data);
            pp_resource_unref(pt->image_data);
            break;
        case gpt_replace_contents:
            id = pp_resource_acquire(pt->image_data, PP_RESOURCE_IMAGE_DATA);
            if (!id)
                break;
            // swap backing buffers only when dimensions match exactly
            if (id->width == g2d->width && id->height == g2d->height) {
                void            *tmp;
                cairo_surface_t *tmp_surf;

                cairo_surface_flush(id->cairo_surf);
                cairo_surface_flush(g2d->cairo_surf);

                tmp = g2d->data;
                g2d->data = id->data;
                id->data = tmp;

                tmp_surf = g2d->cairo_surf;
                g2d->cairo_surf = id->cairo_surf;
                id->cairo_surf = tmp_surf;
            }
            pp_resource_release(pt->image_data);
            pp_resource_unref(pt->image_data);
            break;
        }
        g_slice_free(struct g2d_paint_task_s, pt);
    }

    // scale image
    {
        cairo_surface_t *surf;
        surf = cairo_image_surface_create_for_data((unsigned char *)g2d->second_buffer,
                CAIRO_FORMAT_ARGB32, g2d->scaled_width, g2d->scaled_height, g2d->scaled_stride);
        cairo_t *cr = cairo_create(surf);
        cairo_scale(cr, g2d->scale, g2d->scale);
        cairo_set_source_surface(cr, g2d->cairo_surf, 0, 0);
        cairo_paint(cr);
        cairo_destroy(cr);
        cairo_surface_destroy(surf);
    }

    pp_resource_release(graphics_2d);

    pthread_mutex_lock(&pp_i->lock);
    if (!callback.func)
        pthread_barrier_init(&pp_i->graphics_barrier, NULL, 2);
    if (pp_i->is_fullscreen) {
        XGraphicsExposeEvent ev = {
            .type = GraphicsExpose,
            .drawable = pp_i->fs_wnd,
            .width = pp_i->width,
            .height = pp_i->height
        };

        XSendEvent(pp_i->dpy, pp_i->fs_wnd, True, ExposureMask, (void *)&ev);
        XFlush(pp_i->dpy);
        pthread_mutex_unlock(&pp_i->lock);
    } else {
        pthread_mutex_unlock(&pp_i->lock);
        ppb_core_call_on_browser_thread(_call_invalidaterect_ptac, pp_i);
    }

    if (callback.func)
        return PP_OK_COMPLETIONPENDING;

    pthread_barrier_wait(&pp_i->graphics_barrier);
    return PP_OK;
}
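// defers disconnect handling to stage 2 through a zero-timeout libevent timer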
static
void
handle_disconnect_stage1(struct async_network_task_s *task)
{
    struct event *ev = evtimer_new(event_b, handle_disconnect_stage2, task);
    struct timeval timeout = {.tv_sec = 0};
    add_event_mapping(task, ev);
    event_add(ev, &timeout);
}

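// called by libevent when the UDP socket becomes readable: performs the recvfrom()
// and posts the result to the caller's callback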
static
void
handle_udp_recv_stage2(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;

    struct pp_udp_socket_s *us = pp_resource_acquire(task->resource, PP_RESOURCE_UDP_SOCKET);
    if (!us) {
        trace_error("%s, bad resource\n", __func__);
        task_destroy(task);
        return;
    }

    socklen_t len = sizeof(task->addr_from->data);
    int32_t retval = recvfrom(sock, task->buffer, task->bufsize, 0,
                              (struct sockaddr *)task->addr_from->data, &len);
    task->addr_from->size = len;

    if (task->addr_from_resource)
        pp_resource_unref(task->addr_from_resource);

    if (retval < 0)
        retval = get_pp_errno();
    else if (retval == 0) {
        us->seen_eof = 1;   // TODO: is it needed?
    }

    pp_resource_release(task->resource);
    ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, retval, 0,
                                           __func__);
    task_destroy(task);
}

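// first stage of a UDP receive: arms a read event on the socket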
static
void
handle_udp_recv_stage1(struct async_network_task_s *task)
{
    struct pp_udp_socket_s *us = pp_resource_acquire(task->resource, PP_RESOURCE_UDP_SOCKET);
    if (!us) {
        trace_error("%s, bad resource\n", __func__);
        task_destroy(task);
        return;
    }

    memset(task->addr_from, 0, sizeof(*task->addr_from));

    struct event *ev = event_new(event_b, us->sock, EV_READ, handle_udp_recv_stage2, task);
    pp_resource_release(task->resource);
    add_event_mapping(task, ev);
    event_add(ev, NULL);
}

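// called by libevent when the UDP socket becomes writable: performs the sendto()
// and posts the result to the caller's callback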
static
void
handle_udp_send_stage2(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;

    int retval = sendto(sock, task->buffer, task->bufsize, MSG_NOSIGNAL,
                        (struct sockaddr *)task->netaddr.data, task->netaddr.size);
    if (retval < 0)
        retval = get_pp_errno();

    ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, retval, 0,
                                           __func__);
    task_destroy(task);
}

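// first stage of a UDP send: tries a non-blocking sendto() first and arms a write event
// only if the socket would block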
static
void
handle_udp_send_stage1(struct async_network_task_s *task)
{
    struct pp_udp_socket_s *us = pp_resource_acquire(task->resource, PP_RESOURCE_UDP_SOCKET);
    if (!us) {
        trace_error("%s, bad resource\n", __func__);
        task_destroy(task);
        return;
    }

    // try to send immediately, but don't wait
    int retval = sendto(us->sock, task->buffer, task->bufsize, MSG_DONTWAIT | MSG_NOSIGNAL,
                        (struct sockaddr *)task->netaddr.data, task->netaddr.size);
    pp_resource_release(task->resource);

    if (retval >= 0) {
        // successfully sent
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, retval, 0,
                                               __func__);
        task_destroy(task);
        return;
    }

    // need to wait
    struct event *ev = event_new(event_b, us->sock, EV_WRITE, handle_udp_send_stage2, task);
    add_event_mapping(task, ev);
    event_add(ev, NULL);
}

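// evdns callback: converts resolved addresses into PP_NetAddress_Private entries
// and posts the result to the caller's callback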
static
void
handle_host_resolve_stage2(int result, char type, int count, int ttl, void *addresses, void *arg)
{
    struct async_network_task_s *task = arg;

    if (result != DNS_ERR_NONE || count < 1) {
        trace_warning("%s, evdns returned code %d, count = %d (%s:%u)\n", __func__, result, count,
                      task->host, (unsigned int)task->port);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               PP_ERROR_NAME_NOT_RESOLVED, 0, __func__);
        task_destroy(task);
        return;
    }

    struct pp_host_resolver_s *hr = pp_resource_acquire(task->resource, PP_RESOURCE_HOST_RESOLVER);
    if (!hr) {
        trace_error("%s, bad resource\n", __func__);
        task_destroy(task);
        return;
    }

    hr->addrs = calloc(count, sizeof(struct PP_NetAddress_Private));
    if (!hr->addrs) {
        trace_error("%s, memory allocation failure\n", __func__);
        pp_resource_release(task->resource);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               PP_ERROR_NOMEMORY, 0, __func__);
        task_destroy(task);
        return;
    }
    hr->addr_count = count;

    if (type == DNS_IPv4_A) {
        struct in_addr *ipv4_addrs = addresses;

        for (int k = 0; k < count; k ++) {
            struct sockaddr_in sai = {
                .sin_family = AF_INET,
                .sin_port =   htons(task->port),
            };

            memcpy(&sai.sin_addr, &ipv4_addrs[k], sizeof(struct in_addr));

            hr->addrs[k].size = sizeof(struct sockaddr_in);
            memcpy(hr->addrs[k].data, &sai, sizeof(sai));
        }

        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_OK, 0,
                                               __func__);

    } else if (type == DNS_IPv6_AAAA) {
        struct in6_addr *ipv6_addrs = addresses;

        for (int k = 0; k < count; k ++) {
            struct sockaddr_in6 sai6 = {
                .sin6_family = AF_INET6,
                .sin6_port =   htons(task->port),
            };

            memcpy(&sai6.sin6_addr, &ipv6_addrs[k], sizeof(struct in6_addr));

            hr->addrs[k].size = sizeof(struct sockaddr_in6);
            memcpy(hr->addrs[k].data, &sai6, sizeof(sai6));
        }

        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_OK, 0,
                                               __func__);

    } else {
        trace_error("%s, bad evdns type %d (%s:%u)\n", __func__, type, task->host,
                    (unsigned int)task->port);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               PP_ERROR_FAILED, 0, __func__);

    }

    pp_resource_release(task->resource);
    task_destroy(task);
}

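// first stage of host resolution: queues an IPv4 DNS request with evdns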
static
void
handle_host_resolve_stage1(struct async_network_task_s *task)
{
    struct evdns_request *req;

    // queue DNS request
    req = evdns_base_resolve_ipv4(evdns_b, task->host, DNS_QUERY_NO_SEARCH,
                                  handle_host_resolve_stage2, task);
    // TODO: what about ipv6?

    if (!req) {
        trace_warning("%s, early dns resolution failure (%s:%u)\n", __func__, task->host,
                      (unsigned int)task->port);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               PP_ERROR_NAME_NOT_RESOLVED, 0, __func__);
        task_destroy(task);
        return;
    }
}

static
void *
network_worker_thread(void *param)
{
    event_base_dispatch(event_b);
    event_base_free(event_b);
    trace_error("%s, thread terminated\n", __func__);
    return NULL;
}
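// opens a V4L2 capture device, negotiates resolution and pixel format, allocates capture
// buffers, and reports the device info to the plugin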
int32_t
ppb_video_capture_open(PP_Resource video_capture, PP_Resource device_ref,
                       const struct PP_VideoCaptureDeviceInfo_Dev *requested_info,
                       uint32_t buffer_count, struct PP_CompletionCallback callback)
{
    int32_t result;
    struct pp_video_capture_s *vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
    if (!vc) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    const char *capture_device = default_capture_device;
    struct PP_Var longname = ppb_device_ref_get_longname(device_ref);

    if (longname.type == PP_VARTYPE_STRING)
        capture_device = ppb_var_var_to_utf8(longname, NULL);

    vc->fd = v4l2_open(capture_device, O_RDWR);

    ppb_var_release(longname);

    if (vc->fd < 0) {
        result = PP_ERROR_NOACCESS;
        goto point_1;
    }

    struct v4l2_capability caps;
    if (v4l2_ioctl(vc->fd, VIDIOC_QUERYCAP, &caps) != 0) {
        result = PP_ERROR_FAILED;
        goto point_2;
    }

#ifdef V4L2_CAP_DEVICE_CAPS
    const uint32_t device_caps = (caps.capabilities & V4L2_CAP_DEVICE_CAPS) ? caps.device_caps
                                                                            : caps.capabilities;
#else
    const uint32_t device_caps = caps.capabilities;
#endif // V4L2_CAP_DEVICE_CAPS

    if (!(device_caps & V4L2_CAP_VIDEO_CAPTURE)) {
        trace_error("%s, device can't capture\n", __func__);
        result = PP_ERROR_FAILED;
        goto point_2;
    }

    if (!(device_caps & V4L2_CAP_READWRITE)) {
        trace_error("%s, device doesn't support read/write interface\n", __func__);
        result = PP_ERROR_FAILED;
        goto point_2;
    }

    if (requested_info) {
        vc->width =  requested_info->width;
        vc->height = requested_info->height;
        vc->fps =    requested_info->frames_per_second;
    } else {
        vc->width =  640;
        vc->height = 480;
        vc->fps =    15;
    }

    struct v4l2_format fmt = {
        .type =                 V4L2_BUF_TYPE_VIDEO_CAPTURE,
        .fmt.pix.width =        vc->width,
        .fmt.pix.height =       vc->height,
        .fmt.pix.pixelformat =  V4L2_PIX_FMT_YUV420,    // PPAPI hardcodes format to YUV420
        .fmt.pix.field =        V4L2_FIELD_INTERLACED,
    };

    if (v4l2_ioctl(vc->fd, VIDIOC_S_FMT, &fmt) != 0) {
        trace_error("%s, failed to set resolution\n", __func__);
        result = PP_ERROR_FAILED;
        goto point_2;
    }

    vc->width =  fmt.fmt.pix.width;
    vc->height = fmt.fmt.pix.height;

    vc->buffer_size = fmt.fmt.pix.sizeimage;    // buffer size in bytes
    vc->buffer_count = MAX(buffer_count, 5);    // use at least 5 buffers, just in case

    vc->buffers = calloc(vc->buffer_count, sizeof(*vc->buffers));
    if (!vc->buffers) {
        trace_error("%s, memory allocation failure (1)\n", __func__);
        result = PP_ERROR_FAILED;
        goto point_2;
    }

    vc->buffer_is_free = malloc(sizeof(*vc->buffer_is_free) * vc->buffer_count);
    if (!vc->buffer_is_free) {
        trace_error("%s, memory allocation failure (2)\n", __func__);
        result = PP_ERROR_FAILED;
        goto point_3;
    }

    for (unsigned int k = 0; k < vc->buffer_count; k ++) {
        vc->buffer_is_free[k] = 1;
        vc->buffers[k] = ppb_buffer_create(vc->instance->id, vc->buffer_size);
        if (vc->buffers[k] == 0)
            goto point_4;
    }

    struct PP_VideoCaptureDeviceInfo_Dev info = {
        .width =             vc->width,
        .height =            vc->height,
        .frames_per_second = vc->fps,
    };

    vc->ppp_video_capture_dev->OnDeviceInfo(vc->instance->id, video_capture, &info,
                                            vc->buffer_count, vc->buffers);

    result = PP_OK;
    goto point_1;

point_4:
    for (unsigned int k = 0; k < vc->buffer_count; k ++)
        if (vc->buffers[k])
            ppb_core_release_resource(vc->buffers[k]);
    free_and_nullify(vc->buffer_is_free);
point_3:
    free_and_nullify(vc->buffers);
point_2:
    v4l2_close(vc->fd);
    vc->fd = -1;
point_1:
    pp_resource_release(video_capture);
    ppb_core_call_on_main_thread2(0, callback, result, __func__);
    return PP_OK_COMPLETIONPENDING;
}

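// parameters handed from the capture thread to on_buffer_ready_comt() on the main thread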
struct on_buffer_ready_param_s {
    PP_Instance                            instance;
    PP_Resource                            video_capture;
    uint32_t                               buf_idx;
    const struct PPP_VideoCapture_Dev_0_1 *ppp_video_capture_dev;
};

static
void
on_buffer_ready_comt(void *user_data, int32_t result)
{
    struct on_buffer_ready_param_s *p = user_data;
    struct pp_instance_s *pp_i = tables_get_pp_instance(p->instance);
    if (!pp_i) {
        // instance is gone; free the parameters and bail out
        g_slice_free1(sizeof(*p), p);
        return;
    }

    p->ppp_video_capture_dev->OnBufferReady(p->instance, p->video_capture, p->buf_idx);
    g_slice_free1(sizeof(*p), p);
}

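// capture thread: repeatedly picks a free buffer, reads a frame into it with v4l2_read(),
// and notifies the plugin on the main thread that the buffer is ready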
static
void *
video_capture_thread(void *param)
{
    struct pp_video_capture_s *vc = param;

    PP_Resource  video_capture = vc->self_id;
    PP_Instance  instance = vc->instance->id;
    const int    fd = vc->fd;
    const size_t buffer_size = vc->buffer_size;

    vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
    if (!vc)
        goto gone;

    while (!vc->terminate_thread) {
        // find free buffer
        uint32_t buf_idx = (uint32_t)-1;
        for (uint32_t k = 0; k < vc->buffer_count; k ++) {
            if (vc->buffer_is_free[k]) {
                buf_idx = k;
                vc->buffer_is_free[k] = 0;
                break;
            }
        }

        if (buf_idx == (uint32_t)-1) {
            // all buffers are busy; wait for one to become free, with the resource unlocked
            pp_resource_release(video_capture);
            usleep(10);
            vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
            if (!vc)
                goto gone;
            continue;
        }

        PP_Resource buffer = vc->buffers[buf_idx];
        pp_resource_release(video_capture);

        // wait on v4l2_read() with resource unlocked
        void *ptr = ppb_buffer_map(buffer);
        RETRY_ON_EINTR(v4l2_read(fd, ptr, buffer_size));
        ppb_buffer_unmap(buffer);

        vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
        if (!vc)
            goto gone;

        struct on_buffer_ready_param_s *p = g_slice_alloc(sizeof(*p));
        p->instance =               instance;
        p->video_capture =          video_capture;
        p->buf_idx =                buf_idx;
        p->ppp_video_capture_dev =  vc->ppp_video_capture_dev;
        ppb_core_call_on_main_thread2(0, PP_MakeCCB(on_buffer_ready_comt, p), PP_OK, __func__);
    }

    pp_resource_release(video_capture);
    return NULL;

gone:
    trace_error("%s, resource gone\n", __func__);
    return NULL;
}

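// starts the capture thread, taking an extra reference so the resource outlives the thread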
int32_t
ppb_video_capture_start_capture(PP_Resource video_capture)
{
    struct pp_video_capture_s *vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
    if (!vc) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    if (vc->thread_started)
        goto done;

    if (vc->fd < 0) {
        trace_error("%s, device is closed\n", __func__);
        pp_resource_release(video_capture);
        return PP_ERROR_FAILED;
    }

    vc->ppp_video_capture_dev->OnStatus(vc->instance->id, video_capture,
                                        PP_VIDEO_CAPTURE_STATUS_STARTING);

    pp_resource_ref(video_capture); // prevents freeing while thread is still running
    pthread_create(&vc->thread, NULL, video_capture_thread, vc);
    vc->thread_started = 1;

    vc->ppp_video_capture_dev->OnStatus(vc->instance->id, video_capture,
                                        PP_VIDEO_CAPTURE_STATUS_STARTED);

done:
    pp_resource_release(video_capture);
    return PP_OK;
}

int32_t
ppb_video_capture_reuse_buffer(PP_Resource video_capture, uint32_t buffer)
{
    struct pp_video_capture_s *vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
    if (!vc) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    if (buffer < vc->buffer_count)
        vc->buffer_is_free[buffer] = 1;

    pp_resource_release(video_capture);
    return PP_OK;
}

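// asks the capture thread to terminate, joins it, and drops the reference taken
// in start_capture()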
int32_t
ppb_video_capture_stop_capture(PP_Resource video_capture)
{
    struct pp_video_capture_s *vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
    if (!vc) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    if (!vc->thread_started)
        goto done;

    vc->ppp_video_capture_dev->OnStatus(vc->instance->id, video_capture,
                                        PP_VIDEO_CAPTURE_STATUS_STOPPING);

    vc->terminate_thread = 1;
    pthread_t thread = vc->thread;

    pp_resource_release(video_capture);

    pthread_join(thread, NULL);

    vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
    if (!vc) {
        trace_error("%s, resource gone\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    vc->thread_started = 0;
    vc->terminate_thread = 0;
    vc->ppp_video_capture_dev->OnStatus(vc->instance->id, video_capture,
                                        PP_VIDEO_CAPTURE_STATUS_STOPPED);

    pp_resource_unref(video_capture);   // remove reference made in start_capture()

done:
    pp_resource_release(video_capture);
    return PP_OK;
}

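// stops capturing and releases the device and its resources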
void
ppb_video_capture_close(PP_Resource video_capture)
{
    ppb_video_capture_stop_capture(video_capture);

    struct pp_video_capture_s *vc = pp_resource_acquire(video_capture, PP_RESOURCE_VIDEO_CAPTURE);
    if (!vc) {
        trace_error("%s, bad resource\n", __func__);
        return;
    }

    ppb_video_capture_destroy(vc);

    pp_resource_release(video_capture);
    return;
}


// trace wrappers
TRACE_WRAPPER
PP_Resource
trace_ppb_video_capture_create(PP_Instance instance)
{
    trace_info("[PPB] {full} %s instance=%d\n", __func__+6, instance);
    return ppb_video_capture_create(instance);
}