// Handles an NPAPI getProperty request.
//
// Proxied objects are serviced by marshalling the request to the browser
// thread and spinning a nested message loop until the result is ready;
// native objects are forwarded to their own NPClass implementation.
bool
p2n_get_property(NPObject *npobj, NPIdentifier name, NPVariant *np_result)
{
    if (!npn.identifierisstring(name)) {
        trace_error("%s, name is not a string\n", __func__);
        return false;
    }

    if (npobj->_class != &p2n_proxy_class) {
        // not one of ours; let the object's own class handle it
        return npobj->_class->getProperty(npobj, name, np_result);
    }

    struct get_property_param_s *param = g_slice_alloc(sizeof(*param));
    param->npobj =     npobj;
    param->name =      npn.utf8fromidentifier(name);
    param->np_result = np_result;
    param->m_loop =    ppb_message_loop_get_for_browser_thread();
    param->depth =     ppb_message_loop_get_depth(param->m_loop) + 1;

    // the prepare comt reposts the real work at param->depth; the prepare
    // step itself may run at any depth, hence 0 here
    ppb_message_loop_post_work_with_result(param->m_loop,
                                           PP_MakeCCB(p2n_get_property_prepare_comt, param), 0,
                                           PP_OK, 0, __func__);
    ppb_message_loop_run_nested(param->m_loop);

    bool res = param->result;
    npn.memfree(param->name);
    g_slice_free1(sizeof(*param), param);
    return res;
}
// Schedules task for execution on browser thread. // // Since there is no access to browser event loop, we start a nested event loop which is terminated // as long as there is no tasks left. That way we can implement waiting as entering a nested loop // and thus avoid deadlocks. void ppb_core_call_on_browser_thread(PP_Instance instance, void (*func)(void *), void *user_data) { struct call_on_browser_thread_task_s *task = g_slice_alloc(sizeof(*task)); task->func = func; task->user_data = user_data; // Push task into queue. The only purpose is to put task into queue even if message loop // is currenly terminating (in teardown state), so we are ignoring that. There are three // possible loop states. Message loop is either running, stopped, or terminating. If it's // still running, task will be executed in the context of that loop. If it's stopped or // stopping right now, task will be pushed to a queue. After that code below will schedule // nested loop on browser thread. PP_Resource m_loop = ppb_message_loop_get_for_browser_thread(); ppb_message_loop_post_work_with_result(m_loop, PP_MakeCCB(call_on_browser_thread_comt, task), 0, PP_OK, 0, __func__); struct pp_instance_s *pp_i = instance ? tables_get_pp_instance(instance) : tables_get_some_pp_instance(); if (!pp_i) { trace_error("%s, no alive instance available\n", __func__); return; } // Schedule activation routine. pthread_mutex_lock(&display.lock); if (pp_i->npp) npn.pluginthreadasynccall(pp_i->npp, activate_browser_thread_ml_ptac, user_data); pthread_mutex_unlock(&display.lock); }
static int call_plugin_init_module(void) { int32_t (*ppp_initialize_module)(PP_Module module_id, PPB_GetInterface get_browser_interface); if (!module_dl_handler) return 0; ppp_initialize_module = dlsym(module_dl_handler, "PPP_InitializeModule"); if (!ppp_initialize_module) return 0; struct call_plugin_init_module_param_s *p = g_slice_alloc(sizeof(*p)); p->m_loop = ppb_message_loop_get_for_browser_thread(); p->depth = ppb_message_loop_get_depth(p->m_loop) + 1; p->ppp_initialize_module = ppp_initialize_module; ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(call_plugin_init_module_prepare_comt, p), 0, PP_OK, p->depth, __func__); ppb_message_loop_run_nested(p->m_loop); int res = p->result; g_slice_free1(sizeof(*p), p); return res; }
// Second stage of a socket disconnect; runs on the network thread.
// Cancels every other pending task targeting the same resource — their
// callbacks are notified with PP_ERROR_ABORTED — then closes the socket
// and destroys the disconnect task itself.
static void
handle_disconnect_stage2(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;
    GHashTableIter iter;
    gpointer key, val;

    // walk the global task table under the lock; removing through the
    // iterator keeps the table consistent while entries are freed
    pthread_mutex_lock(&lock);
    g_hash_table_iter_init(&iter, tasks_ht);
    while (g_hash_table_iter_next(&iter, &key, &val)) {
        struct async_network_task_s *cur = key;
        if (cur == task)  // skip current task
            continue;
        if (cur->resource == task->resource) {
            g_hash_table_iter_remove(&iter);
            event_free(cur->event);
            // abort the cancelled task's callback on its own message loop
            ppb_message_loop_post_work_with_result(cur->callback_ml, cur->callback, 0,
                                                   PP_ERROR_ABORTED, 0, __func__);
            g_slice_free(struct async_network_task_s, cur);
        }
    }
    pthread_mutex_unlock(&lock);

    close(task->sock);
    task_destroy(task);
}
static struct PP_Var n2p_call(void *object, struct PP_Var method_name, uint32_t argc, struct PP_Var *argv, struct PP_Var *exception) { if (method_name.type != PP_VARTYPE_STRING) { trace_error("%s, method_name is not a string\n", __func__); // TODO: fill exception return PP_MakeUndefined(); } struct call_param_s *p = g_slice_alloc(sizeof(*p)); p->object = object; p->method_name = method_name; p->argc = argc; p->argv = argv; p->exception = exception; p->m_loop = ppb_message_loop_get_current(); p->depth = ppb_message_loop_get_depth(p->m_loop) + 1; ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(n2p_call_comt, p), 0, PP_OK, p->depth, __func__); ppb_message_loop_run_nested(p->m_loop); struct PP_Var result = p->result; g_slice_free1(sizeof(*p), p); return result; }
static void handle_tcp_connect_stage1(struct async_network_task_s *task) { struct evdns_request *req; struct sockaddr_in sai; memset(&sai, 0, sizeof(sai)); if (inet_pton(AF_INET, task->host, &sai.sin_addr) == 1) { // already a valid IP address handle_tcp_connect_stage2(DNS_ERR_NONE, DNS_IPv4_A, 1, 300, &sai.sin_addr, task); return; } // queue DNS request req = evdns_base_resolve_ipv4(evdns_b, task->host, DNS_QUERY_NO_SEARCH, handle_tcp_connect_stage2, task); // TODO: what about ipv6? if (!req) { trace_warning("%s, early dns resolution failure (%s:%u)\n", __func__, task->host, (unsigned int)task->port); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_ERROR_NAME_NOT_RESOLVED, 0, __func__); task_destroy(task); return; } }
static bool n2p_has_property(void *object, struct PP_Var name, struct PP_Var *exception) { if (name.type != PP_VARTYPE_STRING) { trace_error("%s, name is not a string\n", __func__); // TODO: fill exception return false; } struct has_property_param_s *p = g_slice_alloc(sizeof(*p)); p->object = object; p->name = name; p->exception = exception; p->m_loop = ppb_message_loop_get_current(); p->depth = ppb_message_loop_get_depth(p->m_loop) + 1; ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(n2p_has_property_comt, p), 0, PP_OK, p->depth, __func__); ppb_message_loop_run_nested(p->m_loop); bool result = p->result; g_slice_free1(sizeof(*p), p); return result; }
// Creates a Flash context-menu resource. The menu itself is constructed via
// a task posted to the current message loop; a nested loop waits for it.
PP_Resource
ppb_flash_menu_create(PP_Instance instance_id, const struct PP_Flash_Menu *menu_data)
{
    struct pp_instance_s *pp_i = tables_get_pp_instance(instance_id);
    if (!pp_i) {
        trace_error("%s, bad instance\n", __func__);
        return 0;
    }

    PP_Resource flash_menu = pp_resource_allocate(PP_RESOURCE_FLASH_MENU, pp_i);
    if (pp_resource_get_type(flash_menu) != PP_RESOURCE_FLASH_MENU) {
        trace_error("%s, resource allocation failure\n", __func__);
        return 0;
    }

    struct flash_menu_create_param_s *param = g_slice_alloc0(sizeof(*param));
    param->flash_menu = flash_menu;
    param->menu_data  = menu_data;
    param->m_loop     = ppb_message_loop_get_current();
    param->depth      = ppb_message_loop_get_depth(param->m_loop) + 1;

    ppb_message_loop_post_work_with_result(param->m_loop,
                                           PP_MakeCCB(flash_menu_create_comt, param), 0, PP_OK,
                                           param->depth, __func__);
    ppb_message_loop_run_nested(param->m_loop);

    g_slice_free1(sizeof(*param), param);
    return flash_menu;
}
static void handle_tcp_connect_stage4(int sock, short event_flags, void *arg) { struct async_network_task_s *task = arg; struct pp_tcp_socket_s *ts = pp_resource_acquire(task->resource, PP_RESOURCE_TCP_SOCKET); if (!ts) { trace_warning("%s, tcp socket resource was closed during request (%s:%u)\n", __func__, task->host, (unsigned int)task->port); free(task->addr); task_destroy(task); return; } char buf[200]; socklen_t len = sizeof(buf); if (event_flags & EV_TIMEOUT) ts->is_connected = 0; else ts->is_connected = (getpeername(ts->sock, (struct sockaddr *)buf, &len) == 0); if (ts->is_connected) { ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_OK, 0, __func__); pp_resource_release(task->resource); free(task->addr); task_destroy(task); return; } // try other addresses, one by one task->addr_ptr++; if (task->addr_ptr < task->addr_count) { pp_resource_release(task->resource); handle_tcp_connect_stage3(task); return; } // no addresses left, fail gracefully trace_warning("%s, connection failed to all addresses (%s:%u)\n", __func__, task->host, (unsigned int)task->port); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, get_pp_errno(), 0, __func__); pp_resource_release(task->resource); free(task->addr); task_destroy(task); }
// Reads up to |bytes_to_read| bytes of the response body into |buffer|.
// When data is available it is read synchronously from the loader's spool
// file; otherwise (or when reads are already queued) a read task is scheduled
// and retried later. Returns the byte count directly for optional callbacks,
// PP_OK_COMPLETIONPENDING when the callback will be invoked asynchronously.
int32_t
ppb_url_loader_read_response_body(PP_Resource loader, void *buffer, int32_t bytes_to_read,
                                  struct PP_CompletionCallback callback)
{
    struct url_loader_read_task_s *rt;
    int32_t read_bytes = PP_ERROR_FAILED;
    struct pp_url_loader_s *ul = pp_resource_acquire(loader, PP_RESOURCE_URL_LOADER);
    if (!ul) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    if (ul->fd == -1) {
        trace_error("%s, fd==-1\n", __func__);
        pp_resource_release(loader);
        return PP_ERROR_FAILED;
    }

    if (ul->read_tasks) {
        // schedule task instead of immediate reading if there is another task
        // in the queue already (preserves read ordering)
        goto schedule_read_task;
    }

    // read from the spool file at the loader's current read position
    read_bytes = -1;
    off_t ofs = lseek(ul->fd, ul->read_pos, SEEK_SET);
    if (ofs != (off_t)-1)
        read_bytes = RETRY_ON_EINTR(read(ul->fd, buffer, bytes_to_read));

    if (read_bytes < 0)
        read_bytes = PP_ERROR_FAILED;
    else
        ul->read_pos += read_bytes;

    if (read_bytes == 0 && !ul->finished_loading) {
        // no data ready, schedule read task
        goto schedule_read_task;
    }

    pp_resource_release(loader);
    if (callback.flags & PP_COMPLETIONCALLBACK_FLAG_OPTIONAL)
        return read_bytes;

    // mandatory callback: deliver the result asynchronously on this loop
    ppb_message_loop_post_work_with_result(ppb_message_loop_get_current(), callback, 0,
                                           read_bytes, 0, __func__);
    return PP_OK_COMPLETIONPENDING;

schedule_read_task:
    // queue the read; it is retried when more data arrives
    rt = g_slice_alloc(sizeof(*rt));
    rt->url_loader = loader;
    rt->buffer = buffer;
    rt->bytes_to_read = bytes_to_read;
    rt->ccb = callback;
    rt->ccb_ml = ppb_message_loop_get_current();
    ul->read_tasks = g_list_append(ul->read_tasks, rt);
    pp_resource_release(loader);
    return PP_OK_COMPLETIONPENDING;
}
// Posts |callback| to the main-thread message loop after the given delay.
// |origin| names the posting function and is forwarded for tracing.
void
ppb_core_call_on_main_thread2(int32_t delay_in_milliseconds, struct PP_CompletionCallback callback,
                              int32_t result, const char *origin)
{
    PP_Resource main_message_loop = ppb_message_loop_get_for_main_thread();
    if (main_message_loop == 0)
        trace_error("%s, no main loop\n", __func__);

    // depth 1 — runnable at any nesting level (see ppb_message_loop_run_int)
    ppb_message_loop_post_work_with_result(main_message_loop, callback, delay_in_milliseconds,
                                           result, 1, origin);
}
static void handle_tcp_connect_stage2(int result, char type, int count, int ttl, void *addresses, void *arg) { struct async_network_task_s *task = arg; if (result != DNS_ERR_NONE || count < 1) { trace_warning("%s, evdns returned code %d, count = %d (%s:%u)\n", __func__, result, count, task->host, (unsigned int)task->port); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_ERROR_NAME_NOT_RESOLVED, 0, __func__); task_destroy(task); return; } evutil_make_socket_nonblocking(task->sock); task->addr_count = count; task->addr_ptr = 0; task->addr_type = type; if (type == DNS_IPv4_A) { task->addr = malloc(4 * count); memcpy(task->addr, addresses, 4 * count); } else if (type == DNS_IPv6_AAAA) { task->addr = malloc(16 * count); memcpy(task->addr, addresses, 16 * count); } else { trace_error("%s, bad evdns type %d (%s:%u)\n", __func__, type, task->host, (unsigned int)task->port); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_ERROR_FAILED, 0, __func__); task_destroy(task); return; } handle_tcp_connect_stage3(task); }
// Socket became writable: perform the queued TCP send and hand the byte
// count (or a PP error code from get_pp_errno) to the task's callback.
static void
handle_tcp_write_stage2(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;

    int32_t res = send(sock, task->buffer, task->bufsize, 0);
    if (res < 0)
        res = get_pp_errno();

    ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, res, 0,
                                           __func__);
    task_destroy(task);
}
static void n2p_deallocate(void *object) { struct deallocate_param_s *p = g_slice_alloc(sizeof(*p)); p->object = object; p->m_loop = ppb_message_loop_get_current(); p->depth = ppb_message_loop_get_depth(p->m_loop) + 1; ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(n2p_deallocate_comt, p), 0, PP_OK, p->depth, __func__); ppb_message_loop_run_nested(p->m_loop); g_slice_free1(sizeof(*p), p); }
// Queries the proxy configuration for |url| via a task on the current
// message loop, waiting in a nested loop for the answer.
struct PP_Var
ppb_flash_get_proxy_for_url(PP_Instance instance, const char *url)
{
    struct get_proxy_for_url_param_s *ctx = g_slice_alloc(sizeof(*ctx));
    ctx->instance_id = instance;
    ctx->url         = url;
    ctx->m_loop      = ppb_message_loop_get_current();
    ctx->depth       = ppb_message_loop_get_depth(ctx->m_loop) + 1;

    ppb_message_loop_post_work_with_result(ctx->m_loop, PP_MakeCCB(get_proxy_for_url_comt, ctx),
                                           0, PP_OK, ctx->depth, __func__);
    ppb_message_loop_run_nested(ctx->m_loop);

    struct PP_Var ret = ctx->result;
    g_slice_free1(sizeof(*ctx), ctx);
    return ret;
}
static void handle_tcp_connect_with_net_address(struct async_network_task_s *task) { if (task->netaddr.size == sizeof(struct sockaddr_in)) { struct sockaddr_in *sai = (void *)task->netaddr.data; task->port = ntohs(sai->sin_port); handle_tcp_connect_stage2(DNS_ERR_NONE, DNS_IPv4_A, 1, 3600, &sai->sin_addr, task); } else if (task->netaddr.size == sizeof(struct sockaddr_in6)) { struct sockaddr_in6 *sai = (void *)task->netaddr.data; task->port = ntohs(sai->sin6_port); handle_tcp_connect_stage2(DNS_ERR_NONE, DNS_IPv6_AAAA, 1, 3600, &sai->sin6_addr, task); } else { trace_error("%s, bad address type\n", __func__); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_ERROR_NAME_NOT_RESOLVED, 0, __func__); task_destroy(task); } }
// Handles an NPAPI enumerate request.
//
// For proxied objects the property list is fetched through a nested-loop
// round trip on the browser thread, then converted into an array of
// NPIdentifier values allocated with npn.memalloc for the caller.
bool
p2n_enumerate(NPObject *npobj, NPIdentifier **value, uint32_t *count)
{
    if (npobj->_class != &p2n_proxy_class) {
        // not one of ours; let the object's own class handle it
        return npobj->_class->enumerate(npobj, value, count);
    }

    struct enumerate_param_s *p = g_slice_alloc(sizeof(*p));
    p->npobj =  npobj;
    p->m_loop = ppb_message_loop_get_for_browser_thread();
    p->depth =  ppb_message_loop_get_depth(p->m_loop) + 1;

    ppb_message_loop_post_work_with_result(p->m_loop,
                                           PP_MakeCCB(p2n_enumerate_prepare_comt, p), 0, PP_OK,
                                           0, __func__);
    ppb_message_loop_run_nested(p->m_loop);

    bool result = p->result;
    *count = p->count;
    *value = npn.memalloc(p->count * sizeof(NPIdentifier));
    char *tmpbuf = malloc(1);
    for (uint32_t k = 0; k < p->count; k++) {
        uint32_t len = 0;
        const char *s = ppb_var_var_to_utf8(p->values[k], &len);

        // make zero-terminated string
        char *ptr = realloc(tmpbuf, len + 1);
        if (!ptr) {
            result = false;
            goto err;
        }
        tmpbuf = ptr;
        memcpy(tmpbuf, s, len);
        tmpbuf[len] = 0;

        // store into the array the caller receives; the original wrote
        // value[k], indexing the wrong pointer level (out of bounds for k>0)
        (*value)[k] = npn.getstringidentifier(tmpbuf);
    }

err:
    free(tmpbuf);
    g_slice_free1(sizeof(*p), p);
    return result;
}
static struct PP_Var n2p_construct(void *object, uint32_t argc, struct PP_Var *argv, struct PP_Var *exception) { struct construct_param_s *p = g_slice_alloc(sizeof(*p)); p->object = object; p->argc = argc; p->argv = argv; p->exception = exception; p->m_loop = ppb_message_loop_get_current(); p->depth = ppb_message_loop_get_depth(p->m_loop) + 1; ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(n2p_construct_comt, p), 0, PP_OK, p->depth, __func__); ppb_message_loop_run_nested(p->m_loop); struct PP_Var result = p->result; g_slice_free1(sizeof(*p), p); return result; }
// Proxies the PPB_Flash IsRectTopmost query through the current message loop
// and waits for the answer in a nested loop.
PP_Bool
ppb_flash_is_rect_topmost(PP_Instance instance, const struct PP_Rect *rect)
{
    if (!rect)
        return PP_FALSE;

    struct topmost_rect_param_s *ctx = g_slice_alloc(sizeof(*ctx));
    ctx->instance = instance;
    ctx->rect     = *rect;
    ctx->m_loop   = ppb_message_loop_get_current();
    ctx->depth    = ppb_message_loop_get_depth(ctx->m_loop) + 1;

    ppb_message_loop_post_work_with_result(ctx->m_loop, PP_MakeCCB(topmost_rect_comt, ctx), 0,
                                           PP_OK, ctx->depth, __func__);
    ppb_message_loop_run_nested(ctx->m_loop);

    PP_Bool ret = ctx->result;
    g_slice_free1(sizeof(*ctx), ctx);
    return ret;
}
// Third stage of a TCP connect: start a non-blocking connect(2) to the
// address at index task->addr_ptr, then arm a write event so stage4 runs
// when the socket becomes writable (or the connect timeout fires).
static void
handle_tcp_connect_stage3(struct async_network_task_s *task)
{
    int res = -1;
    if (task->addr_type == DNS_IPv4_A) {
        struct sockaddr_in sai;
        memset(&sai, 0, sizeof(sai));
        sai.sin_family = AF_INET;
        // task->addr holds packed 4-byte A records; select the addr_ptr-th
        sai.sin_addr.s_addr = *((uint32_t *)task->addr + task->addr_ptr);
        sai.sin_port = htons(task->port);
        res = connect(task->sock, (struct sockaddr *)&sai, sizeof(sai));
    } else if (task->addr_type == DNS_IPv6_AAAA) {
        struct sockaddr_in6 sai;
        memset(&sai, 0, sizeof(sai));
        sai.sin6_family = AF_INET6;
        // task->addr holds packed 16-byte AAAA records
        memcpy(&sai.sin6_addr,
               (char *)task->addr + task->addr_ptr * sizeof(sai.sin6_addr),
               sizeof(sai.sin6_addr));
        sai.sin6_port = htons(task->port);
        res = connect(task->sock, (struct sockaddr *)&sai, sizeof(sai));
    } else {
        // handled in stage2
        trace_error("%s, never reached\n", __func__);
    }

    // EINPROGRESS is the expected outcome for a non-blocking connect;
    // anything else is an immediate failure
    if (res != 0 && errno != EINPROGRESS) {
        trace_error("%s, res = %d, errno = %d (%s:%u)\n", __func__, res, errno, task->host,
                    (unsigned int)task->port);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               get_pp_errno(), 0, __func__);
        free(task->addr);
        task_destroy(task);
        return;
    }

    struct event *ev = event_new(event_b, task->sock, EV_WRITE, handle_tcp_connect_stage4, task);
    add_event_mapping(task, ev);
    event_add(ev, &connect_timeout);
}
// Socket became readable: receive into the task's buffer and report the byte
// count (or PP error code) to the callback. A zero-byte read marks EOF on
// the TCP socket resource.
static void
handle_tcp_read_stage2(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;

    int32_t res = recv(sock, task->buffer, task->bufsize, 0);
    if (res < 0) {
        res = get_pp_errno();
    } else if (res == 0) {
        // remote side closed the connection; remember EOF on the socket
        struct pp_tcp_socket_s *ts = pp_resource_acquire(task->resource, PP_RESOURCE_TCP_SOCKET);
        if (ts) {
            ts->seen_eof = 1;
            pp_resource_release(task->resource);
        }
    }

    ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, res, 0,
                                           __func__);
    task_destroy(task);
}
// Reads data of the given |format| from the Flash clipboard via a
// nested-loop round trip on the current message loop.
struct PP_Var
ppb_flash_clipboard_read_data(PP_Instance instance_id, PP_Flash_Clipboard_Type clipboard_type,
                              uint32_t format)
{
    if (!clipboard_type_and_format_are_supported(clipboard_type, format, __func__))
        return PP_MakeUndefined();

    struct clipboard_read_data_param_s *ctx = g_slice_alloc(sizeof(*ctx));
    ctx->clipboard_type = clipboard_type;
    ctx->format         = format;
    ctx->m_loop         = ppb_message_loop_get_current();
    ctx->depth          = ppb_message_loop_get_depth(ctx->m_loop) + 1;

    ppb_message_loop_post_work_with_result(ctx->m_loop,
                                           PP_MakeCCB(clipboard_read_data_comt, ctx), 0, PP_OK,
                                           ctx->depth, __func__);
    ppb_message_loop_run_nested(ctx->m_loop);

    struct PP_Var ret = ctx->result;
    g_slice_free1(sizeof(*ctx), ctx);
    return ret;
}
static void call_plugin_shutdown_module(void) { if (!module_dl_handler) return; void (*ppp_shutdown_module)(void); ppp_shutdown_module = dlsym(module_dl_handler, "PPP_ShutdownModule"); if (!ppp_shutdown_module) return; struct call_plugin_shutdown_module_param_s *p = g_slice_alloc(sizeof(*p)); p->m_loop = ppb_message_loop_get_for_browser_thread(); p->depth = ppb_message_loop_get_depth(p->m_loop) + 1; p->ppp_shutdown_module = ppp_shutdown_module; ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(call_plugin_shutdown_module_prepare_comt, p), 0, PP_OK, p->depth, __func__); ppb_message_loop_run_nested(p->m_loop); g_slice_free1(sizeof(*p), p); }
static void handle_disconnect_stage1(struct async_network_task_s *task) { struct event *ev = evtimer_new(event_b, handle_disconnect_stage2, task); struct timeval timeout = {.tv_sec = 0}; add_event_mapping(task, ev); event_add(ev, &timeout); } static void handle_udp_recv_stage2(int sock, short event_flags, void *arg) { struct async_network_task_s *task = arg; struct pp_udp_socket_s *us = pp_resource_acquire(task->resource, PP_RESOURCE_UDP_SOCKET); if (!us) { trace_error("%s, bad resource\n", __func__); task_destroy(task); return; } socklen_t len = sizeof(task->addr_from->data); int32_t retval = recvfrom(sock, task->buffer, task->bufsize, 0, (struct sockaddr *)task->addr_from->data, &len); task->addr_from->size = len; if (task->addr_from_resource) pp_resource_unref(task->addr_from_resource); if (retval < 0) retval = get_pp_errno(); else if (retval == 0) { us->seen_eof = 1; // TODO: is it needed? } pp_resource_release(task->resource); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, retval, 0, __func__); task_destroy(task); } static void handle_udp_recv_stage1(struct async_network_task_s *task) { struct pp_udp_socket_s *us = pp_resource_acquire(task->resource, PP_RESOURCE_UDP_SOCKET); if (!us) { trace_error("%s, bad resource\n", __func__); task_destroy(task); return; } memset(task->addr_from, 0, sizeof(*task->addr_from)); struct event *ev = event_new(event_b, us->sock, EV_READ, handle_udp_recv_stage2, task); pp_resource_release(task->resource); add_event_mapping(task, ev); event_add(ev, NULL); } static void handle_udp_send_stage2(int sock, short event_flags, void *arg) { struct async_network_task_s *task = arg; int retval = sendto(sock, task->buffer, task->bufsize, MSG_NOSIGNAL, (struct sockaddr *)task->netaddr.data, task->netaddr.size); if (retval < 0) retval = get_pp_errno(); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, retval, 0, __func__); task_destroy(task); } static void handle_udp_send_stage1(struct 
async_network_task_s *task) { struct pp_udp_socket_s *us = pp_resource_acquire(task->resource, PP_RESOURCE_UDP_SOCKET); if (!us) { trace_error("%s, bad resource\n", __func__); task_destroy(task); return; } // try to send immediately, but don't wait int retval = sendto(us->sock, task->buffer, task->bufsize, MSG_DONTWAIT | MSG_NOSIGNAL, (struct sockaddr *)task->netaddr.data, task->netaddr.size); pp_resource_release(task->resource); if (retval >= 0) { // successfully sent ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, retval, 0, __func__); task_destroy(task); return; } // need to wait struct event *ev = event_new(event_b, us->sock, EV_WRITE, handle_udp_send_stage2, task); add_event_mapping(task, ev); event_add(ev, NULL); } static void handle_host_resolve_stage2(int result, char type, int count, int ttl, void *addresses, void *arg) { struct async_network_task_s *task = arg; if (result != DNS_ERR_NONE || count < 1) { trace_warning("%s, evdns returned code %d, count = %d (%s:%u)\n", __func__, result, count, task->host, (unsigned int)task->port); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_ERROR_NAME_NOT_RESOLVED, 0, __func__); task_destroy(task); return; } struct pp_host_resolver_s *hr = pp_resource_acquire(task->resource, PP_RESOURCE_HOST_RESOLVER); if (!hr) { trace_error("%s, bad resource\n", __func__); task_destroy(task); return; } hr->addr_count = count; hr->addrs = calloc(count, sizeof(struct PP_NetAddress_Private)); if (type == DNS_IPv4_A) { struct in_addr *ipv4_addrs = addresses; for (int k = 0; k < count; k ++) { struct sockaddr_in sai = { .sin_family = AF_INET, .sin_port = htons(task->port), }; memcpy(&sai.sin_addr, &ipv4_addrs[k], sizeof(struct in_addr)); hr->addrs[k].size = sizeof(struct sockaddr_in); memcpy(hr->addrs[k].data, &sai, sizeof(sai)); } ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_OK, 0, __func__); } else if (type == DNS_IPv6_AAAA) { struct in6_addr 
*ipv6_addrs = addresses; for (int k = 0; k < count; k ++) { struct sockaddr_in6 sai6 = { .sin6_family = AF_INET6, .sin6_port = htons(task->port), }; memcpy(&sai6.sin6_addr, &ipv6_addrs[k], sizeof(struct in6_addr)); hr->addrs[k].size = sizeof(struct sockaddr_in6); memcpy(hr->addrs[k].data, &sai6, sizeof(sai6)); } ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_OK, 0, __func__); } else { trace_error("%s, bad evdns type %d (%s:%u)\n", __func__, type, task->host, (unsigned int)task->port); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_ERROR_FAILED, 0, __func__); } pp_resource_release(task->resource); task_destroy(task); } static void handle_host_resolve_stage1(struct async_network_task_s *task) { struct evdns_request *req; // queue DNS request req = evdns_base_resolve_ipv4(evdns_b, task->host, DNS_QUERY_NO_SEARCH, handle_host_resolve_stage2, task); // TODO: what about ipv6? if (!req) { trace_warning("%s, early dns resolution failure (%s:%u)\n", __func__, task->host, (unsigned int)task->port); ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_ERROR_NAME_NOT_RESOLVED, 0, __func__); task_destroy(task); return; } } static void * network_worker_thread(void *param) { event_base_dispatch(event_b); event_base_free(event_b); trace_error("%s, thread terminated\n", __func__); return NULL; }
int32_t ppb_message_loop_run_int(PP_Resource message_loop, int nested, int increase_depth) { if (this_thread_message_loop != message_loop) { trace_error("%s, not attached to current thread\n", __func__); return PP_ERROR_WRONG_THREAD; } struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP); if (!ml) { trace_error("%s, bad resource\n", __func__); return PP_ERROR_BADRESOURCE; } // prevent nested loops if (!nested && ml->running) { trace_error("%s, trying to run nested loop without declaring as nested\n", __func__); pp_resource_release(message_loop); return PP_ERROR_INPROGRESS; } struct { int running; int teardown; } saved_state = { .running = ml->running, .teardown = ml->teardown, }; ml->running = 1; ml->teardown = 0; if (increase_depth) ml->depth++; int teardown = 0; int destroy_ml = 0; int depth = ml->depth; pp_resource_ref(message_loop); GAsyncQueue *async_q = ml->async_q; GQueue *int_q = ml->int_q; pp_resource_release(message_loop); while (1) { struct timespec now; struct message_loop_task_s *task = g_queue_peek_head(int_q); gint64 timeout = 1000 * 1000; if (task) { clock_gettime(CLOCK_REALTIME, &now); timeout = (task->when.tv_sec - now.tv_sec) * 1000 * 1000 + (task->when.tv_nsec - now.tv_nsec) / 1000; if (timeout <= 0) { // remove task from the queue g_queue_pop_head(int_q); // check if depth is correct if (task->depth > 0 && task->depth < depth) { // wrong, reschedule it a bit later task->when = add_ms(now, 10); g_queue_insert_sorted(int_q, task, time_compare_func, NULL); continue; } if (task->terminate) { if (depth > 1) { // exit at once, all remaining task will be processed by outer loop g_slice_free(struct message_loop_task_s, task); break; } // it's the outermost loop, we should wait for all tasks to be run ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP); if (ml) { ml->teardown = 1; teardown = 1; destroy_ml = task->should_destroy_ml; pp_resource_release(message_loop); } g_slice_free(struct 
message_loop_task_s, task); continue; } // run task const struct PP_CompletionCallback ccb = task->ccb; if (ccb.func) { ccb.func(ccb.user_data, task->result_to_pass); } // free task g_slice_free(struct message_loop_task_s, task); continue; // run cycle again } } else if (teardown) { // teardown, no tasks in queue left break; } task = g_async_queue_timeout_pop(async_q, timeout); if (task) g_queue_insert_sorted(int_q, task, time_compare_func, NULL); } // mark thread as non-running ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP); if (ml) { if (increase_depth) ml->depth--; ml->running = 0; if (nested) { ml->running = saved_state.running; ml->teardown = saved_state.teardown; } pp_resource_release(message_loop); } pp_resource_unref(message_loop); if (destroy_ml) pp_resource_unref(message_loop); return PP_OK; } int32_t ppb_message_loop_post_work_with_result(PP_Resource message_loop, struct PP_CompletionCallback callback, int64_t delay_ms, int32_t result_to_pass, int depth) { if (callback.func == NULL) { trace_error("%s, callback.func == NULL\n", __func__); return PP_ERROR_BADARGUMENT; } struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP); if (!ml) { trace_error("%s, bad resource\n", __func__); return PP_ERROR_BADRESOURCE; } if (ml->running && ml->teardown) { // message loop is in a teardown state pp_resource_release(message_loop); trace_error("%s, quit request received, no additional work could be posted\n", __func__); return PP_ERROR_FAILED; } struct message_loop_task_s *task = g_slice_alloc0(sizeof(*task)); task->result_to_pass = result_to_pass; task->ccb = callback; task->depth = depth; // calculate absolute time callback should be run at clock_gettime(CLOCK_REALTIME, &task->when); task->when.tv_sec += delay_ms / 1000; task->when.tv_nsec += (delay_ms % 1000) * 1000 * 1000; while (task->when.tv_nsec >= 1000 * 1000 * 1000) { task->when.tv_sec += 1; task->when.tv_nsec -= 1000 * 1000 * 1000; } 
g_async_queue_push(ml->async_q, task); pp_resource_release(message_loop); return PP_OK; } int32_t ppb_message_loop_post_work(PP_Resource message_loop, struct PP_CompletionCallback callback, int64_t delay_ms) { return ppb_message_loop_post_work_with_result(message_loop, callback, delay_ms, PP_OK, 0); } int32_t ppb_message_loop_post_quit_depth(PP_Resource message_loop, PP_Bool should_destroy, int depth) { struct pp_message_loop_s *ml = pp_resource_acquire(message_loop, PP_RESOURCE_MESSAGE_LOOP); if (!ml) { trace_error("%s, bad resource\n", __func__); return PP_ERROR_BADRESOURCE; } struct message_loop_task_s *task = g_slice_alloc0(sizeof(*task)); task->terminate = 1; task->depth = depth; task->should_destroy_ml = should_destroy; task->result_to_pass = PP_OK; clock_gettime(CLOCK_REALTIME, &task->when); // run as early as possible g_async_queue_push(ml->async_q, task); pp_resource_release(message_loop); return PP_OK; } int32_t ppb_message_loop_post_quit(PP_Resource message_loop, PP_Bool should_destroy) { int depth = ppb_message_loop_get_depth(message_loop); return ppb_message_loop_post_quit_depth(message_loop, should_destroy, depth); }
// Re-issues the request to the URL saved from a prior redirect response.
// All state from the previous request/response is dropped, the loader is
// reset to a GET of the redirect target, and the open is performed through
// a nested-loop round trip on the current message loop.
int32_t
ppb_url_loader_follow_redirect(PP_Resource loader, struct PP_CompletionCallback callback)
{
    struct pp_url_loader_s *ul = pp_resource_acquire(loader, PP_RESOURCE_URL_LOADER);
    if (!ul) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    // keep the target before wiping the loader state below
    char *new_url = nullsafe_strdup(ul->redirect_url);

    free_and_nullify(ul->url);
    free_and_nullify(ul->redirect_url);
    free_and_nullify(ul->status_line);
    free_and_nullify(ul->headers);
    free_and_nullify(ul->request_headers);
    post_data_free(ul->post_data);
    ul->post_data = NULL;

    if (ul->fd >= 0) {
        close(ul->fd);
        ul->fd = -1;
    }

    // abort further handling of the NPStream
    if (ul->np_stream) {
        ul->np_stream->pdata = NULL;
        ul->np_stream = NULL;
    }

    ul->fd = open_temporary_file();
    ul->url = new_url;
    ul->read_pos = 0;
    ul->method = PP_METHOD_GET;   // the redirected request is re-issued as GET
    ul->ccb = callback;
    ul->ccb_ml = ppb_message_loop_get_current();

    struct url_loader_open_param_s *p = g_slice_alloc(sizeof(*p));
    p->url =         ul->url;
    p->loader =      loader;
    p->instance_id = ul->instance->id;
    p->method =      ul->method;
    p->request_headers = ul->request_headers;
    p->custom_referrer_url = ul->custom_referrer_url;
    p->custom_content_transfer_encoding = ul->custom_content_transfer_encoding;
    p->custom_user_agent = ul->custom_user_agent;
    p->target =      NULL;
    p->post_len =    0;
    p->post_data =   NULL;
    p->m_loop =      ppb_message_loop_get_current();
    p->depth =       ppb_message_loop_get_depth(p->m_loop) + 1;

    ppb_core_add_ref_resource(loader);   // add ref to ensure data in ul remain accessible
    pp_resource_release(loader);

    // perform the actual open and wait for its result in a nested loop
    ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(url_loader_open_comt, p), 0,
                                           PP_OK, p->depth, __func__);
    ppb_message_loop_run_nested(p->m_loop);

    int retval = p->retval;
    g_slice_free1(sizeof(*p), p);

    if (retval != NPERR_NO_ERROR)
        return PP_ERROR_FAILED;

    if (callback.func == NULL) {
        // blocking variant: poll until the download finishes
        // TODO: remove busy loop
        int done = 0;
        while (!done) {
            ul = pp_resource_acquire(loader, PP_RESOURCE_URL_LOADER);
            if (ul) {
                done = ul->finished_loading;
                pp_resource_release(loader);
            } else {
                break;
            }
            printf("waitin'\n");
            usleep(10000);
        }
        return PP_OK;
    }

    return PP_OK_COMPLETIONPENDING;
}
// Opens a URL request described by |request_info| on |loader|, optionally
// targeting a named browser window/frame (|target|). Copies all request
// parameters from the request-info resource into the loader, then performs
// the open through a nested-loop round trip on the current message loop.
int32_t
ppb_url_loader_open_target(PP_Resource loader, PP_Resource request_info,
                           struct PP_CompletionCallback callback, const char *target)
{
    struct pp_url_loader_s *ul = pp_resource_acquire(loader, PP_RESOURCE_URL_LOADER);
    if (!ul) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    struct pp_url_request_info_s *ri = pp_resource_acquire(request_info,
                                                           PP_RESOURCE_URL_REQUEST_INFO);
    if (!ri) {
        trace_error("%s, bad resource\n", __func__);
        pp_resource_release(loader);
        return PP_ERROR_BADRESOURCE;
    }

    // javascript: URLs are used verbatim; everything else is resolved
    // relative to the document
    struct PP_Var full_url;
    if (ri->is_immediate_javascript) {
        full_url = ppb_var_var_from_utf8_z(ri->url);
    } else {
        struct PP_Var rel_url = ppb_var_var_from_utf8_z(ri->url);
        full_url = ppb_url_util_resolve_relative_to_document(ul->instance->id, rel_url, NULL);
        ppb_var_release(rel_url);
    }

    // copy request parameters into the loader
    ul->url = nullsafe_strdup(ppb_var_var_to_utf8(full_url, NULL));
    ul->method = ri->method;
    ul->read_pos = 0;
    ul->request_headers = nullsafe_strdup(ri->headers);
    ul->follow_redirects = ri->follow_redirects;
    ul->stream_to_file = ri->stream_to_file;
    ul->record_download_progress = ri->record_download_progress;
    ul->record_upload_progress = ri->record_upload_progress;
    ul->custom_referrer_url = nullsafe_strdup(ri->custom_referrer_url);
    ul->allow_cross_origin_requests = ri->allow_cross_origin_requests;
    ul->allow_credentials = ri->allow_credentials;
    ul->custom_content_transfer_encoding = nullsafe_strdup(ri->custom_content_transfer_encoding);
    ul->custom_user_agent = nullsafe_strdup(ri->custom_user_agent);
    ul->target = nullsafe_strdup(target);

// strip trailing newlines the browser would reject
#define TRIM_NEWLINE(s) s = trim_nl(s)
    TRIM_NEWLINE(ul->request_headers);
    TRIM_NEWLINE(ul->custom_referrer_url);
    TRIM_NEWLINE(ul->custom_content_transfer_encoding);
    TRIM_NEWLINE(ul->custom_user_agent);

    post_data_free(ul->post_data);
    ul->post_data = post_data_duplicate(ri->post_data);

    ul->fd = open_temporary_file();
    ul->ccb = callback;
    ul->ccb_ml = ppb_message_loop_get_current();
    ppb_var_release(full_url);
    pp_resource_release(request_info);

    // quirk: the very first loader is wired to the stream the browser pushes
    // unrequested, instead of issuing a new request
    if (config.quirks.connect_first_loader_to_unrequested_stream) {
        if (ul->instance->content_url_loader == 0) {
            ul->instance->content_url_loader = loader;
            pp_resource_release(loader);
            return PP_OK_COMPLETIONPENDING;
        }
    }

    struct url_loader_open_param_s *p = g_slice_alloc(sizeof(*p));
    p->url =         ul->url;
    p->loader =      loader;
    p->instance_id = ul->instance->id;
    p->method =      ul->method;
    p->request_headers = ul->request_headers;
    p->custom_referrer_url = ul->custom_referrer_url;
    p->custom_content_transfer_encoding = ul->custom_content_transfer_encoding;
    p->custom_user_agent = ul->custom_user_agent;
    p->target =      ul->target;
    p->post_data =   ul->post_data;
    p->m_loop =      ppb_message_loop_get_current();
    p->depth =       ppb_message_loop_get_depth(p->m_loop) + 1;

    ppb_core_add_ref_resource(loader);   // add ref to ensure data in ul remain accessible
    pp_resource_release(loader);

    // perform the actual open and wait for its result in a nested loop
    ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(url_loader_open_comt, p), 0,
                                           PP_OK, p->depth, __func__);
    ppb_message_loop_run_nested(p->m_loop);

    int retval = p->retval;
    g_slice_free1(sizeof(*p), p);

    if (retval != NPERR_NO_ERROR)
        return PP_ERROR_FAILED;

    if (callback.func == NULL) {
        // blocking variant: poll until the download finishes
        // TODO: remove busy loop
        int done = 0;
        while (!done) {
            ul = pp_resource_acquire(loader, PP_RESOURCE_URL_LOADER);
            if (ul) {
                done = ul->finished_loading;
                pp_resource_release(loader);
            } else {
                break;
            }
            printf("waitin'\n");
            usleep(10000);
        }
        return PP_OK;
    }

    return PP_OK_COMPLETIONPENDING;
}