/** * \brief Writes the data and/or spare area of a NANDFLASH page, after calculating an * ECC for the data area and storing it in the spare. If no data buffer is * provided, the ECC is read from the existing page spare. If no spare buffer * is provided, the spare area is still written with the ECC information * calculated on the data buffer. * \param nand Pointer to an EccNandFlash instance. * \param block Number of the block to write in. * \param page Number of the page to write inside the given block. * \param data Data area buffer, can be 0. * \param spare Spare area buffer, can be 0. * \return 0 if successful; otherwise returns an error code. */ static uint8_t ecc_write_page_with_swecc(const struct _nand_flash *nand, uint16_t block, uint16_t page, void *data, void *spare) { uint8_t error; uint8_t hamming[NAND_MAX_SPARE_ECC_BYTES]; uint16_t page_data_size = nand_model_get_page_data_size(&nand->model); uint16_t page_spare_size = nand_model_get_page_spare_size(&nand->model); /* Compute ECC on the new data, if provided */ /* If not provided, hamming code set to 0xFFFF.. to keep existing bytes */ memset(hamming, 0xFF, NAND_MAX_SPARE_ECC_BYTES); if (data) { /* Compute hamming code on data */ hamming_compute_256x(data, page_data_size, hamming); } /* Store code in spare buffer (if no buffer provided, use a temp. one) */ if (!spare) { spare = spare_buf; memset(spare, 0xFF, page_spare_size); } nand_spare_scheme_write_ecc(nand_model_get_scheme(&nand->model), spare, hamming); /* Perform write operation */ error = nand_raw_write_page(nand, block, page, data, spare); if (error) { trace_error("nand_ecc_write_page: Failed to write page\r\n"); return error; } return 0; }
static void call_forceredraw_ptac(void *param) { struct pp_instance_s *pp_i = tables_get_pp_instance(GPOINTER_TO_SIZE(param)); if (!pp_i) { trace_error("%s, bad instance\n", __func__); return; } if (pp_i->is_fullscreen || pp_i->windowed_mode) { XEvent ev = { .xgraphicsexpose = { .type = GraphicsExpose, .drawable = pp_i->is_fullscreen ? pp_i->fs_wnd : pp_i->wnd, .width = pp_i->is_fullscreen ? pp_i->fs_width : pp_i->width, .height = pp_i->is_fullscreen ? pp_i->fs_height : pp_i->height, } }; pthread_mutex_lock(&display.lock); XSendEvent(display.x, ev.xgraphicsexpose.drawable, True, ExposureMask, &ev); XFlush(display.x); pthread_mutex_unlock(&display.lock); } else {
static void set_text_input_type_ptac(void *param) { struct set_text_input_type_param_s *p = param; struct pp_instance_s *pp_i = tables_get_pp_instance(p->instance); if (!pp_i) { trace_error("%s, bad instance\n", __func__); return; } if (pp_i->im_context) gw_gtk_im_context_focus_out(pp_i->im_context); switch (p->type) { case PP_TEXTINPUT_TYPE_DEV_NONE: case PP_TEXTINPUT_TYPE_DEV_PASSWORD: pp_i->im_context = NULL; break; case PP_TEXTINPUT_TYPE_DEV_TEXT: pp_i->im_context = pp_i->im_context_multi; break; default: pp_i->im_context = pp_i->im_context_simple; break; } pp_i->textinput_type = p->type; if (pp_i->im_context) gw_gtk_im_context_focus_in(pp_i->im_context); g_slice_free1(sizeof(*p), p); }
/* NPAPI getProperty entry point. For proxy objects wrapping PPAPI vars, the
 * lookup is marshalled onto the browser-thread message loop and awaited in a
 * nested loop; for all other objects the call is forwarded to the object's
 * own class. Returns false when the identifier is not a string. */
bool
p2n_get_property(NPObject *npobj, NPIdentifier name, NPVariant *np_result)
{
    if (!npn.identifierisstring(name)) {
        trace_error("%s, name is not a string\n", __func__);
        return false;
    }

    /* not ours — delegate straight to the object's class */
    if (npobj->_class != &p2n_proxy_class)
        return npobj->_class->getProperty(npobj, name, np_result);

    struct get_property_param_s *p = g_slice_alloc(sizeof(*p));
    p->npobj =     npobj;
    p->name =      npn.utf8fromidentifier(name);
    p->np_result = np_result;
    p->m_loop =    ppb_message_loop_get_for_browser_thread();
    p->depth =     ppb_message_loop_get_depth(p->m_loop) + 1;

    /* run the actual work on the browser thread, wait in a nested loop */
    ppb_message_loop_post_work_with_result(p->m_loop,
                                           PP_MakeCCB(p2n_get_property_prepare_comt, p), 0,
                                           PP_OK, 0, __func__);
    ppb_message_loop_run_nested(p->m_loop);

    const bool res = p->result;
    npn.memfree(p->name);
    g_slice_free1(sizeof(*p), p);
    return res;
}
/* Starts an asynchronous host name resolution. Records the host on the
 * resolver resource, then queues an ASYNC_NETWORK_HOST_RESOLVE task that will
 * invoke `callback` on the caller's message loop when done.
 * `hint` is currently unused. Returns PP_ERROR_BADRESOURCE for a bad
 * resolver, PP_OK_COMPLETIONPENDING otherwise. */
int32_t
ppb_host_resolver_resolve(PP_Resource host_resolver, const char *host, uint16_t port,
                          const struct PP_HostResolver_Private_Hint *hint,
                          struct PP_CompletionCallback callback)
{
    struct pp_host_resolver_s *hr = pp_resource_acquire(host_resolver,
                                                        PP_RESOURCE_HOST_RESOLVER);
    if (!hr) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    /* remember the requested host on the resource itself */
    hr->host = nullsafe_strdup(host);
    pp_resource_release(host_resolver);

    struct async_network_task_s *task = async_network_task_create();
    task->type =        ASYNC_NETWORK_HOST_RESOLVE;
    task->resource =    host_resolver;
    task->host =        nullsafe_strdup(host);  /* task owns its own copy */
    task->port =        port;
    task->callback =    callback;
    task->callback_ml = ppb_message_loop_get_current();

    async_network_task_push(task);
    return PP_OK_COMPLETIONPENDING;
}
/* Fetches the GL info log for a shader whose compilation failed and logs it
 * together with the shader source, so the failure is diagnosable from traces.
 * Allocation failure for the log buffer is reported and swallowed. */
static void
report_shader_compile_error(const char *fname, GLuint shader, const char *shader_body)
{
    GLint log_length = 0;
    glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_length);

    /* +1 leaves room for the terminating NUL even when the length is zero */
    char *log = malloc(log_length + 1);
    if (!log) {
        trace_error("%s, can't allocate memory\n", __func__);
        return;
    }

    glGetShaderInfoLog(shader, log_length + 1, NULL, log);
    trace_error("%s, compilation of shader\n%s\nfailed with error: %s\n", fname, shader_body,
                log);
    free(log);
}
/* Appends a copy of `len` bytes at `data` to the request's POST body.
 * Returns PP_TRUE on success, PP_FALSE on bad resource or allocation failure.
 * The request keeps ownership of the copied block (stored in ri->post_data). */
PP_Bool
ppb_url_request_info_append_data_to_body(PP_Resource request, const void *data, uint32_t len)
{
    struct pp_url_request_info_s *ri = pp_resource_acquire(request,
                                                           PP_RESOURCE_URL_REQUEST_INFO);
    if (!ri) {
        trace_error("%s, bad resource\n", __func__);
        return PP_FALSE;
    }

    PP_Bool retval = PP_FALSE;
    struct post_data_item_s pdi = { 0 };

    pdi.data = g_memdup(data, len);
    /* fixed: g_memdup() legitimately returns NULL when len == 0, so NULL is
     * only an allocation failure when bytes were actually requested */
    if (!pdi.data && len > 0) {
        retval = PP_FALSE;
        goto err;
    }

    pdi.len = len;
    g_array_append_val(ri->post_data, pdi);
    retval = PP_TRUE;

err:
    pp_resource_release(request);
    return retval;
}
IPAddress WiFiClass::getLocalIP() { FILE *fp = NULL; char cmd[128]; uint8_t ipb[4]; IPAddress ip; trace_debug("getLocalIP"); sprintf(cmd, "ifconfig %s | egrep \"inet addr\" | cut -d: -f2- > %s", ARDUINO_WLAN, TMP_PATH); system(cmd); if (NULL == (fp = fopen(TMP_PATH, "r"))) { trace_error("can't open handle to %s", TMP_PATH); return ip; } fscanf(fp, "%s", cmd); /* inet addr */ fclose(fp); trace_debug("my IP=%s", cmd); if(isdigit(cmd[0])) { sscanf(cmd, "%hhd.%hhd.%hhd.%hhd", &ipb[0], &ipb[1], &ipb[2], &ipb[3]); ip._sin.sin_addr.s_addr = ( ((uint32_t)ipb[3])<<24 | \ ((uint32_t)ipb[2])<<16 | ((uint32_t)ipb[1])<<8 | ((uint32_t)ipb[0]) ); trace_debug("returning ip %3d.%3d.%3d.%3d", (ip._sin.sin_addr.s_addr&0x000000FF), (ip._sin.sin_addr.s_addr&0x0000FF00)>>8, (ip._sin.sin_addr.s_addr&0x00FF0000)>>16, (ip._sin.sin_addr.s_addr&0xFF000000)>>24); } else {
/* Validates and applies an SPI clock polarity/phase mode to the open spidev
 * descriptor, caching it in this->mode so begin() can restore it later.
 * Invalid modes are rejected with a trace and no state change.
 * NOTE(review): the case arms map each mode macro to itself — this only works
 * because the Arduino-level SPI_MODEn macros and the Linux spidev values
 * coincide here; the explicit switch still guards against invalid input.
 * Confirm the macros stay aligned if either header changes. */
void SPIClass::setDataMode(uint8_t mode)
{
	uint8_t linuxSpiMode = 0;
	switch(mode) {
	case SPI_MODE0:
		linuxSpiMode = SPI_MODE0;
		break;
	case SPI_MODE1:
		linuxSpiMode = SPI_MODE1;
		break;
	case SPI_MODE2:
		linuxSpiMode = SPI_MODE2;
		break;
	case SPI_MODE3:
		linuxSpiMode = SPI_MODE3;
		break;
	default:
		trace_error("Invalid SPI mode specified\n");
		return;
	}
	/* push the mode to the kernel driver, then remember it */
	spi_setmode (this->fd, linuxSpiMode);
	this->mode = mode;
}
/* Queues a paint-image-data task on a 2D graphics context. The task holds its
 * own reference to the image data resource; `top_left` defaults to (0,0) when
 * NULL and `src_rect` is optional (src_is_set records its presence). The task
 * is executed later, on flush. */
void
ppb_graphics2d_paint_image_data(PP_Resource graphics_2d, PP_Resource image_data,
                                const struct PP_Point *top_left, const struct PP_Rect *src_rect)
{
    struct pp_graphics2d_s *g2d = pp_resource_acquire(graphics_2d, PP_RESOURCE_GRAPHICS2D);
    if (!g2d) {
        trace_error("%s, bad resource\n", __func__);
        return;
    }

    struct g2d_paint_task_s *task = g_slice_alloc(sizeof(*task));
    task->type = gpt_paint_id;

    pp_resource_ref(image_data);    /* task owns a ref until it is handled */
    task->image_data = image_data;
    task->src_is_set = (src_rect != NULL);

    if (top_left == NULL) {
        task->ofs.x = 0;
        task->ofs.y = 0;
    } else {
        memcpy(&task->ofs, top_left, sizeof(*top_left));
    }

    if (src_rect != NULL)
        memcpy(&task->src, src_rect, sizeof(*src_rect));

    g2d->task_list = g_list_append(g2d->task_list, task);
    pp_resource_release(graphics_2d);
}
/* Deep-copies a POST-data array. File-backed items share the file_ref
 * resource (its refcount is bumped); memory-backed items get their data block
 * cloned. Items whose data cannot be cloned are skipped with an error trace.
 * Returns NULL when the input is NULL or a new array cannot be created;
 * the caller owns the returned array. */
GArray *
post_data_duplicate(GArray *post_data)
{
    if (post_data == NULL)
        return NULL;

    GArray *copy = post_data_new();
    if (copy == NULL)
        return NULL;

    for (guint idx = 0; idx < post_data->len; idx++) {
        struct post_data_item_s item = g_array_index(post_data, struct post_data_item_s, idx);

        if (item.file_ref != 0) {
            /* file-backed: share the resource, take an extra reference */
            ppb_core_add_ref_resource(item.file_ref);
        } else {
            /* memory-backed: clone the bytes */
            item.data = g_memdup(item.data, item.len);
            if (item.data == NULL) {
                trace_error("%s, can't allocate memory\n", __func__);
                continue;   /* skip this item, keep duplicating the rest */
            }
        }

        g_array_append_val(copy, item);
    }

    return copy;
}
/* Reports whether this File object is usable (guards against method calls
 * through a null File pointer).
 * NOTE(review): `if (!this)` tests the this-pointer for NULL, which is
 * undefined behavior in standard C++ — a conforming compiler is allowed to
 * assume `this` is non-null and delete this branch entirely. It only "works"
 * on compilers that tolerate it; the real fix is to null-check at the call
 * sites. Also note the trace message looks truncated ("... in "). */
boolean File::check_exists(){
	if (!this) {
		trace_error("Empty or closed file in ");
		return false;
	}
	return true;
}
/* Drives a GPIO pin high or low.
 * Special cases, in order:
 *  - out-of-range pin: silently ignored;
 *  - pin currently configured as input: driving it high enables the pullup
 *    (via pinMode), driving it low is only logged as an error — the pin is
 *    not reconfigured;
 *  - pin currently in PWM mode: PWM is turned off before the write.
 * After writing the requested value, any alias pin recorded in the pin
 * descriptor is written with the same value (Fab D board quirk, see below). */
void digitalWrite(register uint8_t pin, register uint8_t val)
{
	uint32_t idx;

	if (unlikely(pin >= GPIO_TOTAL))
		return;

	if (unlikely(g_APinState[pin].uCurrentInput)) {
		if (val) {
			trace_debug("%s: input pin%u driven high: enabling "
				    "pullup", __func__, pin);
			pinMode(pin, INPUT_PULLUP);
		} else {
			trace_error("%s: input pin%u driven low!", __func__, pin);
		}
		return;
	}

	/* PWM and plain digital output are mutually exclusive */
	if (unlikely(g_APinState[pin].uCurrentPwm)) {
		turnOffPWM(pin);
	}

	idx = pinGetIndex(pin);
	digitalWriteSetVal(idx, pin, val);

	// alias - enable w/o on Fab D for waggle of pin 20 to compensate for pin 13 error
	if (unlikely(g_APinDescription[idx].ulGPIOAlias != NONE)){
		idx = pinGetIndex(g_APinDescription[idx].ulGPIOAlias);
		digitalWriteSetVal(idx, g_APinDescription[idx].ulGPIOAlias, val);
	}
	//trace_debug("%s: pin=%d, handle=%d, val=%d", __func__, pin, handle, val);
}
/* Logging-only Xlib error handler (installed in NP_Initialize): records the
 * error code and returns 0 so the process keeps running instead of letting
 * Xlib abort it. */
static int
x_error_handler(Display *dpy, XErrorEvent *ee)
{
    trace_error("[NP] caught Xlib error %d\n", ee->error_code);
    return 0;
}
/* Converts the current errno value into the closest PP_ERROR_* code.
 * Unmapped errno values are logged and reported as PP_ERROR_FAILED. */
static int32_t
get_pp_errno(void)
{
    const int err = errno;

    switch (err) {
    case EACCES:
    case EPERM:
        return PP_ERROR_NOACCESS;
    case EADDRINUSE:
        return PP_ERROR_ADDRESS_IN_USE;
    case ECONNREFUSED:
        return PP_ERROR_CONNECTION_REFUSED;
    case ENETUNREACH:
        return PP_ERROR_ADDRESS_UNREACHABLE;
    case ETIMEDOUT:
        return PP_ERROR_CONNECTION_TIMEDOUT;
    case ENOTCONN:
        return PP_ERROR_CONNECTION_CLOSED;
    case ECONNRESET:
        return PP_ERROR_CONNECTION_RESET;
    case EAGAIN:
    case EBADF:
        return PP_ERROR_FAILED;
    default:
        trace_error("%s, no conversion for %d\n", __func__, err);
        return PP_ERROR_FAILED;
    }
}
/* Configures the GMAC MDC (management data clock) divider from the current
 * peripheral clock (MCK), then re-enables RX/TX. Picks the smallest divider
 * whose MCK ceiling covers the measured clock.
 * Returns false (with RX/TX left disabled) when MCK exceeds all supported
 * divider ranges. */
static bool
_gmac_configure_mdc_clock(Gmac *gmac)
{
	const uint32_t mck_limit[6] = {
		20000000, 40000000, 80000000, 120000000, 160000000, 240000000,
	};
	const uint32_t clk_cfg[6] = {
		GMAC_NCFGR_CLK_MCK_8,   /* MCK/8  */
		GMAC_NCFGR_CLK_MCK_16,  /* MCK/16 */
		GMAC_NCFGR_CLK_MCK_32,  /* MCK/32 */
		GMAC_NCFGR_CLK_MCK_48,  /* MCK/48 */
		GMAC_NCFGR_CLK_MCK_64,  /* MCK/64 */
		GMAC_NCFGR_CLK_MCK_96,  /* MCK/96 */
	};
	uint32_t mck = pmc_get_peripheral_clock(get_gmac_id_from_addr(gmac));
	unsigned i;

	/* Disable RX/TX while the clock configuration is being changed */
	gmac->GMAC_NCR &= ~(GMAC_NCR_RXEN | GMAC_NCR_TXEN);

	/* Find the first (smallest) divider whose ceiling covers MCK */
	for (i = 0; i < 6; i++) {
		if (mck <= mck_limit[i])
			break;
	}
	if (i == 6) {
		trace_error("MCK too high, cannot configure MDC clock.\r\n");
		return false;
	}

	/* configure MDC clock divider and enable RX/TX */
	gmac->GMAC_NCFGR = (gmac->GMAC_NCFGR & ~GMAC_NCFGR_CLK_Msk) | clk_cfg[i];
	gmac->GMAC_NCR |= (GMAC_NCR_RXEN | GMAC_NCR_TXEN);
	return true;
}
/* Initializes the SPI controller: disables its PCI power management, takes
 * manual control of the SS pin (GPIO output, initially high), routes the
 * SCK/MOSI/MISO pins to the SPI function, opens the spidev node if it is not
 * already open, and restores the cached clock divider, bit order and mode. */
void SPIClass::begin()
{
	/* Set the pin mux for the SCK, MOSI and MISO pins ONLY.
	 *
	 * Leave the SS pin in GPIO mode (the application will control it)
	 * but set its direction to output and initially high. */

	// disables SPI power management via sysfs
	// NOTE(review): shells out with a hard-coded PCI path; the return value
	// of system() is ignored, so a missing sysfs node fails silently.
	system("echo on > /sys/devices/pci0000:00/0000:00:07.1/power/control");
	pinMode(SPI_SS_GPIO_PIN, OUTPUT);
	digitalWrite(SPI_SS_GPIO_PIN, HIGH);
	muxSelectSpi(1);

	// open the spidev node once; keep the descriptor across begin() calls
	// NOTE(review): `fd <= 0` treats descriptor 0 as "not yet open"; fd 0 is
	// a valid descriptor in principle — confirm fd starts out negative.
	if(fd <= 0) {
		this->fd = open(LINUX_SPIDEV, O_RDWR);
		if (this->fd < 0) {
			trace_error("Failed to open SPI device\n");
			return;
		}
	}

	/* Load default/last configuration */
	this->setClockDivider(this->clkDiv);
	this->setBitOrder(this->bitOrder);
	this->setDataMode(this->mode);
}
int variantPinMode(uint8_t pin, uint8_t mode) { /* * Standard (sysfs) or fast-mode UIO options are available for some pins * * The pin at this time is set to Fast-mode by default, if available */ int ret = 0; PinDescription *p = NULL; if (pin >= GPIO_TOTAL){ trace_error("%s: invalid pin%u", __func__, pin); return PIN_EINVAL; } /* Search for entry */ p = &g_APinDescription[ardPin2DescIdx[pin]]; /* Alternate entries for Fast-Mode GPIO: enable by default if available */ if (p->pAlternate) { p->iAlternate = 1; trace_debug("%s: enable Fast-Mode SoC GPIO for pin%u", __func__, pin); } return 0; }
static bool n2p_has_property(void *object, struct PP_Var name, struct PP_Var *exception) { if (name.type != PP_VARTYPE_STRING) { trace_error("%s, name is not a string\n", __func__); // TODO: fill exception return false; } struct has_property_param_s *p = g_slice_alloc(sizeof(*p)); p->object = object; p->name = name; p->exception = exception; p->m_loop = ppb_message_loop_get_current(); p->depth = ppb_message_loop_get_depth(p->m_loop) + 1; ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(n2p_has_property_comt, p), 0, PP_OK, p->depth, __func__); ppb_message_loop_run_nested(p->m_loop); bool result = p->result; g_slice_free1(sizeof(*p), p); return result; }
/* Checks whether pointer v lies inside one of the given heaps and, when an
 * object-start bitmap is supplied for that heap, whether v points at a valid
 * object start.
 * Returns 1 when no check applies (legalHeaps NULL, or the matching heap has
 * no start bitmap), the bitmap verdict when v falls in a mapped heap, and 0
 * when v is in none of the heaps. */
int inHeaps(ptr_t v, Heap_t** legalHeaps, Bitmap_t** legalStarts)
{
	int i;
	Heap_t* heap;
	Bitmap_t* starts;
	int wordOffset;
	int validOffset;

	if(legalHeaps==NULL) /* no check is performed */
		return 1;
	for(i=0; (heap = legalHeaps[i]) != NULL; i++){
		if(inHeap(v,heap)){
			/* heap found; without a start bitmap any address in it is legal */
			if(legalStarts==NULL || (starts = legalStarts[i]) == NULL)
				return 1;
			/* offset of v in words from the heap base, indexed into the
			   object-start bitmap */
			wordOffset = ((mem_t)v) - heap->bottom;
			validOffset = IsSet(starts, wordOffset);
			/* `0 &&` deliberately disables this diagnostic; flip the 0 to 1
			   to log invalid offsets while debugging */
			if(0 && !validOffset)
				trace_error("%lx invalid wordOffset %d", (long)v, wordOffset);
			return validOffset;
		}
	}
	return 0;
}
static struct PP_Var n2p_call(void *object, struct PP_Var method_name, uint32_t argc, struct PP_Var *argv, struct PP_Var *exception) { if (method_name.type != PP_VARTYPE_STRING) { trace_error("%s, method_name is not a string\n", __func__); // TODO: fill exception return PP_MakeUndefined(); } struct call_param_s *p = g_slice_alloc(sizeof(*p)); p->object = object; p->method_name = method_name; p->argc = argc; p->argv = argv; p->exception = exception; p->m_loop = ppb_message_loop_get_current(); p->depth = ppb_message_loop_get_depth(p->m_loop) + 1; ppb_message_loop_post_work_with_result(p->m_loop, PP_MakeCCB(n2p_call_comt, p), 0, PP_OK, p->depth, __func__); ppb_message_loop_run_nested(p->m_loop); struct PP_Var result = p->result; g_slice_free1(sizeof(*p), p); return result; }
/* Reports download progress for a URL loader. The total comes from the
 * recorded response size; the received count is the current size of the spool
 * file when one is open, 0 otherwise. On fstat failure, bytes_received is set
 * to -1 and PP_FALSE is returned. */
PP_Bool
ppb_url_loader_get_download_progress(PP_Resource loader, int64_t *bytes_received,
                                     int64_t *total_bytes_to_be_received)
{
    struct pp_url_loader_s *ul = pp_resource_acquire(loader, PP_RESOURCE_URL_LOADER);
    if (!ul) {
        trace_error("%s, bad resource\n", __func__);
        return PP_FALSE;
    }

    *total_bytes_to_be_received = ul->response_size;
    *bytes_received = 0;

    PP_Bool ret = PP_TRUE;
    if (ul->fd >= 0) {
        /* data is spooled to a file; its current size is the received count */
        struct stat sb;
        if (fstat(ul->fd, &sb) == 0) {
            *bytes_received = sb.st_size;
        } else {
            *bytes_received = -1;
            ret = PP_FALSE;
        }
    }

    pp_resource_release(loader);
    return ret;
}
// Schedules task for execution on browser thread. // // Since there is no access to browser event loop, we start a nested event loop which is terminated // as long as there is no tasks left. That way we can implement waiting as entering a nested loop // and thus avoid deadlocks. void ppb_core_call_on_browser_thread(PP_Instance instance, void (*func)(void *), void *user_data) { struct call_on_browser_thread_task_s *task = g_slice_alloc(sizeof(*task)); task->func = func; task->user_data = user_data; // Push task into queue. The only purpose is to put task into queue even if message loop // is currenly terminating (in teardown state), so we are ignoring that. There are three // possible loop states. Message loop is either running, stopped, or terminating. If it's // still running, task will be executed in the context of that loop. If it's stopped or // stopping right now, task will be pushed to a queue. After that code below will schedule // nested loop on browser thread. PP_Resource m_loop = ppb_message_loop_get_for_browser_thread(); ppb_message_loop_post_work_with_result(m_loop, PP_MakeCCB(call_on_browser_thread_comt, task), 0, PP_OK, 0, __func__); struct pp_instance_s *pp_i = instance ? tables_get_pp_instance(instance) : tables_get_some_pp_instance(); if (!pp_i) { trace_error("%s, no alive instance available\n", __func__); return; } // Schedule activation routine. pthread_mutex_lock(&display.lock); if (pp_i->npp) npn.pluginthreadasynccall(pp_i->npp, activate_browser_thread_ml_ptac, user_data); pthread_mutex_unlock(&display.lock); }
/* Runs a Flash message loop: marks the resource as running, records the
 * current thread's message loop and nesting depth on it, then enters a nested
 * iteration of that loop until it is quit. A temporary reference keeps the
 * flash-message-loop resource alive while the nested loop runs.
 * Returns PP_ERROR_BADRESOURCE for an invalid resource, PP_OK otherwise. */
int32_t
ppb_flash_message_loop_run(PP_Resource flash_message_loop)
{
    struct pp_flash_message_loop_s *fml = pp_resource_acquire(flash_message_loop,
                                                              PP_RESOURCE_FLASH_MESSAGE_LOOP);
    if (!fml) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    PP_Resource message_loop = ppb_message_loop_get_current();

    fml->running = 1;
    fml->message_loop = message_loop;
    fml->depth = ppb_message_loop_get_depth(message_loop) + 1;

    pp_resource_ref(flash_message_loop);     // prevent destroy of running loop
    pp_resource_release(flash_message_loop); // pairs with the acquire above

    // launching nested loop without depth increase to prevent hang up of previously pushed tasks
    ppb_message_loop_run_int(message_loop, ML_NESTED);

    // nested loop has terminated; clear the running flag if the resource survived
    fml = pp_resource_acquire(flash_message_loop, PP_RESOURCE_FLASH_MESSAGE_LOOP);
    if (fml) {
        fml->running = 0;
        pp_resource_release(flash_message_loop);
    }

    pp_resource_unref(flash_message_loop);   // drop the keep-alive reference
    return PP_OK;
}
/* Prepares a VA-API hardware decoding context for the decoder: creates a VA
 * config (H.264 High profile, VLD entrypoint), the decoding surfaces, and a
 * VA context, then attaches the whole va_context to the codec context.
 * Returns AV_PIX_FMT_VAAPI_VLD on success. On any failure it marks the
 * decoder failed, notifies the plugin via NotifyError, and returns
 * AV_PIX_FMT_NONE.
 * NOTE(review): on the error path, VA objects created before the failing call
 * (config, surfaces) are not destroyed here — presumably the decoder teardown
 * handles them via the ids stored in vd; verify. */
static enum AVPixelFormat
prepare_vaapi_context(struct pp_video_decoder_s *vd, int width, int height)
{
    VAStatus status;

    vd->va_context.display = display.va;
    vd->va_context.config_id = VA_INVALID_ID;
    vd->va_context.context_id = VA_INVALID_ID;

    // function is called from libavcodec internals which were already protected by mutex
    status = vaCreateConfig(display.va, VAProfileH264High, VAEntrypointVLD, NULL, 0,
                            &vd->va_context.config_id);
    if (status != VA_STATUS_SUCCESS) {
        trace_error("%s, can't create VA config\n", __func__);
        goto err;
    }

#if VA_CHECK_VERSION(0, 34, 0)
    /* libva changed the vaCreateSurfaces signature in 0.34 */
    status = vaCreateSurfaces(display.va, VA_RT_FORMAT_YUV420, width, height, vd->surfaces,
                              MAX_VIDEO_SURFACES, NULL, 0);
#else
    status = vaCreateSurfaces(display.va, width, height, VA_RT_FORMAT_YUV420,
                              MAX_VIDEO_SURFACES, vd->surfaces);
#endif
    if (status != VA_STATUS_SUCCESS) {
        trace_error("%s, can't create VA surfaces\n", __func__);
        goto err;
    }

    status = vaCreateContext(display.va, vd->va_context.config_id, width, height, VA_PROGRESSIVE,
                             vd->surfaces, MAX_VIDEO_SURFACES, &vd->va_context.context_id);
    if (status != VA_STATUS_SUCCESS) {
        trace_error("%s, can't create VA context\n", __func__);
        goto err;
    }

    vd->avctx->hwaccel_context = &vd->va_context;
    return AV_PIX_FMT_VAAPI_VLD;

err:
    vd->failed_state = 1;
    vd->ppp_video_decoder_dev->NotifyError(vd->instance->id, vd->self_id,
                                           PP_VIDEODECODERERROR_UNREADABLE_INPUT);
    return AV_PIX_FMT_NONE;
}
/* NPAPI module entry point: exchanges function tables with the browser,
 * installs logging-only Xlib error handlers, opens the X display, applies
 * version-dependent quirks, loads the PPAPI plugin module and runs its
 * initializer. Returns NPERR_NO_ERROR on success, NPERR_GENERIC_ERROR when
 * the display cannot be opened or PPP_InitializeModule fails. */
NPError
NP_Initialize(NPNetscapeFuncs *aNPNFuncs, NPPluginFuncs *aNPPFuncs)
{
    trace_info_f("[NP] %s aNPNFuncs=%p, aNPPFuncs=%p, browser API version = %u\n", __func__,
                 aNPNFuncs, aNPPFuncs, aNPNFuncs->version);

    // set logging-only error handler.
    // Ignore a previous one, we have no plans to restore it
    // NOTE(review): identifier `x_io_error_hanlder` is misspelled at its
    // definition site; renaming it must be done there and here together.
    (void)XSetErrorHandler(x_error_handler);
    (void)XSetIOErrorHandler(x_io_error_hanlder);

    // copy only as much of the browser's table as both sides know about
    memset(&npn, 0, sizeof(npn));
    memcpy(&npn, aNPNFuncs, sizeof(npn) < aNPNFuncs->size ? sizeof(npn) : aNPNFuncs->size);

    NPPluginFuncs pf;
    memset(&pf, 0, sizeof(NPPluginFuncs));
    pf.size = MIN(aNPPFuncs->size, sizeof(NPPluginFuncs));

    // browser is supposed to fill .size and .version
    pf.newp =              NPP_New;
    pf.destroy =           NPP_Destroy;
    pf.setwindow =         NPP_SetWindow;
    pf.newstream =         NPP_NewStream;
    pf.destroystream =     NPP_DestroyStream;
    pf.asfile =            NPP_StreamAsFile;
    pf.writeready =        NPP_WriteReady;
    pf.write =             NPP_Write;
    pf.print =             NPP_Print;
    pf.event =             NPP_HandleEvent;
    pf.urlnotify =         NPP_URLNotify;
    pf.getvalue =          NPP_GetValue;
    pf.setvalue =          NPP_SetValue;
    pf.gotfocus =          NPP_GotFocus;
    pf.lostfocus =         NPP_LostFocus;
    pf.urlredirectnotify = NPP_URLRedirectNotify;
    pf.clearsitedata =     NPP_ClearSiteData;
    pf.getsiteswithdata =  NPP_GetSitesWithData;
    pf.didComposite =      NPP_DidComposite;

    // hand our table back, truncated to the size the browser declared
    memcpy(aNPPFuncs, &pf, pf.size);

    if (tables_open_display() != 0)
        return NPERR_GENERIC_ERROR;

    // NOTE(review): old browsers without plugin-thread async calls are marked
    // with both quirk flags — confirm plugin_missing is intended here.
    if (aNPNFuncs->version < NPVERS_HAS_PLUGIN_THREAD_ASYNC_CALL) {
        config.quirks.plugin_missing = 1;
        config.quirks.incompatible_npapi_version = 1;
    }

    load_ppp_module();

    int res = call_plugin_init_module();
    if (res != 0) {
        trace_error("%s, PPP_InitializeModule returned %d\n", __func__, res);
        return NPERR_GENERIC_ERROR;
    }

    return NPERR_NO_ERROR;
}
/* Resizes the backing store of a 3D graphics context: allocates a new X
 * pixmap / GLX pixmap / XRender picture at the new size, rebinds the GL
 * context to the new GLX pixmap, clears it, and only then destroys the old
 * trio. All X/GLX work happens under the display lock.
 * Returns PP_ERROR_BADARGUMENT for negative dimensions,
 * PP_ERROR_BADRESOURCE for a bad context, PP_OK otherwise. */
int32_t
ppb_graphics3d_resize_buffers(PP_Resource context, int32_t width, int32_t height)
{
    if (width < 0 || height < 0) {
        trace_error("%s, width or height are negative\n", __func__);
        return PP_ERROR_BADARGUMENT;
    }

    struct pp_graphics3d_s *g3d = pp_resource_acquire(context, PP_RESOURCE_GRAPHICS3D);
    if (!g3d) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    g3d->width = width;
    g3d->height = height;

    /* keep the old objects alive until the new ones are current */
    GLXPixmap old_glx_pixmap = g3d->glx_pixmap;
    Pixmap old_pixmap = g3d->pixmap;
    Picture old_pict = g3d->xr_pict;

    // release possibly bound to other thread g3d->glx_pixmap and bind it to the current one
    pthread_mutex_lock(&display.lock);
    glXMakeCurrent(display.x, g3d->glx_pixmap, g3d->glc);
    g3d->pixmap = XCreatePixmap(display.x, DefaultRootWindow(display.x), g3d->width, g3d->height,
                                g3d->depth);
    g3d->glx_pixmap = glXCreatePixmap(display.x, g3d->fb_config, g3d->pixmap, NULL);
    XFlush(display.x);
    g3d->xr_pict = XRenderCreatePicture(display.x, g3d->pixmap, g3d->xr_pictfmt, 0, 0);

    // make new g3d->glx_pixmap current to the current thread to allow releasing old_glx_pixmap
    glXMakeCurrent(display.x, g3d->glx_pixmap, g3d->glc);

    // clear surface
    glClearColor(0.0, 0.0, 0.0, 1.0);
    glClear(GL_COLOR_BUFFER_BIT);

    // destroy previous glx and x pixmaps
    glXDestroyPixmap(display.x, old_glx_pixmap);
    XRenderFreePicture(display.x, old_pict);
    XFreePixmap(display.x, old_pixmap);
    pthread_mutex_unlock(&display.lock);

    pp_resource_release(context);
    return PP_OK;
}
/* Feeds one PPAPI bitstream buffer to the H.264 parser/decoder. The input is
 * run through av_parser_parse2() until consumed; each complete access unit is
 * decoded via decode_frame(), tagged with the id of the previously consumed
 * bitstream buffer. Completes the callback on the main thread with PP_OK.
 * Returns PP_ERROR_BADRESOURCE / PP_ERROR_FAILED on bad decoder, earlier
 * failure, or an unmappable buffer; PP_OK_COMPLETIONPENDING otherwise.
 * NOTE(review): the loop assumes av_parser_parse2() always returns a
 * non-negative count of consumed bytes; inbuf_sz is a size_t, so a negative
 * `len` would wrap it — confirm against the FFmpeg parser contract. */
int32_t
ppb_video_decoder_decode(PP_Resource video_decoder,
                         const struct PP_VideoBitstreamBuffer_Dev *bitstream_buffer,
                         struct PP_CompletionCallback callback)
{
    struct pp_video_decoder_s *vd = pp_resource_acquire(video_decoder,
                                                        PP_RESOURCE_VIDEO_DECODER);
    if (!vd) {
        trace_error("%s, bad resource\n", __func__);
        return PP_ERROR_BADRESOURCE;
    }

    if (vd->failed_state) {
        trace_warning("%s, there were errors before, giving up\n", __func__);
        pp_resource_release(video_decoder);
        return PP_ERROR_FAILED;
    }

    void *rawdata = ppb_buffer_map(bitstream_buffer->data);
    if (!rawdata) {
        trace_error("%s, bad bitstream buffer\n", __func__);
        pp_resource_release(video_decoder);
        return PP_ERROR_FAILED;
    }

    uint8_t *inbuf = rawdata;
    size_t inbuf_sz = bitstream_buffer->size;

    /* parse the whole buffer; decode every complete unit the parser emits */
    while (inbuf_sz > 0) {
        uint8_t *outbuf = NULL;
        int outbuf_sz = 0;
        int len = av_parser_parse2(vd->avparser, vd->avctx, &outbuf, &outbuf_sz,
                                   inbuf, inbuf_sz, 0, 0, AV_NOPTS_VALUE);
        if (outbuf_sz > 0)
            decode_frame(vd, outbuf, outbuf_sz, vd->last_consumed_bitstream_buffer_id);
        inbuf += len;
        inbuf_sz -= len;
    }

    vd->last_consumed_bitstream_buffer_id = bitstream_buffer->id;

    ppb_buffer_unmap(bitstream_buffer->data);
    pp_resource_release(video_decoder);
    ppb_core_call_on_main_thread(0, callback, PP_OK);
    return PP_OK_COMPLETIONPENDING;
}
/* Returns whether a wheel input event requests page-wise scrolling.
 * PP_FALSE on a bad resource or when the event is not a wheel event. */
PP_Bool
ppb_wheel_input_event_get_scroll_by_page(PP_Resource wheel_event)
{
    PP_Bool result = PP_FALSE;
    struct pp_input_event_s *ev = pp_resource_acquire(wheel_event, PP_RESOURCE_INPUT_EVENT);

    if (!ev) {
        trace_error("%s, bad resource\n", __func__);
        return PP_FALSE;
    }

    if (ev->event_class == PP_INPUTEVENT_CLASS_WHEEL)
        result = ev->scroll_by_page;
    else
        trace_error("%s, not a wheel event\n", __func__);

    pp_resource_release(wheel_event);
    return result;
}
/* Returns the key code carried by a keyboard input event.
 * 0 on a bad resource or when the event is not a keyboard event. */
uint32_t
ppb_keyboard_input_event_get_key_code(PP_Resource key_event)
{
    uint32_t code = 0;
    struct pp_input_event_s *ev = pp_resource_acquire(key_event, PP_RESOURCE_INPUT_EVENT);

    if (!ev) {
        trace_error("%s, bad resource\n", __func__);
        return 0;
    }

    if (ev->event_class == PP_INPUTEVENT_CLASS_KEYBOARD)
        code = ev->key_code;
    else
        trace_error("%s, not a keyboard event\n", __func__);

    pp_resource_release(key_event);
    return code;
}