/*
 * Build the Tegra114 3D context-switch handler.
 *
 * Allocates the handler, sizes and fills the context-save command buffer
 * (two setup_save() passes: NULL pointer = size-only, mapped pointer =
 * emit), pins it for DMA, and wires up the v1 hwctx vtable.
 *
 * Returns the embedded nvhost_hwctx_handler on success, NULL on failure.
 * On failure all partially acquired resources are released in reverse
 * order via the goto ladder.
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t114_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct host1x_hwctx_handler *handler;
	struct mem_mgr *memmgr;
	u32 *mapped;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler)
		return NULL;

	memmgr = nvhost_get_host(ch->dev)->memmgr;

	handler->h.syncpt = syncpt;
	handler->h.waitbase = waitbase;

	/* First pass: compute save_size only; no buffer exists yet. */
	setup_save(handler, NULL);

	handler->save_buf = nvhost_memmgr_alloc(memmgr, handler->save_size * 4,
			32, mem_mgr_flag_write_combine);
	if (IS_ERR(handler->save_buf))
		goto fail_alloc;

	mapped = nvhost_memmgr_mmap(handler->save_buf);
	if (!mapped)
		goto fail_mmap;

	handler->save_sgt = nvhost_memmgr_pin(memmgr, handler->save_buf,
			&ch->dev->dev);
	if (IS_ERR(handler->save_sgt))
		goto fail_pin;
	handler->save_phys = sg_dma_address(handler->save_sgt->sgl);

	/* Second pass: emit the save command stream into the buffer. */
	setup_save(handler, mapped);

	nvhost_memmgr_munmap(handler->save_buf, mapped);

	handler->save_slots = 5;
	handler->h.alloc = ctx3d_alloc_v1;
	handler->h.save_push = save_push_v1;
	handler->h.restore_push = nvhost_3dctx_restore_push;
	handler->h.save_service = NULL;
	handler->h.get = nvhost_3dctx_get;
	handler->h.put = nvhost_3dctx_put;

	return &handler->h;

fail_pin:
	nvhost_memmgr_munmap(handler->save_buf, mapped);
fail_mmap:
	nvhost_memmgr_put(memmgr, handler->save_buf);
fail_alloc:
	kfree(handler);
	return NULL;
}
/*
 * Build the Tegra20 3D context-switch handler.
 *
 * Mirrors the other gr3d init paths: allocate the handler, run a
 * size-only setup_save() pass, allocate/map/pin the context-save buffer
 * through the mem_op() ops table, emit the save stream, then install
 * the v0 hwctx vtable.
 *
 * Returns the embedded nvhost_hwctx_handler, or NULL on failure with
 * all partial resources released via the goto ladder.
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t20_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct host1x_hwctx_handler *ph;
	struct mem_mgr *memmgr;
	u32 *vaddr;

	ph = kmalloc(sizeof(*ph), GFP_KERNEL);
	if (!ph)
		return NULL;

	memmgr = nvhost_get_host(ch->dev)->memmgr;

	ph->syncpt = syncpt;
	ph->waitbase = waitbase;

	/* Size-only pass: fills ph->save_size, emits nothing. */
	setup_save(ph, NULL);

	ph->save_buf = mem_op().alloc(memmgr, ph->save_size * sizeof(u32),
			32, mem_mgr_flag_write_combine);
	if (IS_ERR_OR_NULL(ph->save_buf))
		goto fail_alloc;

	vaddr = mem_op().mmap(ph->save_buf);
	if (IS_ERR_OR_NULL(vaddr))
		goto fail_mmap;

	ph->save_sgt = mem_op().pin(memmgr, ph->save_buf);
	if (IS_ERR_OR_NULL(ph->save_sgt))
		goto fail_pin;
	ph->save_phys = sg_dma_address(ph->save_sgt->sgl);

	/* Emit the actual save command stream, then drop the mapping. */
	setup_save(ph, vaddr);
	mem_op().munmap(ph->save_buf, vaddr);

	ph->save_slots = 1;
	ph->h.alloc = ctx3d_alloc_v0;
	ph->h.save_push = save_push_v0;
	ph->h.save_service = ctx3d_save_service;
	ph->h.get = nvhost_3dctx_get;
	ph->h.put = nvhost_3dctx_put;

	return &ph->h;

fail_pin:
	mem_op().munmap(ph->save_buf, vaddr);
fail_mmap:
	mem_op().put(memmgr, ph->save_buf);
fail_alloc:
	kfree(ph);
	return NULL;
}
/*
 * Build the Tegra30 3D context-switch handler (nvmap variant).
 *
 * Allocates the handler, sizes the context-save buffer with a NULL-pointer
 * setup_save() pass, allocates/maps/pins the buffer via nvmap, emits the
 * save stream, and installs the v1 hwctx vtable.
 *
 * Returns the embedded nvhost_hwctx_handler, or NULL on failure.
 *
 * FIX: the original error paths returned NULL without freeing the handler
 * struct allocated by kmalloc(), leaking it on every alloc/mmap failure.
 * Error handling now uses the same goto-cleanup ladder as the other gr3d
 * init functions in this file.
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct nvmap_client *nvmap;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	nvmap = nvhost_get_host(ch->dev)->nvmap;

	/* File-scope register_sets; Tegra30 has one or two 3D units. */
	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	/* Size-only pass: fills p->save_size, emits nothing. */
	setup_save(p, NULL);

	p->save_buf = nvmap_alloc(nvmap, p->save_size * 4, 32,
			NVMAP_HANDLE_WRITE_COMBINE, 0);
	if (IS_ERR(p->save_buf))
		goto fail_alloc;

	p->save_slots = 6;
	if (register_sets == 2)
		p->save_slots += 2;

	save_ptr = nvmap_mmap(p->save_buf);
	if (!save_ptr)
		goto fail_mmap;

	p->save_phys = nvmap_pin(nvmap, p->save_buf);

	/* Emit the actual save command stream. */
	setup_save(p, save_ptr);

	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;

fail_mmap:
	nvmap_free(nvmap, p->save_buf);
fail_alloc:
	kfree(p);
	return NULL;
}
/*
 * Build the Tegra30 3D context-switch handler (mem_mgr variant).
 *
 * Allocates the handler, sizes the context-save buffer with a NULL-pointer
 * setup_save() pass, allocates/maps/pins the buffer through the mem_op()
 * ops table, emits the save stream, and installs the v1 hwctx vtable.
 *
 * Returns the embedded nvhost_hwctx_handler, or NULL on failure.
 *
 * FIX: the original error paths set p->save_buf = NULL and returned NULL
 * without freeing the kmalloc'd handler, leaking it on every alloc/mmap
 * failure. Error handling now uses the goto-cleanup ladder used by the
 * other gr3d init functions in this file.
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct mem_mgr *memmgr;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	memmgr = nvhost_get_host(ch->dev)->memmgr;

	p->syncpt = syncpt;
	p->waitbase = waitbase;

	/* Size-only pass: fills p->save_size, emits nothing. */
	setup_save(p, NULL);

	p->save_buf = mem_op().alloc(memmgr, p->save_size * 4, 32,
			mem_mgr_flag_write_combine);
	if (IS_ERR_OR_NULL(p->save_buf))
		goto fail_alloc;

	p->save_slots = 8;

	save_ptr = mem_op().mmap(p->save_buf);
	if (!save_ptr)
		goto fail_mmap;

	p->save_phys = mem_op().pin(memmgr, p->save_buf);

	/* Emit the actual save command stream, then drop the mapping. */
	setup_save(p, save_ptr);
	mem_op().munmap(p->save_buf, save_ptr);

	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;

fail_mmap:
	mem_op().put(memmgr, p->save_buf);
fail_alloc:
	kfree(p);
	return NULL;
}
void calibration_entry(void* parameter) { rt_device_t device; struct rtgui_rect rect; struct setup_items setup; device = rt_device_find("touch"); if (device == RT_NULL) return; /* no this device */ calibration_ptr = (struct calibration_session*) rt_malloc(sizeof(struct calibration_session)); rt_memset(calibration_ptr, 0, sizeof(struct calibration_data)); calibration_ptr->device = device; rt_device_control(calibration_ptr->device, RT_TOUCH_CALIBRATION, (void*)calibration_data_post); rtgui_graphic_driver_get_rect(rtgui_graphic_driver_get_default(), &rect); /* set screen rect */ calibration_ptr->width = rect.x2; calibration_ptr->height = rect.y2; calibration_ptr->app = rtgui_app_create("calibration"); if (calibration_ptr->app != RT_NULL) { /* create calibration window */ calibration_ptr->win = rtgui_win_create(RT_NULL, "calibration", &rect, RTGUI_WIN_STYLE_NO_TITLE | RTGUI_WIN_STYLE_NO_BORDER | RTGUI_WIN_STYLE_ONTOP | RTGUI_WIN_STYLE_DESTROY_ON_CLOSE); if (calibration_ptr->win != RT_NULL) { rtgui_object_set_event_handler(RTGUI_OBJECT(calibration_ptr->win), calibration_event_handler); rtgui_win_show(calibration_ptr->win, RT_TRUE); } rtgui_app_destroy(calibration_ptr->app); } /* set calibration data */ rt_device_control(calibration_ptr->device, RT_TOUCH_CALIBRATION_DATA, &calibration_ptr->data); //save setup setup.touch_min_x = calibration_ptr->data.min_x; setup.touch_max_x = calibration_ptr->data.max_x; setup.touch_min_y = calibration_ptr->data.min_y; setup.touch_max_y = calibration_ptr->data.max_y; setup_save(&setup); /* recover to normal */ rt_device_control(calibration_ptr->device, RT_TOUCH_NORMAL, RT_NULL); /* release memory */ rt_free(calibration_ptr); calibration_ptr = RT_NULL; }
/*
 * Initialize the (legacy, global-buffer) 3D context handler.
 *
 * Runs a size-only setup_save() pass, allocates and pins the shared
 * context-save buffer, emits the save stream, and installs the handler
 * callbacks. Returns 0 on success or a negative errno.
 *
 * FIX: the original returned PTR_ERR(context_save_buf) after an
 * IS_ERR_OR_NULL() check; PTR_ERR(NULL) is 0, so a NULL allocation was
 * reported as success. A NULL buffer now yields -ENOMEM.
 */
int __init nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h)
{
	/* Size-only pass: fills context_save_size / context_restore_size. */
	setup_save(NULL, &context_save_size, &context_restore_size, 0, 0);

	context_save_buf = nvmap_alloc(context_save_size * 4, 32,
			NVMEM_HANDLE_WRITE_COMBINE,
			(void **)&context_save_ptr);
	if (IS_ERR_OR_NULL(context_save_buf))
		return context_save_buf ? PTR_ERR(context_save_buf) : -ENOMEM;

	context_save_phys = nvmap_pin_single(context_save_buf);

	/* Emit the actual save command stream into the mapped buffer. */
	setup_save(context_save_ptr, NULL, NULL, NVSYNCPT_3D, NVWAITBASE_3D);

	h->init = ctx3d_init;
	h->deinit = ctx3d_deinit;
	h->save_service = ctx3d_save_service;
	return 0;
}
/*
 * Initialize the Tegra30 3D context handler (global-state variant).
 *
 * Unlike the per-handler variants, this version stores the context-save
 * buffer state in file-scope globals (nvhost_3dctx_save_buf, save_phys,
 * register_sets, nvhost_3dctx_save_slots — all defined elsewhere in this
 * file). Returns 0 on success or a negative errno; on failure,
 * nvhost_3dctx_save_buf is reset to NULL so later teardown won't touch a
 * dead handle.
 */
int __init nvhost_gr3d_t30_ctxhandler_init(struct nvhost_hwctx_handler *h)
{
	struct nvhost_channel *ch;
	struct nvmap_client *nvmap;
	u32 *save_ptr;

	/* The handler is embedded in its channel; recover the channel. */
	ch = container_of(h, struct nvhost_channel, ctxhandler);
	nvmap = ch->dev->nvmap;

	/* Tegra30 has one or two 3D register sets; anything else is fatal. */
	register_sets = tegra_gpu_register_sets();
	BUG_ON(register_sets == 0 || register_sets > 2);

	/* Size-only pass: computes save_size, emits nothing. */
	setup_save(NULL);

	nvhost_3dctx_save_buf = nvmap_alloc(nvmap, save_size * 4, 32,
			NVMAP_HANDLE_WRITE_COMBINE);
	if (IS_ERR(nvhost_3dctx_save_buf)) {
		int err = PTR_ERR(nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return err;
	}

	/* Two extra save slots are needed when a second register set exists. */
	nvhost_3dctx_save_slots = 6;
	if (register_sets == 2)
		nvhost_3dctx_save_slots += 2;

	save_ptr = nvmap_mmap(nvhost_3dctx_save_buf);
	if (!save_ptr) {
		nvmap_free(nvmap, nvhost_3dctx_save_buf);
		nvhost_3dctx_save_buf = NULL;
		return -ENOMEM;
	}

	/* NOTE(review): nvmap_pin() result is not error-checked here, and the
	 * mapping is never nvmap_munmap'd — presumably kept mapped for the
	 * buffer's lifetime; confirm against the teardown path. */
	save_phys = nvmap_pin(nvmap, nvhost_3dctx_save_buf);

	/* Emit the actual save command stream. */
	setup_save(save_ptr);

	h->alloc = ctx3d_alloc_v1;
	h->save_push = save_push_v1;
	h->save_service = NULL;
	h->get = nvhost_3dctx_get;
	h->put = nvhost_3dctx_put;
	return 0;
}
static void setup_load_default(void) { struct setup_items setup; rt_kprintf("setup_load_default!\r\n"); setup.touch_min_x = 0x7bd; setup.touch_max_x = 0x20; setup.touch_min_y = 0x53; setup.touch_max_y = 0x79b; setup_save(&setup); }