static void nv04_surface_copy_cpu(struct gl_context *ctx, struct nouveau_surface *dst, struct nouveau_surface *src, int dx, int dy, int sx, int sy, int w, int h) { int x, y; get_offset_t get_dst = (dst->layout == SWIZZLED ? get_swizzled_offset : get_linear_offset); get_offset_t get_src = (src->layout == SWIZZLED ? get_swizzled_offset : get_linear_offset); void *dp, *sp; nouveau_bo_map(dst->bo, NOUVEAU_BO_WR, context_client(ctx)); nouveau_bo_map(src->bo, NOUVEAU_BO_RD, context_client(ctx)); dp = dst->bo->map + dst->offset; sp = src->bo->map + src->offset; for (y = 0; y < h; y++) { for (x = 0; x < w; x++) { memcpy(dp + get_dst(dst, dx + x, dy + y), sp + get_src(src, sx + x, sy + y), dst->cpp); } } }
void * nouveau_resource_map_offset(struct nouveau_context *nv, struct nv04_resource *res, uint32_t offset, uint32_t flags) { if ((res->domain == NOUVEAU_BO_VRAM) && (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)) nouveau_buffer_download(nv, res, 0, res->base.width0); if ((res->domain != NOUVEAU_BO_GART) || (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY)) return res->data + offset; if (res->mm) { unsigned rw; rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ; nouveau_buffer_sync(res, rw); if (nouveau_bo_map(res->bo, 0, NULL)) return NULL; } else { if (nouveau_bo_map(res->bo, flags, nv->screen->client)) return NULL; } return (uint8_t *)res->bo->map + res->offset + offset; }
/* Set up the nv84-style fence backend for this device: install the base
 * fence callbacks, then allocate two per-channel sequence buffers
 * (16 bytes per channel slot) — one in VRAM and a mirror in GART.
 * Returns 0 on success or a negative error code; on failure everything
 * allocated so far is torn down via nv84_fence_destroy(). */
int nv84_fence_create(struct nouveau_drm *drm)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
	struct nv84_fence_priv *priv;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;
#ifdef __NetBSD__
	/* NetBSD uses its own waitqueue primitive plus a guarding lock. */
	spin_lock_init(&priv->base.waitlock);
	DRM_INIT_WAITQUEUE(&priv->base.waitqueue, "nvfenceq");
#else
	init_waitqueue_head(&priv->base.waiting);
#endif
	priv->base.uevent = true;

	/* VRAM sequence buffer: 16 bytes per channel (pfifo->max + 1 slots).
	 * Pin it, map it, and unwind each step on failure. */
	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	/* GART mirror of the same layout, with the same unwind pattern. */
	if (ret == 0)
		ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
				     TTM_PL_FLAG_TT, 0, 0, NULL,
				     &priv->bo_gart);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo_gart);
			if (ret)
				nouveau_bo_unpin(priv->bo_gart);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo_gart);
	}

	/* Full teardown on any failure (frees priv and both buffers). */
	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}
static struct nouveau_bo * nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) { struct nouveau_bo *pushbuf = NULL; int location, ret; if (nouveau_vram_pushbuf) location = TTM_PL_FLAG_VRAM; else location = TTM_PL_FLAG_TT; ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false, true, &pushbuf); if (ret) { NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret); return NULL; } ret = nouveau_bo_pin(pushbuf, location); if (ret) { NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret); nouveau_bo_ref(NULL, &pushbuf); return NULL; } ret = nouveau_bo_map(pushbuf); if (ret) { nouveau_bo_unpin(pushbuf); nouveau_bo_ref(NULL, &pushbuf); return NULL; } return pushbuf; }
/* Allocate an extra bo if we can't fit everything we need simultaneously. * (Could happen for very large user arrays.) */ static inline bool nouveau_scratch_runout(struct nouveau_context *nv, unsigned size) { int ret; unsigned n; if (nv->scratch.runout) n = nv->scratch.runout->nr; else n = 0; nv->scratch.runout = REALLOC(nv->scratch.runout, n == 0 ? 0 : (sizeof(*nv->scratch.runout) + (n + 0) * sizeof(void *)), sizeof(*nv->scratch.runout) + (n + 1) * sizeof(void *)); nv->scratch.runout->nr = n + 1; nv->scratch.runout->bo[n] = NULL; ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout->bo[n], size); if (!ret) { ret = nouveau_bo_map(nv->scratch.runout->bo[n], 0, NULL); if (ret) nouveau_bo_ref(NULL, &nv->scratch.runout->bo[--nv->scratch.runout->nr]); } if (!ret) { nv->scratch.current = nv->scratch.runout->bo[n]; nv->scratch.offset = 0; nv->scratch.end = size; nv->scratch.map = nv->scratch.current->map; } return !ret; }
static bool nv50_hw_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size) { struct nv50_screen *screen = nv50->screen; struct nv50_hw_query *hq = nv50_hw_query(q); int ret; if (hq->bo) { nouveau_bo_ref(NULL, &hq->bo); if (hq->mm) { if (hq->state == NV50_HW_QUERY_STATE_READY) nouveau_mm_free(hq->mm); else nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, hq->mm); } } if (size) { hq->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &hq->bo, &hq->base_offset); if (!hq->bo) return false; hq->offset = hq->base_offset; ret = nouveau_bo_map(hq->bo, 0, screen->base.client); if (ret) { nv50_hw_query_allocate(nv50, q, 0); return false; } hq->data = (uint32_t *)((uint8_t *)hq->bo->map + hq->base_offset); } return true; }
/* Copy damaged regions from the shadow framebuffer to the scanout bo.
 * `pbox` points at `num` damage rectangles. */
void
NVRefreshArea(ScrnInfoPtr pScrn, int num, BoxPtr pbox)
{
	NVPtr pNv = NVPTR(pScrn);
	int x1, y1, x2, y2, width, height, cpp, FBPitch;
	unsigned char *src, *dst;

	cpp = pScrn->bitsPerPixel >> 3;
	FBPitch = pScrn->displayWidth * cpp;

	/* BUG FIX: the map result used to be ignored; a failed map would
	 * leave scanout->map NULL and crash in the copy loop below. */
	if (nouveau_bo_map(pNv->scanout, NOUVEAU_BO_WR, pNv->client))
		return;

	while (num--) {
		/* Clamp the damage box to the visible screen. */
		x1 = MAX(pbox->x1, 0);
		y1 = MAX(pbox->y1, 0);
		x2 = MIN(pbox->x2, pScrn->virtualX);
		y2 = MIN(pbox->y2, pScrn->virtualY);
		width = (x2 - x1) * cpp;
		height = y2 - y1;

		if (width > 0 && height > 0) {
			src = pNv->ShadowPtr + (y1 * pNv->ShadowPitch) +
				(x1 * cpp);
			dst = pNv->scanout->map + (y1 * FBPitch) + (x1 * cpp);
			/* Row-by-row copy; shadow and scanout pitches differ. */
			while (height--) {
				memcpy(dst, src, width);
				dst += FBPitch;
				src += pNv->ShadowPitch;
			}
		}
		pbox++;
	}
}
/* Maybe just migrate to GART right away if we actually need to do this. */ boolean nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf, unsigned start, unsigned size) { struct nouveau_mm_allocation *mm; struct nouveau_bo *bounce = NULL; uint32_t offset; assert(buf->domain == NOUVEAU_BO_VRAM); mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset); if (!bounce) return FALSE; nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size); if (nouveau_bo_map(bounce, NOUVEAU_BO_RD, nv->screen->client)) return FALSE; memcpy(buf->data + start, (uint8_t *)bounce->map + offset, size); buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING; nouveau_bo_ref(NULL, &bounce); if (mm) nouveau_mm_free(mm); return TRUE; }
/* Allocate (or replace) the backing bo for a buffer object and upload the
 * initial contents if provided.  Returns GL_TRUE on success. */
static GLboolean
nouveau_bufferobj_data(GLcontext *ctx, GLenum target, GLsizeiptrARB size,
		       const GLvoid *data, GLenum usage,
		       struct gl_buffer_object *obj)
{
	struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);
	int ret;

	obj->Size = size;
	obj->Usage = usage;

	/* Drop any previous storage before allocating anew. */
	nouveau_bo_ref(NULL, &nbo->bo);
	ret = nouveau_bo_new(context_dev(ctx),
			     NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
			     size, &nbo->bo);
	assert(!ret);
	/* BUG FIX: the assert compiles away in release builds, so a failed
	 * allocation used to fall through and dereference a NULL bo below. */
	if (ret)
		return GL_FALSE;

	if (data) {
		nouveau_bo_map(nbo->bo, NOUVEAU_BO_WR);
		memcpy(nbo->bo->map, data, size);
		nouveau_bo_unmap(nbo->bo);
	}

	return GL_TRUE;
}
static void nv50_compute_upload_input(struct nv50_context *nv50, const uint32_t *input) { struct nv50_screen *screen = nv50->screen; struct nouveau_pushbuf *push = screen->base.pushbuf; unsigned size = align(nv50->compprog->parm_size, 0x4); BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM_COUNT), 1); PUSH_DATA (push, (size / 4) << 8); if (size) { struct nouveau_mm_allocation *mm; struct nouveau_bo *bo = NULL; unsigned offset; mm = nouveau_mm_allocate(screen->base.mm_GART, size, &bo, &offset); assert(mm); nouveau_bo_map(bo, 0, screen->base.client); memcpy(bo->map + offset, input, size); nouveau_bufctx_refn(nv50->bufctx, 0, bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD); nouveau_pushbuf_bufctx(push, nv50->bufctx); nouveau_pushbuf_validate(push); BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM(0)), size / 4); nouveau_pushbuf_data(push, bo, offset, size); nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, mm); nouveau_bo_ref(NULL, &bo); nouveau_bufctx_reset(nv50->bufctx, 0); } }
/* Set up staging storage for a buffer transfer: small transfers use a
 * malloc'd bounce (pushed through the pushbuf later), larger ones a GART
 * suballocation.  Returns the CPU pointer or NULL on failure. */
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
                         struct nouveau_transfer *tx, boolean permit_pb)
{
   const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
   const unsigned size = align(tx->base.box.width, 4) + adj;

   /* The pushbuf path needs a push_data hook on the context. */
   if (!nv->push_data)
      permit_pb = FALSE;

   if (permit_pb && size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) {
      tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
      if (tx->map)
         tx->map += adj;
   } else {
      /* Stage through a GART bounce bo instead. */
      tx->mm = nouveau_mm_allocate(nv->screen->mm_GART, size,
                                   &tx->bo, &tx->offset);
      if (tx->bo) {
         tx->offset += adj;
         if (!nouveau_bo_map(tx->bo, 0, NULL))
            tx->map = (uint8_t *)tx->bo->map + tx->offset;
      }
   }
   return tx->map;
}
/* EXA DownloadFromScreen: copy a w x h rectangle at (x, y) of the pixmap
 * into `dst`.  Tries the accelerated M2MF path first, then falls back to
 * a CPU copy from the mapped bo. */
static Bool
nouveau_exa_download_from_screen(PixmapPtr pspix, int x, int y, int w, int h,
				 char *dst, int dst_pitch)
{
	ScrnInfoPtr pScrn = xf86Screens[pspix->drawable.pScreen->myNum];
	NVPtr pNv = NVPTR(pScrn);
	struct nouveau_bo *bo;
	int src_pitch, cpp, offset;
	const char *src;
	Bool ret;

	src_pitch = exaGetPixmapPitch(pspix);
	cpp = pspix->drawable.bitsPerPixel >> 3;
	offset = (y * src_pitch) + (x * cpp);

	/* Accelerated download requires GART. */
	if (pNv->GART) {
		Bool done;

		if (pNv->Architecture >= NV_ARCH_C0)
			done = NVC0AccelDownloadM2MF(pspix, x, y, w, h,
						     dst, dst_pitch);
		else
			done = NVAccelDownloadM2MF(pspix, x, y, w, h,
						   dst, dst_pitch);
		if (done)
			return TRUE;
	}

	/* CPU fallback. */
	bo = nouveau_pixmap_bo(pspix);
	if (nouveau_bo_map(bo, NOUVEAU_BO_RD))
		return FALSE;
	src = (char *)bo->map + offset;
	ret = NVAccelMemcpyRect(dst, src, h, dst_pitch, src_pitch, w * cpp);
	nouveau_bo_unmap(bo);
	return ret;
}
/* Map a w x h window of the renderbuffer for CPU access and return the
 * pointer and stride through the out parameters. */
static void
nouveau_renderbuffer_map(struct gl_context *ctx,
			 struct gl_renderbuffer *rb,
			 GLuint x, GLuint y, GLuint w, GLuint h,
			 GLbitfield mode,
			 GLubyte **out_map,
			 GLint *out_stride)
{
	struct nouveau_surface *s = &to_nouveau_renderbuffer(rb)->surface;
	GLubyte *base;
	int pitch;
	int access = 0;

	if (mode & GL_MAP_READ_BIT)
		access |= NOUVEAU_BO_RD;
	if (mode & GL_MAP_WRITE_BIT)
		access |= NOUVEAU_BO_WR;

	nouveau_bo_map(s->bo, access);

	base = s->bo->map;
	pitch = s->pitch;

	/* The window-system framebuffer (Name == 0) is stored bottom-up:
	 * start on the last row and use a negative stride. */
	if (rb->Name == 0) {
		base += pitch * (rb->Height - 1);
		pitch = -pitch;
	}

	*out_map = base + (int)y * pitch + x * s->cpp;
	*out_stride = pitch;
}
/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 *
 * Appends a new scratch bo of `size` bytes to the runout array, maps it,
 * and makes it the current scratch buffer.  Returns TRUE on success. */
static INLINE boolean
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   const unsigned n = nv->scratch.nr_runout++;

   nv->scratch.runout = REALLOC(nv->scratch.runout,
                                (n + 0) * sizeof(*nv->scratch.runout),
                                (n + 1) * sizeof(*nv->scratch.runout));
   /* BUG FIX: the REALLOC result used to be indexed unconditionally,
    * crashing on out-of-memory.  Reset the (now pointer-less) bookkeeping
    * and fail; the previously tracked bos are unavoidably leaked on this
    * OOM path. */
   if (!nv->scratch.runout) {
      nv->scratch.nr_runout = 0;
      return FALSE;
   }
   nv->scratch.runout[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout[n], 0, NULL);
      if (ret)
         /* Drop the bo and shrink the array back on a failed map. */
         nouveau_bo_ref(NULL,
                        &nv->scratch.runout[--nv->scratch.nr_runout]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}
/* Continue to next scratch buffer, if available (no wrapping, large enough).
 * Allocate it if it has not yet been created. */
static INLINE boolean
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
   struct nouveau_bo *bo;
   int ret;
   const unsigned next = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;

   /* Refuse if the request doesn't fit, or advancing would hit the slot
    * we'd wrap into. */
   if (size > nv->scratch.bo_size || next == nv->scratch.wrap)
      return FALSE;
   nv->scratch.id = next;

   bo = nv->scratch.bo[next];
   if (!bo) {
      /* Lazily create the buffer on first use of this slot. */
      ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
      if (ret)
         return FALSE;
      nv->scratch.bo[next] = bo;
   }
   nv->scratch.current = bo;
   nv->scratch.offset = 0;
   nv->scratch.end = nv->scratch.bo_size;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->screen->client);
   if (!ret)
      nv->scratch.map = bo->map;
   return !ret;
}
/* Map or unmap the renderbuffer's bo for swrast access; on map, first
 * install the span helpers matching the buffer's format. */
static void
renderbuffer_map_unmap(struct gl_renderbuffer *rb, GLboolean map)
{
	struct nouveau_surface *s = &to_nouveau_renderbuffer(rb)->surface;

	if (!map) {
		nouveau_bo_unmap(s->bo);
		return;
	}

	switch (rb->Format) {
	case MESA_FORMAT_RGB565:
		nouveau_InitPointers_rgb565(rb);
		break;
	case MESA_FORMAT_XRGB8888:
		nouveau_InitPointers_rgb888(rb);
		break;
	case MESA_FORMAT_ARGB8888:
		nouveau_InitPointers_argb8888(rb);
		break;
	case MESA_FORMAT_Z16:
		nouveau_InitDepthPointers_z16(rb);
		break;
	case MESA_FORMAT_Z24_S8:
		nouveau_InitDepthPointers_z24s8(rb);
		break;
	default:
		/* Unsupported renderbuffer format. */
		assert(0);
	}

	nouveau_bo_map(s->bo, NOUVEAU_BO_RDWR);
}
static boolean nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size) { struct nv50_screen *screen = nv50->screen; int ret; if (q->bo) { nouveau_bo_ref(NULL, &q->bo); if (q->mm) { if (q->ready) nouveau_mm_free(q->mm); else nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, q->mm); } } if (size) { q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base); if (!q->bo) return FALSE; q->offset = q->base; ret = nouveau_bo_map(q->bo, 0, screen->base.client); if (ret) { nv50_query_allocate(nv50, q, 0); return FALSE; } q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base); } return TRUE; }
/* Map the renderbuffer's bo for CPU access.  Unmapping is intentionally a
 * no-op in this variant. */
static void
renderbuffer_map_unmap(struct gl_context *ctx, struct gl_renderbuffer *rb,
		       GLboolean map)
{
	struct nouveau_surface *s = &to_nouveau_renderbuffer(rb)->surface;

	if (!map)
		return;

	nouveau_bo_map(s->bo, NOUVEAU_BO_RDWR, context_client(ctx));
}
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */ boolean nouveau_buffer_migrate(struct nouveau_context *nv, struct nv04_resource *buf, const unsigned new_domain) { struct nouveau_screen *screen = nv->screen; struct nouveau_bo *bo; const unsigned old_domain = buf->domain; unsigned size = buf->base.width0; unsigned offset; int ret; assert(new_domain != old_domain); if (new_domain == NOUVEAU_BO_GART && old_domain == 0) { if (!nouveau_buffer_allocate(screen, buf, new_domain)) return FALSE; ret = nouveau_bo_map(buf->bo, 0, nv->screen->client); if (ret) return ret; memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size); FREE(buf->data); } else if (old_domain != 0 && new_domain != 0) { struct nouveau_mm_allocation *mm = buf->mm; if (new_domain == NOUVEAU_BO_VRAM) { /* keep a system memory copy of our data in case we hit a fallback */ if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size)) return FALSE; if (nouveau_mesa_debug) debug_printf("migrating %u KiB to VRAM\n", size / 1024); } offset = buf->offset; bo = buf->bo; buf->bo = NULL; buf->mm = NULL; nouveau_buffer_allocate(screen, buf, new_domain); nv->copy_data(nv, buf->bo, buf->offset, new_domain, bo, offset, old_domain, buf->base.width0); nouveau_bo_ref(NULL, &bo); if (mm) release_allocation(&mm, screen->fence.current); } else if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) { if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM)) return FALSE; if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0)) return FALSE; } else return FALSE; assert(buf->domain == new_domain); return TRUE; }
/* Map the handle's bo for CPU read/write and return the map pointer,
 * or NULL if mapping fails. */
static char *
prep(void *hdl)
{
	struct nouveau_bo *bo = hdl;
	int ret;

	ret = nouveau_bo_map(bo, NOUVEAU_BO_RDWR);
	if (ret != 0) {
		ERROR_MSG("nouveau_bo_map failed: %d", ret);
		return NULL;
	}
	return bo->map;
}
/* Map the decoder's command and data bos and cache the map pointers.
 * Idempotent; returns 0 on success or the map error code. */
static int
nouveau_vpe_init(struct nouveau_decoder *dec)
{
	int ret;

	if (dec->cmds)
		return 0;

	ret = nouveau_bo_map(dec->cmd_bo, NOUVEAU_BO_RDWR, dec->client);
	if (ret) {
		debug_printf("Mapping cmd bo: %s\n", strerror(-ret));
		return ret;
	}

	ret = nouveau_bo_map(dec->data_bo, NOUVEAU_BO_RDWR, dec->client);
	if (ret) {
		debug_printf("Mapping data bo: %s\n", strerror(-ret));
		return ret;
	}

	dec->cmds = dec->cmd_bo->map;
	dec->data = dec->data_bo->map;
	return 0;
}
/* Map or unmap the renderbuffer's bo for CPU read/write access. */
static void
renderbuffer_map_unmap(struct gl_renderbuffer *rb, GLboolean map)
{
	struct nouveau_surface *s = &to_nouveau_renderbuffer(rb)->surface;

	if (!map) {
		nouveau_bo_unmap(s->bo);
		return;
	}
	nouveau_bo_map(s->bo, NOUVEAU_BO_RDWR);
}
/* Copy `size` bytes at `offset` out of a bo into the resource's CPU
 * shadow, allocating the shadow first if necessary. */
static INLINE boolean
nouveau_buffer_data_fetch(struct nouveau_context *nv,
                          struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset,
                          unsigned size)
{
   if (!nouveau_buffer_malloc(buf) ||
       nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
      return FALSE;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);
   return TRUE;
}
static void nv30_transfer_rect_cpu(XFER_ARGS) { get_ptr_t sp = get_ptr(src); get_ptr_t dp = get_ptr(dst); char *srcmap, *dstmap; int x, y; nouveau_bo_map(src->bo, NOUVEAU_BO_RD, nv30->base.client); nouveau_bo_map(dst->bo, NOUVEAU_BO_WR, nv30->base.client); srcmap = src->bo->map + src->offset; dstmap = dst->bo->map + dst->offset; for (y = 0; y < (dst->y1 - dst->y0); y++) { for (x = 0; x < (dst->x1 - dst->x0); x++) { memcpy(dp(dst, dstmap, dst->x0 + x, dst->y0 + y, dst->z), sp(src, srcmap, src->x0 + x, src->y0 + y, src->z), dst->cpp); } } }
/* Copy `size` bytes at `offset` out of a bo into the resource's CPU
 * shadow, allocating the shadow first if necessary. */
static inline bool
nouveau_buffer_data_fetch(struct nouveau_context *nv,
                          struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset,
                          unsigned size)
{
   if (!nouveau_buffer_malloc(buf))
      return false;
   if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client) != 0)
      return false;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);
   return true;
}
/* Read back `size` bytes at `offset` from the buffer object into `data`. */
static void
nouveau_bufferobj_get_subdata(GLcontext *ctx, GLenum target,
			      GLintptrARB offset, GLsizeiptrARB size,
			      GLvoid *data, struct gl_buffer_object *obj)
{
	struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);

	/* Map for reading, copy the range out, unmap again. */
	nouveau_bo_map(nbo->bo, NOUVEAU_BO_RD);
	memcpy(data, nbo->bo->map + offset, size);
	nouveau_bo_unmap(nbo->bo);
}
/* Poll whether a query result is available and flag it READY if so. */
static INLINE void
nvc0_query_update(struct nouveau_client *cli, struct nvc0_query *q)
{
   if (q->is64bit) {
      /* 64-bit queries: a successful non-blocking map means the GPU has
       * finished writing the result. */
      if (nouveau_bo_map(q->bo, NOUVEAU_BO_RD | NOUVEAU_BO_NOBLOCK, cli) == 0)
         q->state = NVC0_QUERY_STATE_READY;
      return;
   }
   /* 32-bit queries carry a sequence number we can compare directly. */
   if (q->data[0] == q->sequence)
      q->state = NVC0_QUERY_STATE_READY;
}
/* Allocate the 1x1 white dummy texture used when no texture is bound. */
static void
init_dummy_texture(struct gl_context *ctx)
{
	struct nouveau_surface *s = &to_nv04_context(ctx)->dummy_texture;

	nouveau_surface_alloc(ctx, s, SWIZZLED,
			      NOUVEAU_BO_MAP | NOUVEAU_BO_VRAM,
			      MESA_FORMAT_ARGB8888, 1, 1);

	/* BUG FIX: the map result used to be ignored; a failed map would
	 * write the texel through a NULL pointer. */
	if (nouveau_bo_map(s->bo, NOUVEAU_BO_WR, context_client(ctx)))
		return;
	*(uint32_t *)s->bo->map = 0xffffffff;
}
/* EXA PrepareAccess: map the pixmap's bo for direct CPU access and point
 * devPrivate.ptr at it.  Returns FALSE if direct access is impossible. */
static Bool
nouveau_exa_prepare_access(PixmapPtr ppix, int index)
{
	struct nouveau_bo *bo = nouveau_pixmap_bo(ppix);
	NVPtr pNv = NVPTR(xf86Screens[ppix->drawable.pScreen->myNum]);

	/* nv50-style tiled pixmaps can't be read linearly without wfb. */
	if (nv50_style_tiled_pixmap(ppix) && !pNv->wfb_enabled)
		return FALSE;

	if (nouveau_bo_map(bo, NOUVEAU_BO_RDWR) != 0)
		return FALSE;

	ppix->devPrivate.ptr = bo->map;
	return TRUE;
}
struct pipe_screen * nv50_screen_create(struct nouveau_device *dev) { struct nv50_screen *screen; struct pipe_screen *pscreen; struct nouveau_object *chan; uint64_t value; uint32_t tesla_class; unsigned stack_size, max_warps, tls_space; int ret; screen = CALLOC_STRUCT(nv50_screen); if (!screen) return NULL; pscreen = &screen->base.base; screen->base.sysmem_bindings = PIPE_BIND_CONSTANT_BUFFER; ret = nouveau_screen_init(&screen->base, dev); if (ret) FAIL_SCREEN_INIT("nouveau_screen_init failed: %d\n", ret); screen->base.pushbuf->user_priv = screen; screen->base.pushbuf->rsvd_kick = 5; chan = screen->base.channel; pscreen->destroy = nv50_screen_destroy; pscreen->context_create = nv50_create; pscreen->is_format_supported = nv50_screen_is_format_supported; pscreen->get_param = nv50_screen_get_param; pscreen->get_shader_param = nv50_screen_get_shader_param; pscreen->get_paramf = nv50_screen_get_paramf; nv50_screen_init_resource_functions(pscreen); nouveau_screen_init_vdec(&screen->base); ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096, NULL, &screen->fence.bo); if (ret) goto fail; nouveau_bo_map(screen->fence.bo, 0, NULL); screen->fence.map = screen->fence.bo->map; screen->base.fence.emit = nv50_screen_fence_emit; screen->base.fence.update = nv50_screen_fence_update; ret = nouveau_object_new(chan, 0xbeef0301, NOUVEAU_NOTIFIER_CLASS, &(struct nv04_notify){ .length = 32 }, sizeof(struct nv04_notify), &screen->sync);