static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
    struct nv84_fence_priv *priv = drm->fence;

    nouveau_bo_unmap(priv->bo_gart);
    if (priv->bo_gart)
        nouveau_bo_unpin(priv->bo_gart);
    nouveau_bo_ref(NULL, &priv->bo_gart);
    nouveau_bo_unmap(priv->bo);
    if (priv->bo)
        nouveau_bo_unpin(priv->bo);
    nouveau_bo_ref(NULL, &priv->bo);
    drm->fence = NULL;
    kfree(priv);
}
static void
nouveau_exa_finish_access(PixmapPtr ppix, int index)
{
    struct nouveau_bo *bo = nouveau_pixmap_bo(ppix);

    nouveau_bo_unmap(bo);
}
static void
nouveau_pipe_bo_unmap(struct pipe_winsys *pws, struct pipe_buffer *buf)
{
    struct nouveau_pipe_buffer *nvbuf = nouveau_pipe_buffer(buf);

    nouveau_bo_unmap(nvbuf->bo);
}
static void
renderbuffer_map_unmap(struct gl_renderbuffer *rb, GLboolean map)
{
    struct nouveau_surface *s = &to_nouveau_renderbuffer(rb)->surface;

    if (map) {
        switch (rb->Format) {
        case MESA_FORMAT_RGB565:
            nouveau_InitPointers_rgb565(rb);
            break;
        case MESA_FORMAT_XRGB8888:
            nouveau_InitPointers_rgb888(rb);
            break;
        case MESA_FORMAT_ARGB8888:
            nouveau_InitPointers_argb8888(rb);
            break;
        case MESA_FORMAT_Z16:
            nouveau_InitDepthPointers_z16(rb);
            break;
        case MESA_FORMAT_Z24_S8:
            nouveau_InitDepthPointers_z24s8(rb);
            break;
        default:
            assert(0);
        }

        nouveau_bo_map(s->bo, NOUVEAU_BO_RDWR);
    } else {
        nouveau_bo_unmap(s->bo);
    }
}
/* Maybe just migrate to GART right away if we actually need to do this. */
boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
    struct nouveau_mm_allocation *mm;
    struct nouveau_bo *bounce = NULL;
    uint32_t offset;

    assert(buf->domain == NOUVEAU_BO_VRAM);

    mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
    if (!bounce)
        return FALSE;

    /* GPU-copy the VRAM range into the GART bounce buffer, then read the
     * bounce buffer back into the system-memory shadow. */
    nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                  buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

    if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD))
        return FALSE;
    memcpy(buf->data + start, bounce->map, size);
    nouveau_bo_unmap(bounce);

    buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

    nouveau_bo_ref(NULL, &bounce);
    if (mm)
        nouveau_mm_free(mm);
    return TRUE;
}
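/* Hypothetical usage sketch, not from the source: read a VRAM resource back
 * on the CPU through its system-memory shadow. Assumes `nv` and `buf` as in
 * nouveau_buffer_download() above, that the call happens in the same
 * translation unit, and that `buf->data` points at valid shadow storage;
 * `dst` is an illustrative destination pointer. */
static INLINE boolean
example_buffer_read_back(struct nouveau_context *nv,
                         struct nv04_resource *buf, void *dst)
{
    /* GPU-copies VRAM -> GART bounce, then fills buf->data. */
    if (!nouveau_buffer_download(nv, buf, 0, buf->base.width0))
        return FALSE;
    memcpy(dst, buf->data, buf->base.width0);
    return TRUE;
}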
static GLboolean
nouveau_bufferobj_data(GLcontext *ctx, GLenum target, GLsizeiptrARB size,
                       const GLvoid *data, GLenum usage,
                       struct gl_buffer_object *obj)
{
    struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);
    int ret;

    obj->Size = size;
    obj->Usage = usage;

    /* Drop any previous storage and allocate a fresh, mappable GART BO. */
    nouveau_bo_ref(NULL, &nbo->bo);
    ret = nouveau_bo_new(context_dev(ctx),
                         NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         0, size, &nbo->bo);
    assert(!ret);

    if (data) {
        nouveau_bo_map(nbo->bo, NOUVEAU_BO_WR);
        memcpy(nbo->bo->map, data, size);
        nouveau_bo_unmap(nbo->bo);
    }

    return GL_TRUE;
}
static Bool
nouveau_exa_download_from_screen(PixmapPtr pspix, int x, int y, int w, int h,
                                 char *dst, int dst_pitch)
{
    ScrnInfoPtr pScrn = xf86Screens[pspix->drawable.pScreen->myNum];
    NVPtr pNv = NVPTR(pScrn);
    struct nouveau_bo *bo;
    int src_pitch, cpp, offset;
    const char *src;
    Bool ret;

    src_pitch = exaGetPixmapPitch(pspix);
    cpp = pspix->drawable.bitsPerPixel >> 3;
    offset = (y * src_pitch) + (x * cpp);

    /* Try the accelerated M2MF download first; fall back to a CPU copy
     * from a mapping of the pixmap's BO. */
    if (pNv->GART) {
        if (pNv->Architecture >= NV_ARCH_C0) {
            if (NVC0AccelDownloadM2MF(pspix, x, y, w, h, dst, dst_pitch))
                return TRUE;
        } else {
            if (NVAccelDownloadM2MF(pspix, x, y, w, h, dst, dst_pitch))
                return TRUE;
        }
    }

    bo = nouveau_pixmap_bo(pspix);
    if (nouveau_bo_map(bo, NOUVEAU_BO_RD))
        return FALSE;
    src = (char *)bo->map + offset;
    ret = NVAccelMemcpyRect(dst, src, h, dst_pitch, src_pitch, w * cpp);
    nouveau_bo_unmap(bo);

    return ret;
}
void
nouveau_channel_free(struct nouveau_channel **chan)
{
    struct nouveau_channel_priv *nvchan;
    struct nouveau_device_priv *nvdev;
    struct drm_nouveau_channel_free cf;
    unsigned i;

    if (!chan || !*chan)
        return;
    nvchan = nouveau_channel(*chan);
    (*chan)->flush_notify = NULL;
    *chan = NULL;
    nvdev = nouveau_device(nvchan->base.device);

    FIRE_RING(&nvchan->base);

    nouveau_pushbuf_fini(&nvchan->base);
    if (nvchan->notifier_bo) {
        nouveau_bo_unmap(nvchan->notifier_bo);
        nouveau_bo_ref(NULL, &nvchan->notifier_bo);
    }

    for (i = 0; i < nvchan->drm.nr_subchan; i++)
        free(nvchan->base.subc[i].gr);

    nouveau_grobj_free(&nvchan->base.vram);
    nouveau_grobj_free(&nvchan->base.gart);
    nouveau_grobj_free(&nvchan->base.nullobj);

    cf.channel = nvchan->drm.channel;
    drmCommandWrite(nvdev->fd, DRM_NOUVEAU_CHANNEL_FREE, &cf, sizeof(cf));
    free(nvchan);
}
void
nouveau_channel_free(struct nouveau_channel **chan)
{
    struct nouveau_channel_priv *nvchan;
    struct nouveau_device_priv *nvdev;
    struct drm_nouveau_channel_free cf;

    if (!chan || !*chan)
        return;
    nvchan = nouveau_channel(*chan);
    *chan = NULL;
    nvdev = nouveau_device(nvchan->base.device);

    FIRE_RING(&nvchan->base);

    nouveau_bo_unmap(nvchan->notifier_bo);
    nouveau_bo_ref(NULL, &nvchan->notifier_bo);

    nouveau_grobj_free(&nvchan->base.vram);
    nouveau_grobj_free(&nvchan->base.gart);
    nouveau_grobj_free(&nvchan->base.nullobj);

    cf.channel = nvchan->drm.channel;
    drmCommandWrite(nvdev->fd, DRM_NOUVEAU_CHANNEL_FREE, &cf, sizeof(cf));
    free(nvchan);
}
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
    struct nouveau_screen *screen = nv->screen;
    struct nouveau_bo *bo;
    const unsigned old_domain = buf->domain;
    unsigned size = buf->base.width0;
    unsigned offset;
    int ret;

    assert(new_domain != old_domain);

    if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
        if (!nouveau_buffer_allocate(screen, buf, new_domain))
            return FALSE;
        ret = nouveau_bo_map_range(buf->bo, buf->offset, size,
                                   NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
        if (ret)
            return FALSE; /* don't return the errno as a truthy boolean */
        memcpy(buf->bo->map, buf->data, size);
        nouveau_bo_unmap(buf->bo);
        FREE(buf->data);
    } else
    if (old_domain != 0 && new_domain != 0) {
        struct nouveau_mm_allocation *mm = buf->mm;

        if (new_domain == NOUVEAU_BO_VRAM) {
            /* keep a system memory copy of our data in case we hit a
             * fallback */
            if (!nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size))
                return FALSE;
            if (nouveau_mesa_debug)
                debug_printf("migrating %u KiB to VRAM\n", size / 1024);
        }

        offset = buf->offset;
        bo = buf->bo;
        buf->bo = NULL;
        buf->mm = NULL;
        nouveau_buffer_allocate(screen, buf, new_domain);

        nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                      bo, offset, old_domain, buf->base.width0);

        nouveau_bo_ref(NULL, &bo);
        if (mm)
            release_allocation(&mm, screen->fence.current);
    } else
    if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
        if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
            return FALSE;
        if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
            return FALSE;
    } else
        return FALSE;

    assert(buf->domain == new_domain);
    return TRUE;
}
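/* Hypothetical sketch, not from the source: make sure a buffer is resident
 * in VRAM before use, migrating it if necessary. `nv` and `buf` are assumed
 * from context; nouveau_buffer_migrate() is the function above. */
static INLINE boolean
example_ensure_vram(struct nouveau_context *nv, struct nv04_resource *buf)
{
    if (buf->domain == NOUVEAU_BO_VRAM)
        return TRUE;
    return nouveau_buffer_migrate(nv, buf, NOUVEAU_BO_VRAM);
}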
static void
nouveau_renderbuffer_unmap(struct gl_context *ctx,
                           struct gl_renderbuffer *rb)
{
    struct nouveau_surface *s = &to_nouveau_renderbuffer(rb)->surface;

    nouveau_bo_unmap(s->bo);
}
void
nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *transfer)
{
    struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;

    nouveau_bo_unmap(tx->rect[1].bo);
}
void
nv50_miptree_transfer_unmap(struct pipe_context *pcontext,
                            struct pipe_transfer *ptx)
{
    struct nv50_transfer *tx = (struct nv50_transfer *)ptx;

    if (--tx->map_refcnt)
        return;
    nouveau_bo_unmap(tx->bo);
}
static void
renderbuffer_map_unmap(struct gl_renderbuffer *rb, GLboolean map)
{
    struct nouveau_surface *s = &to_nouveau_renderbuffer(rb)->surface;

    if (map) {
        nouveau_bo_map(s->bo, NOUVEAU_BO_RDWR);
    } else {
        nouveau_bo_unmap(s->bo);
    }
}
void
nouveau_deinit_array(struct nouveau_array *a)
{
    if (a->bo) {
        if (a->bo->map)
            nouveau_bo_unmap(a->bo);
    }

    a->buf = NULL;
    a->fields = 0;
}
static void
nouveau_bufferobj_get_subdata(GLcontext *ctx, GLenum target,
                              GLintptrARB offset, GLsizeiptrARB size,
                              GLvoid *data, struct gl_buffer_object *obj)
{
    struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);

    nouveau_bo_map(nbo->bo, NOUVEAU_BO_RD);
    memcpy(data, nbo->bo->map + offset, size);
    nouveau_bo_unmap(nbo->bo);
}
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
    struct nv84_fence_priv *priv = drm->fence;

#ifdef __NetBSD__
    spin_lock_destroy(&priv->base.waitlock);
    DRM_DESTROY_WAITQUEUE(&priv->base.waitqueue);
#endif

    nouveau_bo_unmap(priv->bo_gart);
    if (priv->bo_gart)
        nouveau_bo_unpin(priv->bo_gart);
    nouveau_bo_ref(NULL, &priv->bo_gart);
    nouveau_bo_unmap(priv->bo);
    if (priv->bo)
        nouveau_bo_unpin(priv->bo);
    nouveau_bo_ref(NULL, &priv->bo);
    drm->fence = NULL;
    kfree(priv);
}
void
nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
{
    struct drm_device *dev = chan->dev;

    if (!chan->notifier_bo)
        return;

    nouveau_bo_unmap(chan->notifier_bo);
    mutex_lock(&dev->struct_mutex);
    nouveau_bo_unpin(chan->notifier_bo);
    drm_gem_object_unreference(chan->notifier_bo->gem);
    mutex_unlock(&dev->struct_mutex);
    nouveau_mem_takedown(&chan->notifier_heap);
}
/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
nouveau_buffer_data_fetch(struct nv04_resource *buf, struct nouveau_bo *bo,
                          unsigned offset, unsigned size)
{
    if (!buf->data) {
        buf->data = MALLOC(size);
        if (!buf->data)
            return FALSE;
    }
    if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
        return FALSE;
    memcpy(buf->data, bo->map, size);
    nouveau_bo_unmap(bo);

    return TRUE;
}
void *
nouveau_screen_bo_map_range(struct pipe_screen *pscreen, struct nouveau_bo *bo,
                            unsigned offset, unsigned length, unsigned flags)
{
    int ret;

    ret = nouveau_bo_map_range(bo, offset, length, flags);
    if (ret) {
        nouveau_bo_unmap(bo);
        if (!(flags & NOUVEAU_BO_NOWAIT) || ret != -EBUSY)
            debug_printf("map_range failed: %d\n", ret);
        return NULL;
    }

    return (char *)bo->map - offset; /* why gallium? why? */
}
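/* Hypothetical caller sketch, not from the source, showing why
 * nouveau_screen_bo_map_range() returns bo->map biased by -offset: the
 * caller keeps addressing the mapping with the same absolute offset it
 * passed in, then releases it with nouveau_bo_unmap(). All parameters are
 * assumed from context. */
static void
example_write_range(struct pipe_screen *pscreen, struct nouveau_bo *bo,
                    const void *data, unsigned offset, unsigned len)
{
    char *map = nouveau_screen_bo_map_range(pscreen, bo, offset, len,
                                            NOUVEAU_BO_WR);
    if (map) {
        memcpy(map + offset, data, len); /* absolute offset indexes directly */
        nouveau_bo_unmap(bo);
    }
}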
static inline char *
get_bufferobj_map(struct gl_buffer_object *obj, unsigned flags)
{
    struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);
    void *map = NULL;

    if (nbo->sys) {
        map = nbo->sys;
    } else if (nbo->bo) {
        /* User-space mappings are persistent, so the pointer remains
         * valid after the immediate unmap below. */
        nouveau_bo_map(nbo->bo, flags);
        map = nbo->bo->map;
        nouveau_bo_unmap(nbo->bo);
    }

    return map;
}
static GLboolean
nouveau_bufferobj_unmap(GLcontext *ctx, GLenum target,
                        struct gl_buffer_object *obj)
{
    struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);

    assert(obj->Pointer);

    nouveau_bo_unmap(nbo->bo);

    obj->Pointer = NULL;
    obj->Offset = 0;
    obj->Length = 0;
    obj->AccessFlags = 0;

    return GL_TRUE;
}
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
    struct nouveau_transfer *xfr = nouveau_transfer(transfer);
    struct nv04_resource *buf = nv04_resource(transfer->resource);
    struct nouveau_bo *bo = buf->bo;
    uint8_t *map;
    int ret;
    uint32_t offset = xfr->base.box.x;
    uint32_t flags;

    nouveau_buffer_adjust_score(nouveau_context(pipe), buf, -250);

    if (buf->domain != NOUVEAU_BO_GART)
        return buf->data + offset;

    if (buf->mm)
        flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
    else
        flags = nouveau_screen_transfer_flags(xfr->base.usage);

    offset += buf->offset;

    ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
    if (ret)
        return NULL;
    map = bo->map;

    /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
     * not doing so might make future maps fail or trigger "reloc while
     * mapped" errors. For now, mappings to userspace are guaranteed to be
     * persistent. */
    nouveau_bo_unmap(bo);

    if (buf->mm) {
        if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
            if (nouveau_buffer_busy(buf, xfr->base.usage &
                                    PIPE_TRANSFER_READ_WRITE))
                return NULL;
        } else
        if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
            nouveau_buffer_sync(buf, xfr->base.usage &
                                PIPE_TRANSFER_READ_WRITE);
        }
    }

    return map;
}
static boolean
nv50_query_result(struct pipe_context *pipe, struct pipe_query *pq,
                  boolean wait, uint64_t *result)
{
    struct nv50_query *q = nv50_query(pq);
    int ret;

    if (!q->ready) {
        ret = nouveau_bo_map(q->bo, NOUVEAU_BO_RD |
                             (wait ? 0 : NOUVEAU_BO_NOWAIT));
        if (ret)
            return FALSE;
        q->result = ((uint32_t *)q->bo->map)[1];
        q->ready = TRUE;
        nouveau_bo_unmap(q->bo);
    }

    *result = q->result;
    return q->ready;
}
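/* Hypothetical sketch, not from the source (and assuming it lives in the
 * same file as the static nv50_query_result() above): try a non-blocking
 * read of a query result first, then block until the GPU delivers it.
 * Error handling is omitted for brevity. */
static uint64_t
example_get_query_result(struct pipe_context *pipe, struct pipe_query *pq)
{
    uint64_t result;

    if (nv50_query_result(pipe, pq, FALSE, &result))
        return result; /* already available, no stall */
    nv50_query_result(pipe, pq, TRUE, &result); /* wait for the GPU */
    return result;
}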
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
    struct nouveau_channel *chan = *pchan;

    if (chan) {
        if (chan->fence)
            nouveau_fence(chan->drm)->context_del(chan);
        nvif_object_fini(&chan->nvsw);
        nvif_object_fini(&chan->gart);
        nvif_object_fini(&chan->vram);
        nvif_object_fini(&chan->user);
        nvif_object_fini(&chan->push.ctxdma);
        nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
        nouveau_bo_unmap(chan->push.buffer);
        if (chan->push.buffer && chan->push.buffer->pin_refcnt)
            nouveau_bo_unpin(chan->push.buffer);
        nouveau_bo_ref(NULL, &chan->push.buffer);
        kfree(chan);
    }
    *pchan = NULL;
}
/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to
 * rebase the vertex indices ...
 */
boolean
nouveau_user_buffer_upload(struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
    struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
    int ret;

    assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

    buf->base.width0 = base + size;
    if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
        return FALSE;

    ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
                               NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
    if (ret)
        return FALSE;
    memcpy(buf->bo->map, buf->data + base, size);
    nouveau_bo_unmap(buf->bo);

    return TRUE;
}
static boolean
nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
    struct nouveau_mm_allocation *mm;
    struct nouveau_bo *bounce = NULL;
    uint32_t offset;

    /* Small updates go straight into the pushbuf; larger ones are staged
     * through a GART bounce buffer and copied on the GPU. */
    if (size <= 192) {
        if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
            nv->push_cb(nv, buf->bo, buf->domain, buf->offset,
                        buf->base.width0, start, size / 4,
                        (const uint32_t *)(buf->data + start));
        else
            nv->push_data(nv, buf->bo, buf->offset + start,
                          buf->domain, size, buf->data + start);
        return TRUE;
    }

    mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
    if (!bounce)
        return FALSE;

    nouveau_bo_map_range(bounce, offset, size,
                         NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
    memcpy(bounce->map, buf->data + start, size);
    nouveau_bo_unmap(bounce);

    nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                  bounce, offset, NOUVEAU_BO_GART, size);

    nouveau_bo_ref(NULL, &bounce);
    if (mm)
        release_allocation(&mm, nv->screen->fence.current);

    if (start == 0 && size == buf->base.width0)
        buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
    return TRUE;
}
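/* Hypothetical write-path sketch, not from the source (and assuming the
 * same translation unit as the static nouveau_buffer_upload() above):
 * update the system-memory shadow, then push only the dirty range to the
 * GPU copy. `src` is an illustrative source pointer. */
static boolean
example_update_range(struct nouveau_context *nv, struct nv04_resource *buf,
                     const void *src, unsigned start, unsigned size)
{
    memcpy(buf->data + start, src, size);
    return nouveau_buffer_upload(nv, buf, start, size);
}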
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
    struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
    struct fb_info *info;

    if (nfbdev->helper.fbdev) {
        info = nfbdev->helper.fbdev;
        unregister_framebuffer(info);
        if (info->cmap.len)
            fb_dealloc_cmap(&info->cmap);
        framebuffer_release(info);
    }

    if (nouveau_fb->nvbo) {
        nouveau_bo_unmap(nouveau_fb->nvbo);
        drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
        nouveau_fb->nvbo = NULL;
    }

    drm_fb_helper_fini(&nfbdev->helper);
    drm_framebuffer_cleanup(&nouveau_fb->base);

    return 0;
}
void
nvc0_push_vbo2(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
{
    struct push_context ctx;
    unsigned i, n;
    unsigned inst = info->instance_count;
    unsigned prim = nvc0_prim_gl(info->mode);

    ctx.nvc0 = nvc0;
    ctx.vertex_size = nvc0->vertex->vtx_size;
    ctx.idxbuf = NULL;
    ctx.num_attrs = 0;
    ctx.edgeflag = 0.5f;
    ctx.edgeflag_input = 32;

    /* Set up a CPU pointer and an emit callback for every vertex element
     * that has to be pushed through the FIFO. */
    for (i = 0; i < nvc0->vertex->num_elements; ++i) {
        struct pipe_vertex_element *ve = &nvc0->vertex->element[i].pipe;
        struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
        struct nouveau_bo *bo = nvc0_resource(vb->buffer)->bo;
        unsigned nr_components;

        if (!(nvc0->vbo_fifo & (1 << i)))
            continue;
        n = ctx.num_attrs++;

        if (nouveau_bo_map(bo, NOUVEAU_BO_RD))
            return;
        /* User-space mappings persist, so the pointer stays valid after
         * the immediate unmap. */
        ctx.attr[n].map = (uint8_t *)bo->map + vb->buffer_offset +
            ve->src_offset;

        nouveau_bo_unmap(bo);

        ctx.attr[n].stride = vb->stride;
        ctx.attr[n].divisor = ve->instance_divisor;

        nr_components = util_format_get_nr_components(ve->src_format);
        switch (util_format_get_component_bits(ve->src_format,
                                               UTIL_FORMAT_COLORSPACE_RGB, 0)) {
        case 8:
            switch (nr_components) {
            case 1: ctx.attr[n].push = emit_b08_1; break;
            case 2: ctx.attr[n].push = emit_b16_1; break;
            case 3: ctx.attr[n].push = emit_b08_3; break;
            case 4: ctx.attr[n].push = emit_b32_1; break;
            }
            break;
        case 16:
            switch (nr_components) {
            case 1: ctx.attr[n].push = emit_b16_1; break;
            case 2: ctx.attr[n].push = emit_b32_1; break;
            case 3: ctx.attr[n].push = emit_b16_3; break;
            case 4: ctx.attr[n].push = emit_b32_2; break;
            }
            break;
        case 32:
            switch (nr_components) {
            case 1: ctx.attr[n].push = emit_b32_1; break;
            case 2: ctx.attr[n].push = emit_b32_2; break;
            case 3: ctx.attr[n].push = emit_b32_3; break;
            case 4: ctx.attr[n].push = emit_b32_4; break;
            }
            break;
        default:
            assert(0);
            break;
        }
    }

    if (info->indexed) {
        struct nvc0_resource *res = nvc0_resource(nvc0->idxbuf.buffer);
        if (!res || nouveau_bo_map(res->bo, NOUVEAU_BO_RD))
            return;
        ctx.idxbuf = (uint8_t *)res->bo->map + nvc0->idxbuf.offset +
            res->offset;
        nouveau_bo_unmap(res->bo);
        ctx.idxsize = nvc0->idxbuf.index_size;
    } else {
        ctx.idxsize = 0;
    }

    while (inst--) {
        BEGIN_RING(nvc0->screen->base.channel, RING_3D(VERTEX_BEGIN_GL), 1);
        OUT_RING  (nvc0->screen->base.channel, prim);
        switch (ctx.idxsize) {
        case 0:
            emit_seq(&ctx, info->start, info->count);
            break;
        case 1:
            emit_elt08(&ctx, info->start, info->count);
            break;
        case 2:
            emit_elt16(&ctx, info->start, info->count);
            break;
        case 4:
            emit_elt32(&ctx, info->start, info->count);
            break;
        }
        IMMED_RING(nvc0->screen->base.channel, RING_3D(VERTEX_END_GL), 0);

        prim |= NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
    }
}
static inline Bool
NVAccelDownloadM2MF(PixmapPtr pspix, int x, int y, int w, int h,
                    char *dst, unsigned dst_pitch)
{
    ScrnInfoPtr pScrn = xf86Screens[pspix->drawable.pScreen->myNum];
    NVPtr pNv = NVPTR(pScrn);
    struct nouveau_channel *chan = pNv->chan;
    struct nouveau_grobj *m2mf = pNv->NvMemFormat;
    struct nouveau_bo *bo = nouveau_pixmap_bo(pspix);
    unsigned cpp = pspix->drawable.bitsPerPixel / 8;
    unsigned line_len = w * cpp;
    unsigned src_offset = 0, src_pitch = 0, linear = 0;
    /* Maximum DMA transfer */
    unsigned line_count = pNv->GART->size / line_len;

    if (!nv50_style_tiled_pixmap(pspix)) {
        linear      = 1;
        src_pitch   = exaGetPixmapPitch(pspix);
        src_offset += (y * src_pitch) + (x * cpp);
    }

    /* HW limitations */
    if (line_count > 2047)
        line_count = 2047;

    while (h) {
        int i;
        char *src;

        if (line_count > h)
            line_count = h;

        if (MARK_RING(chan, 32, 6))
            return FALSE;

        BEGIN_RING(chan, m2mf,
                   NV04_MEMORY_TO_MEMORY_FORMAT_DMA_BUFFER_IN, 2);
        if (OUT_RELOCo(chan, bo, NOUVEAU_BO_GART | NOUVEAU_BO_VRAM |
                       NOUVEAU_BO_RD) ||
            OUT_RELOCo(chan, pNv->GART, NOUVEAU_BO_GART | NOUVEAU_BO_WR)) {
            MARK_UNDO(chan);
            return FALSE;
        }

        if (pNv->Architecture >= NV_ARCH_50) {
            if (!linear) {
                BEGIN_RING(chan, m2mf,
                           NV50_MEMORY_TO_MEMORY_FORMAT_LINEAR_IN, 7);
                OUT_RING  (chan, 0);
                OUT_RING  (chan, bo->tile_mode << 4);
                OUT_RING  (chan, pspix->drawable.width * cpp);
                OUT_RING  (chan, pspix->drawable.height);
                OUT_RING  (chan, 1);
                OUT_RING  (chan, 0);
                OUT_RING  (chan, (y << 16) | (x * cpp));
            } else {
                BEGIN_RING(chan, m2mf,
                           NV50_MEMORY_TO_MEMORY_FORMAT_LINEAR_IN, 1);
                OUT_RING  (chan, 1);
            }

            BEGIN_RING(chan, m2mf,
                       NV50_MEMORY_TO_MEMORY_FORMAT_LINEAR_OUT, 1);
            OUT_RING  (chan, 1);

            BEGIN_RING(chan, m2mf,
                       NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH, 2);
            if (OUT_RELOCh(chan, bo, src_offset, NOUVEAU_BO_GART |
                           NOUVEAU_BO_VRAM | NOUVEAU_BO_RD) ||
                OUT_RELOCh(chan, pNv->GART, 0, NOUVEAU_BO_GART |
                           NOUVEAU_BO_WR)) {
                MARK_UNDO(chan);
                return FALSE;
            }
        }

        BEGIN_RING(chan, m2mf,
                   NV04_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
        if (OUT_RELOCl(chan, bo, src_offset, NOUVEAU_BO_GART |
                       NOUVEAU_BO_VRAM | NOUVEAU_BO_RD) ||
            OUT_RELOCl(chan, pNv->GART, 0, NOUVEAU_BO_GART |
                       NOUVEAU_BO_WR)) {
            MARK_UNDO(chan);
            return FALSE;
        }
        OUT_RING (chan, src_pitch);
        OUT_RING (chan, line_len);
        OUT_RING (chan, line_len);
        OUT_RING (chan, line_count);
        OUT_RING (chan, (1 << 8) | 1);
        OUT_RING (chan, 0);

        if (nouveau_bo_map(pNv->GART, NOUVEAU_BO_RD)) {
            MARK_UNDO(chan);
            return FALSE;
        }
        src = pNv->GART->map;
        if (dst_pitch == line_len) {
            memcpy(dst, src, dst_pitch * line_count);
            dst += dst_pitch * line_count;
        } else {
            for (i = 0; i < line_count; i++) {
                memcpy(dst, src, line_len);
                src += line_len;
                dst += dst_pitch;
            }
        }
        nouveau_bo_unmap(pNv->GART);

        if (linear)
            src_offset += line_count * src_pitch;
        h -= line_count;
        y += line_count;
    }

    return TRUE;
}