static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = TRUE;
         if (bind & (PIPE_BIND_CONSTANT_BUFFER))
            nv->cb_dirty = TRUE;
      }
   }

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}
/* Unmap stage of the transfer. If it was a WRITE transfer and the map that
 * was returned was not the real resource's data, this needs to transfer the
 * data back to the resource.
 *
 * Also marks vbo dirty based on the buffer's binding.
 */
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = TRUE;
      }

      util_range_add(&buf->valid_buffer_range,
                     tx->base.box.x, tx->base.box.x + tx->base.box.width);
   }

   if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}
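/* Sketch of how these callbacks would be wired up, assuming the
 * u_resource_vtbl mechanism from util/u_transfer.h that gallium drivers of
 * this era used for per-resource transfer dispatch. The field order follows
 * that header's older layout (which still had get_transfer/transfer_destroy);
 * nouveau_buffer_transfer_get and nouveau_buffer_destroy are hypothetical
 * names for the remaining entries, not confirmed by the snippets above.
 */
const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy (assumed name) */
   nouveau_buffer_transfer_get,          /* get_transfer (assumed name) */
   nouveau_buffer_transfer_destroy,      /* transfer_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write       /* transfer_inline_write */
};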
static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);

   if (tx->map)
      nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
}
static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->map)
      nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);

   /* box is relative to the mapped range, hence the tx->base.box.x offset */
   util_range_add(&buf->valid_buffer_range,
                  tx->base.box.x + box->x,
                  tx->base.box.x + box->x + box->width);
}
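/* Minimal caller-side sketch of the explicit-flush path that
 * transfer_flush_region serves, assuming the gallium transfer interface of
 * this era (get_transfer/transfer_map/transfer_destroy hooks on
 * pipe_context, u_box_1d from util/u_box.h, memcpy from <string.h>). The
 * function name, `res`, `data`, and `size` are hypothetical. With
 * PIPE_TRANSFER_FLUSH_EXPLICIT set, unmap skips the writeback, so the caller
 * must report each dirty subrange via transfer_flush_region itself.
 */
static void
upload_with_explicit_flush(struct pipe_context *pipe,
                           struct pipe_resource *res,
                           const void *data, unsigned size)
{
   struct pipe_box box;
   struct pipe_transfer *xfer;
   uint8_t *map;

   u_box_1d(0, size, &box);
   xfer = pipe->get_transfer(pipe, res, 0,
                             PIPE_TRANSFER_WRITE |
                             PIPE_TRANSFER_FLUSH_EXPLICIT, &box);
   if (!xfer)
      return;
   map = pipe->transfer_map(pipe, xfer);
   if (map) {
      memcpy(map, data, size);

      /* flush box is relative to the mapped range, matching box->x usage
       * in nouveau_buffer_transfer_flush_region above */
      u_box_1d(0, size, &box);
      pipe->transfer_flush_region(pipe, xfer, &box);

      pipe->transfer_unmap(pipe, xfer);
   }
   pipe->transfer_destroy(pipe, xfer);
}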
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags;

   nouveau_buffer_adjust_score(nouveau_context(pipe), buf, -250);

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (buf->mm)
      flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
   else
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
   if (ret)
      return NULL;
   map = bo->map;

   /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
    * not doing so might make future maps fail or trigger "reloc while mapped"
    * errors. For now, mappings to userspace are guaranteed to be persistent.
    */
   nouveau_bo_unmap(bo);

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}
static void
nouveau_buffer_transfer_destroy(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nouveau_context *nv = nouveau_context(pipe);

   if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
      if (buf->domain == NOUVEAU_BO_VRAM)
         nouveau_buffer_upload(nv, buf, transfer->box.x, transfer->box.width);

      if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
                                                 PIPE_BIND_INDEX_BUFFER)))
         nv->vbo_dirty = TRUE;
   }

   FREE(xfr);
}
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags = 0;

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (!buf->mm)
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map(buf->bo, flags, nv->screen->client);
   if (ret)
      return NULL;
   map = (uint8_t *)bo->map + offset;

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}
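/* nouveau_screen_transfer_flags is not shown in this section; the following
 * is a plausible reconstruction only, assuming it simply translates
 * PIPE_TRANSFER_* usage bits into the NOUVEAU_BO_* access flags that
 * nouveau_bo_map expects (NOUVEAU_BO_RD/WR/NOBLOCK from libdrm nouveau).
 * The exact set of bits it handles is an assumption, not confirmed by the
 * code above.
 */
static INLINE uint32_t
nouveau_screen_transfer_flags(unsigned pipe)
{
   uint32_t flags = 0;

   if (pipe & PIPE_TRANSFER_READ)
      flags |= NOUVEAU_BO_RD;
   if (pipe & PIPE_TRANSFER_WRITE)
      flags |= NOUVEAU_BO_WR;
   if (pipe & PIPE_TRANSFER_DONTBLOCK)
      flags |= NOUVEAU_BO_NOBLOCK;

   return flags;
}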