/* Make sure the system-memory copy in buf->data is up to date: if the GPU
 * copy is marked dirty, read it back through a staging transfer. */
static boolean
nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
{
   struct nouveau_transfer tx;
   boolean ret;
   tx.base.resource = &buf->base;
   tx.base.box.x = 0;
   tx.base.box.width = buf->base.width0;
   tx.bo = NULL;

   if (!buf->data)
      if (!nouveau_buffer_malloc(buf))
         return FALSE;
   if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
      return TRUE;
   nv->stats.buf_cache_count++;

   if (!nouveau_transfer_staging(nv, &tx, FALSE))
      return FALSE;

   ret = nouveau_transfer_read(nv, &tx);
   if (ret) {
      buf->status &= ~NOUVEAU_BUFFER_STATUS_DIRTY;
      memcpy(buf->data, tx.map, buf->base.width0);
   }
   nouveau_buffer_transfer_del(nv, &tx);
   return ret;
}
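/* Illustrative sketch only, not part of the driver: a hypothetical CPU
 * read-back helper would call nouveau_buffer_cache() before touching
 * buf->data, so that a dirty GPU copy is read back first. The helper name
 * is an assumption for illustration. */
static inline void *
example_buffer_cpu_ptr(struct nouveau_context *nv, struct nv04_resource *buf)
{
   if (!nouveau_buffer_cache(nv, buf))
      return NULL; /* staging allocation or read-back failed */
   return buf->data; /* now holds the current buffer contents */
}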
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = TRUE;
         if (bind & (PIPE_BIND_CONSTANT_BUFFER))
            nv->cb_dirty = TRUE;
      }
   }

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}
/* Unmap stage of the transfer. If it was a WRITE transfer and the map that
 * was returned was not the real resource's data, this needs to transfer the
 * data back to the resource.
 *
 * Also marks vbo dirty based on the buffer's binding
 */
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = TRUE;
      }

      util_range_add(&buf->valid_buffer_range,
                     tx->base.box.x, tx->base.box.x + tx->base.box.width);
   }

   if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}
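/* Illustrative sketch only (hypothetical state-tracker usage, assuming the
 * standard gallium transfer_unmap entry point): a WRITE mapping created
 * without PIPE_TRANSFER_FLUSH_EXPLICIT relies on the unmap above to copy the
 * staged data back and to raise vbo_dirty for bound vertex/index buffers. */
static void
example_finish_buffer_write(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   /* Reaches nouveau_buffer_transfer_unmap() through the context vtable. */
   pipe->transfer_unmap(pipe, transfer);
}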
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map(buf->bo, 0, nv->client);
      if (ret)
         return FALSE; /* nouveau_bo_map returns a negative errno on failure */
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      align_free(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      struct nouveau_transfer tx;
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      tx.base.resource = &buf->base;
      tx.base.box.x = 0;
      tx.base.box.width = buf->base.width0;
      tx.bo = NULL;
      if (!nouveau_transfer_staging(nv, &tx, FALSE))
         return FALSE;
      nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
      nouveau_buffer_transfer_del(nv, &tx);
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}
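/* Illustrative sketch only, not a caller from the driver: a hypothetical
 * prepare-for-GPU helper could migrate a user-memory buffer into GART before
 * the hardware reads it. The domain choice and the helper itself are
 * assumptions for illustration. */
static inline boolean
example_ensure_gpu_readable(struct nouveau_context *nv, struct nv04_resource *buf)
{
   if (buf->domain == 0) /* still backed only by user/system memory */
      return nouveau_buffer_migrate(nv, buf, NOUVEAU_BO_GART);
   return TRUE;
}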