struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if ((buffer->base.bind & screen->sysmem_bindings) == screen->sysmem_bindings)
      ret = nouveau_buffer_allocate(screen, buffer, 0);
   else
      ret = nouveau_buffer_allocate(screen, buffer, NOUVEAU_BO_GART);

   if (ret == FALSE)
      goto fail;

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
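/* A hedged usage sketch, not part of the original source: a gallium state
 * tracker would reach nouveau_buffer_create() through the screen's
 * resource_create hook with a PIPE_BUFFER template along these lines.
 * The helper name is hypothetical; the template fields are standard
 * gallium, and resource_create is assumed to be wired to
 * nouveau_buffer_create for buffer targets. */
static struct pipe_resource *
example_create_vertex_buffer(struct pipe_screen *pscreen, unsigned bytes)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.bind = PIPE_BIND_VERTEX_BUFFER;
   templ.usage = PIPE_USAGE_DEFAULT;
   templ.width0 = bytes;
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;

   return pscreen->resource_create(pscreen, &templ);
}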
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map_range(buf->bo, buf->offset, size,
                                 NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
      if (ret)
         return FALSE; /* a non-zero errno must not read as success */
      memcpy(buf->bo->map, buf->data, size);
      nouveau_bo_unmap(buf->bo);
      FREE(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}
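/* Hedged sketch of a typical call site, not from the original source: callers
 * check the current domain first because nouveau_buffer_migrate() asserts
 * new_domain != old_domain. The helper name is hypothetical. */
static boolean
example_ensure_vram(struct nouveau_context *nv, struct nv04_resource *buf)
{
   if (buf->domain == NOUVEAU_BO_VRAM)
      return TRUE;
   return nouveau_buffer_migrate(nv, buf, NOUVEAU_BO_VRAM);
}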
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

   if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
   }
   if (domain != NOUVEAU_BO_GART) {
      if (!buf->data) {
         buf->data = MALLOC(buf->base.width0);
         if (!buf->data)
            return FALSE;
      }
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;
   return TRUE;
}
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.bind &
       (screen->vidmem_bindings & screen->sysmem_bindings)) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
      case PIPE_USAGE_STATIC:
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NOUVEAU_BO_VRAM;
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }
   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == FALSE)
      goto fail;

   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

   if (buf->base.bind & (PIPE_BIND_CONSTANT_BUFFER |
                         PIPE_BIND_COMPUTE_RESOURCE |
                         PIPE_BIND_SHADER_RESOURCE))
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
   } else {
      assert(domain == 0);
      if (!nouveau_buffer_malloc(buf))
         return FALSE;
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   return TRUE;
}
static inline bool
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = align(buf->base.width0, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return false;
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
   } else {
      assert(domain == 0);
      if (!nouveau_buffer_malloc(buf))
         return false;
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   util_range_set_empty(&buf->valid_buffer_range);

   return true;
}
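/* Hedged sketch of the align() helper used above (the real one lives in
 * gallium's u_math.h): round `value` up to the next multiple of a
 * power-of-two `alignment`. With 0x100, even a 3-byte buffer takes a
 * 256-byte suballocation, keeping nouveau_mm offsets uniformly aligned. */
static inline uint32_t
example_align(uint32_t value, uint32_t alignment)
{
   return (value + alignment - 1) & ~(alignment - 1);
}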
static INLINE boolean
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   return nouveau_buffer_allocate(screen, buf, domain);
}
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.bind &
       (screen->vidmem_bindings & screen->sysmem_bindings)) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
      case PIPE_USAGE_STATIC:
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_DYNAMIC:
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NOUVEAU_BO_VRAM;
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }
   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == FALSE)
      goto fail;

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
static INLINE boolean
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   nouveau_fence_ref(NULL, &buf->fence);
   nouveau_fence_ref(NULL, &buf->fence_wr);

   buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;

   return nouveau_buffer_allocate(screen, buf, domain);
}
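/* Hedged sketch, not from the original source: invalidation-style callers
 * (e.g. a transfer that discards the whole resource) are the natural users
 * of reallocate; fresh storage is swapped in while the GPU keeps reading
 * the old BO until its fences retire, which is why the fence references
 * are dropped above. The helper name is hypothetical. */
static boolean
example_discard_storage(struct nouveau_context *nv, struct nv04_resource *buf)
{
   return nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
}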
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map(buf->bo, 0, nv->client);
      if (ret)
         return FALSE; /* a non-zero errno must not read as success */
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      align_free(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      struct nouveau_transfer tx;
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      tx.base.resource = &buf->base;
      tx.base.box.x = 0;
      tx.base.box.width = buf->base.width0;
      tx.bo = NULL;
      if (!nouveau_transfer_staging(nv, &tx, FALSE))
         return FALSE;
      nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
      nouveau_buffer_transfer_del(nv, &tx);
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   bool ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                             PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
      buffer->domain = NOUVEAU_BO_GART;
   } else
   if (buffer->base.bind == 0 || (buffer->base.bind &
       (screen->vidmem_bindings & screen->sysmem_bindings))) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NV_VRAM_DOMAIN(screen);
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }

   /* There can be very special situations where we want non-gpu-mapped
    * buffers, but never through this interface.
    */
   assert(buffer->domain);

   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);
   if (ret == false)
      goto fail;

   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);

   util_range_init(&buffer->valid_buffer_range);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
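/* Hedged sketch, not from the original source: requesting a persistent,
 * coherent mapping (ARB_buffer_storage-style) takes the first branch of the
 * latest nouveau_buffer_create() above and forces the GART domain, since CPU
 * writes to a VRAM copy could not be kept coherent. Flag and field names are
 * standard gallium; the helper name is hypothetical. */
static struct pipe_resource *
example_create_coherent(struct pipe_screen *pscreen, unsigned bytes)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.bind = PIPE_BIND_VERTEX_BUFFER;
   templ.usage = PIPE_USAGE_STREAM;
   templ.flags = PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                 PIPE_RESOURCE_FLAG_MAP_COHERENT;
   templ.width0 = bytes;
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;

   return pscreen->resource_create(pscreen, &templ);
}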