/* Returns a pointer to a memory area representing a window into the
 * resource's data.
 *
 * This may or may not be the _actual_ memory area of the resource. However
 * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory
 * area, the contents of the returned map are copied over to the resource.
 *
 * The usage indicates what the caller plans to do with the map:
 *
 *   WRITE means that the user plans to write to it
 *
 *   READ means that the user plans on reading from it
 *
 *   DISCARD_WHOLE_RESOURCE means that the whole resource is going to be
 *   potentially overwritten, and even if it isn't, the bits that aren't don't
 *   need to be maintained.
 *
 *   DISCARD_RANGE means that all the data in the specified range is going to
 *   be overwritten.
 *
 * The strategy for determining what kind of memory area to return is complex,
 * see comments inside of the function.
 */
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
   if (usage & PIPE_TRANSFER_WRITE)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);

   /* If we are trying to write to an uninitialized range, the user shouldn't
    * care what was there before. So we can treat the write as if the target
    * range were being discarded. Furthermore, since we know that even if this
    * buffer is busy due to GPU activity, because the contents were
    * uninitialized, the GPU can't care what was there, and so we can treat
    * the write as being unsynchronized.
    */
   if ((usage & PIPE_TRANSFER_WRITE) &&
       !util_ranges_intersect(&buf->valid_buffer_range,
                              box->x, box->x + box->width))
      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;

   if (usage & PIPE_TRANSFER_PERSISTENT)
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         /* Set up a staging area for the user to write to. It will be copied
          * back into VRAM on unmap. */
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, TRUE);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            /* The GPU is currently writing to this buffer. Copy its current
             * contents to a staging area in the GART. This is necessary since
             * not the whole area being mapped is being discarded.
             */
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, FALSE);
            nouveau_transfer_read(nv, tx);
         } else {
            /* The buffer is currently idle. Create a staging area for writes,
             * and make sure that the cached data is up-to-date. */
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, TRUE);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   /* At this point, buf->domain == GART */
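   /* Discarding here takes the form of "buffer renaming": the resource gets
    * fresh backing storage, so the GPU can keep using the old bo while the
    * CPU immediately writes to the new one. Contexts that still reference
    * the old storage then have to be told to rebind it.
    */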
   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the
    * relevant flags. If buf->mm is set, that means this resource is part of a
    * larger slab bo that holds multiple resources. So in that case, don't
    * wait on the whole slab and instead use the logic below to return a
    * reasonable buffer for that case.
    */
   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   /* If the GPU is currently reading/writing this buffer, we shouldn't
    * interfere with its progress. So instead we either wait for the GPU to
    * complete its operation, or set up a staging area to perform our work in.
    */
   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         /* The whole range is being discarded, so it doesn't matter what was
          * there before. No need to copy anything over. */
         nouveau_transfer_staging(nv, tx, TRUE);
         map = tx->map;
      } else if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         /* It is expected that the returned buffer be a representation of the
          * data in question, so we must copy it over from the buffer. */
         nouveau_transfer_staging(nv, tx, TRUE);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);

   return map;
}
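/* Illustrative only -- a minimal caller-side sketch of how the map/unmap
 * path above might be exercised through the Gallium interface. The function
 * name and its offset/size/data parameters are hypothetical, not part of
 * this driver; it assumes pipe/p_context.h, util/u_box.h and string.h are
 * available.
 */
static void
example_discard_range_upload(struct pipe_context *pipe,
                             struct pipe_resource *res,
                             unsigned offset, unsigned size,
                             const void *data)
{
   struct pipe_transfer *transfer;
   struct pipe_box box;
   void *map;

   u_box_1d(offset, size, &box);

   /* WRITE + DISCARD_RANGE promises that the whole range will be
    * overwritten, letting the driver hand back a staging area instead of
    * stalling on a busy buffer (see the DISCARD_RANGE branch above). */
   map = pipe->transfer_map(pipe, res, 0,
                            PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
                            &box, &transfer);
   if (!map)
      return;

   memcpy(map, data, size);

   /* If a staging area was returned, unmapping copies it back into the
    * resource, as described in the comment on nouveau_buffer_transfer_map. */
   pipe->transfer_unmap(pipe, transfer);
}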