int main(int argc, char **argv) { struct hash_table *ht; const char *str1 = "test1"; const char *str2 = "test2"; struct hash_entry *entry; ht = _mesa_hash_table_create(NULL, badhash, _mesa_key_string_equal); _mesa_hash_table_insert(ht, str1, NULL); _mesa_hash_table_insert(ht, str2, NULL); entry = _mesa_hash_table_search(ht, str2); assert(strcmp(entry->key, str2) == 0); entry = _mesa_hash_table_search(ht, str1); assert(strcmp(entry->key, str1) == 0); _mesa_hash_table_remove(ht, entry); entry = _mesa_hash_table_search(ht, str1); assert(entry == NULL); entry = _mesa_hash_table_search(ht, str2); assert(strcmp(entry->key, str2) == 0); _mesa_hash_table_destroy(ht, NULL); return 0; }
int
main(int argc, char **argv)
{
   struct hash_table *ht;
   char *str1 = strdup("test1");
   char *str2 = strdup("test1");
   struct hash_entry *entry;

   (void) argc;
   (void) argv;

   assert(str1 != str2);

   ht = _mesa_hash_table_create(NULL, _mesa_key_hash_string,
                                _mesa_key_string_equal);

   _mesa_hash_table_insert(ht, str1, str1);
   _mesa_hash_table_insert(ht, str2, str2);

   /* Inserting a second, equal key replaces the data, so the entry for
    * "test1" must now point at str2.
    */
   entry = _mesa_hash_table_search(ht, str1);
   assert(entry);
   assert(entry->data == str2);

   _mesa_hash_table_remove(ht, entry);

   entry = _mesa_hash_table_search(ht, str1);
   assert(!entry);

   _mesa_hash_table_destroy(ht, NULL);
   free(str1);
   free(str2);

   return 0;
}
int
main()
{
   struct hash_table *ht;
   struct hash_entry *entry;
   const uint32_t size = 1000;
   bool flags[size];
   uint32_t i;

   ht = _mesa_hash_table_create(NULL, key_hash, key_equal);

   for (i = 0; i < size; ++i) {
      flags[i] = false;
      _mesa_hash_table_insert(ht, make_key(i), &flags[i]);
   }

   _mesa_hash_table_clear(ht, delete_function);
   assert(_mesa_hash_table_next_entry(ht, NULL) == NULL);

   /* Check that delete_function was called and that repopulating the table
    * works.
    */
   for (i = 0; i < size; ++i) {
      assert(flags[i]);
      flags[i] = false;
      _mesa_hash_table_insert(ht, make_key(i), &flags[i]);
   }

   /* Check that exactly the right set of entries is in the table. */
   for (i = 0; i < size; ++i)
      assert(_mesa_hash_table_search(ht, make_key(i)));

   hash_table_foreach(ht, entry) {
      assert(key_id(entry->key) < size);
   }
void
gp_ir_visitor::insert_phi(ir_phi *ir, unsigned num_sources)
{
   lima_gp_ir_phi_node_t *phi = lima_gp_ir_phi_node_create(num_sources);
   lima_gp_ir_reg_t *dest = lima_gp_ir_reg_create(this->prog);
   dest->size = ir->dest->type->vector_elements;
   phi->dest = dest;

   _mesa_hash_table_insert(this->var_to_reg, _mesa_hash_pointer(ir->dest),
                           ir->dest, dest);
   _mesa_hash_table_insert(this->phi_to_phi, _mesa_hash_pointer(ir),
                           ir, phi);

   ptrset_add(&this->cur_block->phi_nodes, phi);
}
/**
 * Allocates a block of space in the batchbuffer for indirect state.
 *
 * We don't want to allocate separate BOs for every bit of indirect
 * state in the driver.  It means overallocating by a significant
 * margin (4096 bytes, even if the object is just a 20-byte surface
 * state), and more buffers to walk and count for aperture size checking.
 *
 * However, due to the restrictions imposed by the aperture size
 * checking performance hacks, we can't have the batch point at a
 * separate indirect state buffer, because once the batch points at
 * it, no more relocations can be added to it.  So, we sneak these
 * buffers in at the top of the batchbuffer.
 */
void *
brw_state_batch(struct brw_context *brw,
                int size,
                int alignment,
                uint32_t *out_offset)
{
   struct intel_batchbuffer *batch = &brw->batch;
   uint32_t offset;

   assert(size < batch->bo->size);
   offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);

   /* If allocating from the top would wrap below the batchbuffer, or
    * if the batch's used space (plus the reserved pad) collides with our
    * space, then flush and try again.
    */
   if (batch->state_batch_offset < size ||
       offset < 4 * USED_BATCH(*batch) + batch->reserved_space) {
      intel_batchbuffer_flush(brw);
      offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
   }

   batch->state_batch_offset = offset;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      _mesa_hash_table_insert(batch->state_batch_sizes,
                              (void *) (uintptr_t) offset,
                              (void *) (uintptr_t) size);
   }

   *out_offset = offset;
   return batch->map + (offset >> 2);
}
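/* A hedged usage sketch, not code from the driver: a hypothetical state
 * emitter that asks brw_state_batch() for space and fills in a small
 * indirect-state payload.  The helper name, payload size, and alignment
 * below are illustrative assumptions; the returned offset is what other
 * commands in the batch would point at.
 */
static uint32_t
emit_example_indirect_state(struct brw_context *brw)
{
   uint32_t offset;
   uint32_t *state = brw_state_batch(brw, 4 * sizeof(uint32_t),
                                     64 /* alignment */, &offset);

   /* Fill in the (dummy) indirect state payload. */
   state[0] = 0;
   state[1] = 0;
   state[2] = 0;
   state[3] = 0;

   return offset;
}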
ir_visitor_status
output_read_remover::visit(ir_dereference_variable *ir)
{
   if (ir->var->data.mode != ir_var_shader_out)
      return visit_continue;
   if (stage == MESA_SHADER_TESS_CTRL)
      return visit_continue;

   hash_entry *entry = _mesa_hash_table_search(replacements, ir->var);
   ir_variable *temp = entry ? (ir_variable *) entry->data : NULL;

   /* If we don't have an existing temporary, create one. */
   if (temp == NULL) {
      void *var_ctx = ralloc_parent(ir->var);
      temp = new(var_ctx) ir_variable(ir->var->type, ir->var->name,
                                      ir_var_temporary);
      _mesa_hash_table_insert(replacements, ir->var, temp);
      ir->var->insert_after(temp);
   }

   /* Update the dereference to use the temporary. */
   ir->var = temp;
   return visit_continue;
}
int
main(int argc, char **argv)
{
   struct hash_table *ht;
   struct hash_entry *entry;
   unsigned size = 10000;
   uint32_t keys[size];
   uint32_t i;

   (void) argc;
   (void) argv;

   ht = _mesa_hash_table_create(NULL, key_value, uint32_t_key_equals);

   for (i = 0; i < size; i++) {
      keys[i] = i;
      _mesa_hash_table_insert(ht, keys + i, NULL);
   }

   for (i = 0; i < size; i++) {
      entry = _mesa_hash_table_search(ht, keys + i);
      assert(entry);
      assert(key_value(entry->key) == i);
   }

   assert(ht->entries == size);

   _mesa_hash_table_destroy(ht, NULL);

   return 0;
}
link_uniform_block_active *
process_block(void *mem_ctx, struct hash_table *ht, ir_variable *var)
{
   const hash_entry *const existing_block =
      _mesa_hash_table_search(ht, var->get_interface_type()->name);

   const glsl_type *const block_type = var->is_interface_instance()
      ? var->type : var->get_interface_type();

   /* If a block with this block-name has not previously been seen, add it.
    * If a block with this block-name has been seen, it must be identical to
    * the block currently being examined.
    */
   if (existing_block == NULL) {
      link_uniform_block_active *const b =
         rzalloc(mem_ctx, struct link_uniform_block_active);

      b->type = block_type;
      b->has_instance_name = var->is_interface_instance();

      if (var->data.explicit_binding) {
         b->has_binding = true;
         b->binding = var->data.binding;
      } else {
         b->has_binding = false;
         b->binding = 0;
      }

      _mesa_hash_table_insert(ht, var->get_interface_type()->name,
                              (void *) b);
      return b;
   } else {
int
main(int argc, char **argv)
{
   struct hash_table *ht;
   uint32_t hash_str1 = _mesa_hash_string(str1);
   uint32_t hash_str2 = _mesa_hash_string(str2);

   ht = _mesa_hash_table_create(NULL, _mesa_key_string_equal);

   _mesa_hash_table_insert(ht, hash_str1, str1, NULL);
   _mesa_hash_table_insert(ht, hash_str2, str2, NULL);

   _mesa_hash_table_destroy(ht, delete_callback);

   assert(delete_str1 && delete_str2);

   return 0;
}
loop_variable_state *
loop_state::insert(ir_loop *ir)
{
   loop_variable_state *ls = new(this->mem_ctx) loop_variable_state;

   _mesa_hash_table_insert(this->ht, ir, ls);
   this->loop_found = true;

   return ls;
}
int
main(int argc, char **argv)
{
   struct hash_table *ht;

   (void) argc;
   (void) argv;

   ht = _mesa_hash_table_create(NULL, _mesa_key_hash_string,
                                _mesa_key_string_equal);

   _mesa_hash_table_insert(ht, str1, NULL);
   _mesa_hash_table_insert(ht, str2, NULL);

   _mesa_hash_table_destroy(ht, delete_callback);

   assert(delete_str1 && delete_str2);

   return 0;
}
ir_visitor_status
gp_ir_visitor::visit_enter(ir_loop *ir)
{
   _mesa_hash_table_insert(this->loop_beginning_to_block,
                           _mesa_hash_pointer(ir), ir, this->cur_block);

   lima_gp_ir_block_t *loop_header = lima_gp_ir_block_create();
   lima_gp_ir_prog_insert(loop_header, this->cur_block);
   this->cur_block = loop_header;

   /* We create after_loop and append it after loop_header, but we *do not*
    * set this->cur_block - any additional blocks in the loop will go in
    * between loop_header and after_loop.
    */
   lima_gp_ir_block_t *after_loop = lima_gp_ir_block_create();
   lima_gp_ir_prog_insert(after_loop, this->cur_block);

   lima_gp_ir_block_t *old_break_block = this->break_block;
   lima_gp_ir_block_t *old_continue_block = this->continue_block;
   this->break_block = after_loop;
   this->continue_block = loop_header;

   visit_list_elements(this, &ir->begin_phi_nodes, false);
   visit_list_elements(this, &ir->body_instructions);

   _mesa_hash_table_insert(this->loop_end_to_block,
                           _mesa_hash_pointer(ir), ir, this->cur_block);

   lima_gp_ir_branch_node_t *branch =
      lima_gp_ir_branch_node_create(lima_gp_ir_op_branch_uncond);
   branch->dest = loop_header;
   lima_gp_ir_block_insert_end(this->cur_block, &branch->root_node);

   this->break_block = old_break_block;
   this->continue_block = old_continue_block;
   this->cur_block = after_loop;

   visit_list_elements(this, &ir->end_phi_nodes, false);

   return visit_continue_with_parent;
}
function *
get_function(ir_function_signature *sig)
{
   function *f;
   hash_entry *entry = _mesa_hash_table_search(this->function_hash, sig);

   if (entry == NULL) {
      f = new(mem_ctx) function(sig);
      _mesa_hash_table_insert(this->function_hash, sig, f);
   } else {
      f = (function *) entry->data;
   }

   return f;
}
static void
add_uniform(struct hash_table *ht, struct qreg reg)
{
   struct hash_entry *entry;
   void *key = (void *)(uintptr_t)reg.index;

   entry = _mesa_hash_table_search(ht, key);
   if (entry) {
      /* The use count is stored directly in the data pointer; bump it. */
      entry->data++;
   } else {
      _mesa_hash_table_insert(ht, key, (void *)(uintptr_t)1);
   }
}
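/* Illustrative sketch only: add_uniform() packs a use count directly into
 * the entry's data pointer, so reading the counts back is just a matter of
 * walking the table and casting the pointer back to an integer.  The
 * function name and the fprintf() reporting (which assumes <stdio.h>) are
 * assumptions made for the example; hash_table_foreach() is the iteration
 * macro used elsewhere in this code.
 */
static void
dump_uniform_use_counts(struct hash_table *ht)
{
   struct hash_entry *entry;

   hash_table_foreach(ht, entry) {
      uint32_t index = (uintptr_t) entry->key;
      uint32_t count = (uintptr_t) entry->data;

      fprintf(stderr, "uniform %u used %u time(s)\n", index, count);
   }
}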
loop_variable *
loop_variable_state::insert(ir_variable *var)
{
   void *mem_ctx = ralloc_parent(this);
   loop_variable *lv = rzalloc(mem_ctx, loop_variable);

   lv->var = var;

   _mesa_hash_table_insert(this->var_hash, lv->var, lv);
   this->variables.push_tail(lv);

   return lv;
}
ir_variable_refcount_entry *
ir_variable_refcount_visitor::get_variable_entry(ir_variable *var)
{
   assert(var);

   struct hash_entry *e = _mesa_hash_table_search(this->ht, var);
   if (e)
      return (ir_variable_refcount_entry *) e->data;

   ir_variable_refcount_entry *entry = new ir_variable_refcount_entry(var);
   assert(entry->referenced_count == 0);
   _mesa_hash_table_insert(this->ht, var, entry);

   return entry;
}
ir_visitor_status
gp_ir_visitor::visit(ir_loop_jump *ir)
{
   _mesa_hash_table_insert(this->loop_jump_to_block,
                           _mesa_hash_pointer(ir), ir, this->cur_block);

   lima_gp_ir_branch_node_t *branch =
      lima_gp_ir_branch_node_create(lima_gp_ir_op_branch_uncond);

   if (ir->mode == ir_loop_jump::jump_break)
      branch->dest = this->break_block;
   else
      branch->dest = this->continue_block;

   lima_gp_ir_block_insert_end(this->cur_block, &branch->root_node);

   return visit_continue;
}
static struct assignment_entry *
get_assignment_entry(ir_variable *var, struct hash_table *ht)
{
   struct hash_entry *hte = _mesa_hash_table_search(ht, var);
   struct assignment_entry *entry;

   if (hte) {
      entry = (struct assignment_entry *) hte->data;
   } else {
      entry = (struct assignment_entry *) calloc(1, sizeof(*entry));
      entry->var = var;
      _mesa_hash_table_insert(ht, var, entry);
   }

   return entry;
}
/* Returns the deref node associated with the given variable.  This will be
 * the root of the tree representing all of the derefs of the given variable.
 */
static struct deref_node *
get_deref_node_for_var(nir_variable *var, struct lower_variables_state *state)
{
   struct deref_node *node;

   struct hash_entry *var_entry =
      _mesa_hash_table_search(state->deref_var_nodes, var);

   if (var_entry) {
      return var_entry->data;
   } else {
      node = deref_node_create(NULL, var->type, state->dead_ctx);
      _mesa_hash_table_insert(state->deref_var_nodes, var, node);
      return node;
   }
}
static void
register_var_use(nir_variable *var, nir_function_impl *impl,
                 struct hash_table *var_func_table)
{
   if (var->data.mode != nir_var_global)
      return;

   struct hash_entry *entry =
      _mesa_hash_table_search(var_func_table, var);

   if (entry) {
      if (entry->data != impl)
         entry->data = NULL;
   } else {
      _mesa_hash_table_insert(var_func_table, var, impl);
   }
}
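/* Sketch of the consumer side, under the assumption that a later stage of
 * the pass inspects var_func_table: a global variable whose entry still
 * points at a single non-NULL impl is only used by that one function and is
 * therefore a candidate for being turned into a local.  The helper name is
 * hypothetical.
 */
static bool
var_used_by_single_function(struct hash_table *var_func_table,
                            nir_variable *var,
                            nir_function_impl **out_impl)
{
   struct hash_entry *entry = _mesa_hash_table_search(var_func_table, var);

   /* Never registered, or registered from more than one function. */
   if (!entry || entry->data == NULL)
      return false;

   *out_impl = entry->data;
   return true;
}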
static void
log_error(validate_state *state, const char *cond, const char *file, int line)
{
   const void *obj;

   if (state->instr)
      obj = state->instr;
   else if (state->var)
      obj = state->var;
   else
      obj = cond;

   char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
                               cond, file, line);

   _mesa_hash_table_insert(state->errors, obj, msg);
}
static struct partial_update_state *
get_partial_update_state(struct hash_table *partial_update_ht,
                         struct qinst *inst)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(partial_update_ht, &inst->dst.index);
   if (entry)
      return entry->data;

   struct partial_update_state *state =
      rzalloc(partial_update_ht, struct partial_update_state);

   _mesa_hash_table_insert(partial_update_ht, &inst->dst.index, state);

   return state;
}
int
main(int argc, char **argv)
{
   struct hash_table *ht;
   struct hash_entry *entry;
   int size = 10000;
   uint32_t keys[size];
   uint32_t i;

   ht = _mesa_hash_table_create(NULL, key_value, uint32_t_key_equals);

   for (i = 0; i < size; i++) {
      keys[i] = i;
      _mesa_hash_table_insert(ht, keys + i, NULL);

      if (i >= 100) {
         uint32_t delete_value = i - 100;
         entry = _mesa_hash_table_search(ht, &delete_value);
         _mesa_hash_table_remove(ht, entry);
      }
   }

   /* Make sure that all our entries were present at the end. */
   for (i = size - 100; i < size; i++) {
      entry = _mesa_hash_table_search(ht, keys + i);
      assert(entry);
      assert(key_value(entry->key) == i);
   }

   /* Make sure that no extra entries got in. */
   for (entry = _mesa_hash_table_next_entry(ht, NULL);
        entry != NULL;
        entry = _mesa_hash_table_next_entry(ht, entry)) {
      assert(key_value(entry->key) >= size - 100 &&
             key_value(entry->key) < size);
   }

   assert(ht->entries == 100);

   _mesa_hash_table_destroy(ht, NULL);

   return 0;
}
int
main(int argc, char **argv)
{
   struct hash_table *ht;
   struct hash_entry *entry;
   uint32_t keys[SIZE];
   uint32_t i, random_value;

   (void) argc;
   (void) argv;

   ht = _mesa_hash_table_create(NULL, key_value, uint32_t_key_equals);

   for (i = 0; i < SIZE; i++) {
      keys[i] = i;
      _mesa_hash_table_insert(ht, keys + i, NULL);
   }

   /* Test the no-predicate case. */
   entry = _mesa_hash_table_random_entry(ht, NULL);
   assert(entry);

   /* Check that we're getting different entries and that the predicate
    * works.
    */
   for (i = 0; i < 100; i++) {
      entry = _mesa_hash_table_random_entry(ht, uint32_t_key_is_even);
      assert(entry);
      assert((key_value(entry->key) & 1) == 0);

      /* Stop once we see an entry that differs from the previous pick;
       * breaking on i == 0 would defeat the check.
       */
      if (i > 0 && key_value(entry->key) != random_value)
         break;

      random_value = key_value(entry->key);
   }
   assert(i != 100);

   _mesa_hash_table_destroy(ht, NULL);

   return 0;
}
static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   assert(def->index < state->impl->ssa_alloc);
   assert(!BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   assert(def->parent_instr == state->instr);

   assert(def->num_components <= 4);

   list_validate(&def->uses);
   list_validate(&def->if_uses);

   ssa_def_validate_state *def_state = ralloc(state->ssa_defs,
                                              ssa_def_validate_state);
   def_state->where_defined = state->impl;
   def_state->uses = _mesa_set_create(def_state, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   def_state->if_uses = _mesa_set_create(def_state, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);
   _mesa_hash_table_insert(state->ssa_defs, def, def_state);
}
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       const VkAllocationCallbacks *alloc,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(alloc, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(alloc, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}
/**
 * Determines if the given phi node should be lowered.  The only phi nodes
 * we will scalarize at the moment are those where all of the sources are
 * scalarizable.
 *
 * The reason for this comes down to coalescing.  Since phi sources can't
 * swizzle, swizzles on phis have to be resolved by inserting a mov right
 * before the phi.  The choice then becomes between movs to pick off
 * components for a scalar phi or potentially movs to recombine components
 * for a vector phi.  The problem is that the movs generated to pick off
 * the components are almost uncoalescable.  We can't coalesce them in NIR
 * because we need them to pick off components and we can't coalesce them
 * in the backend because the source register is a vector and the
 * destination is a scalar that may be used at other places in the program.
 * On the other hand, if we have a bunch of scalars going into a vector
 * phi, the situation is much better.  In this case, if the SSA def is
 * generated in the predecessor block to the corresponding phi source, the
 * backend code will be an ALU op into a temporary and then a mov into the
 * given vector component; this move can almost certainly be coalesced
 * away.
 */
static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
{
   /* Already scalar */
   if (phi->dest.ssa.num_components == 1)
      return false;

   struct hash_entry *entry = _mesa_hash_table_search(state->phi_table, phi);
   if (entry)
      return entry->data != NULL;

   /* Insert an entry and mark it as scalarizable for now.  That way
    * we don't recurse forever and a cycle in the dependence graph
    * won't automatically make us fail to scalarize.
    */
   entry = _mesa_hash_table_insert(state->phi_table, phi, (void *)(intptr_t)1);

   bool scalarizable = true;

   nir_foreach_phi_src(phi, src) {
      scalarizable = is_phi_src_scalarizable(src, state);
      if (!scalarizable)
         break;
   }
int iris_bo_busy(struct iris_bo *bo) { struct iris_bufmgr *bufmgr = bo->bufmgr; struct drm_i915_gem_busy busy = { .handle = bo->gem_handle }; int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); if (ret == 0) { bo->idle = !busy.busy; return busy.busy; } return false; } int iris_bo_madvise(struct iris_bo *bo, int state) { struct drm_i915_gem_madvise madv = { .handle = bo->gem_handle, .madv = state, .retained = 1, }; drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); return madv.retained; } /* drop the oldest entries that have been purged by the kernel */ static void iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr, struct bo_cache_bucket *bucket) { list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) { if (iris_bo_madvise(bo, I915_MADV_DONTNEED)) break; list_del(&bo->head); bo_free(bo); } } static struct iris_bo * bo_calloc(void) { struct iris_bo *bo = calloc(1, sizeof(*bo)); if (bo) { bo->hash = _mesa_hash_pointer(bo); } return bo; } static struct iris_bo * bo_alloc_internal(struct iris_bufmgr *bufmgr, const char *name, uint64_t size, enum iris_memory_zone memzone, unsigned flags, uint32_t tiling_mode, uint32_t stride) { struct iris_bo *bo; unsigned int page_size = getpagesize(); int ret; struct bo_cache_bucket *bucket; bool alloc_from_cache; uint64_t bo_size; bool zeroed = false; if (flags & BO_ALLOC_ZEROED) zeroed = true; if ((flags & BO_ALLOC_COHERENT) && !bufmgr->has_llc) { bo_size = MAX2(ALIGN(size, page_size), page_size); bucket = NULL; goto skip_cache; } /* Round the allocated size up to a power of two number of pages. */ bucket = bucket_for_size(bufmgr, size); /* If we don't have caching at this size, don't actually round the * allocation up. */ if (bucket == NULL) { bo_size = MAX2(ALIGN(size, page_size), page_size); } else { bo_size = bucket->size; } mtx_lock(&bufmgr->lock); /* Get a buffer out of the cache if available */ retry: alloc_from_cache = false; if (bucket != NULL && !list_empty(&bucket->head)) { /* If the last BO in the cache is idle, then reuse it. Otherwise, * allocate a fresh buffer to avoid stalling. */ bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head); if (!iris_bo_busy(bo)) { alloc_from_cache = true; list_del(&bo->head); } if (alloc_from_cache) { if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) { bo_free(bo); iris_bo_cache_purge_bucket(bufmgr, bucket); goto retry; } if (bo_set_tiling_internal(bo, tiling_mode, stride)) { bo_free(bo); goto retry; } if (zeroed) { void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW); if (!map) { bo_free(bo); goto retry; } memset(map, 0, bo_size); } } } if (alloc_from_cache) { /* If the cached BO isn't in the right memory zone, free the old * memory and assign it a new address. */ if (memzone != iris_memzone_for_address(bo->gtt_offset)) { vma_free(bufmgr, bo->gtt_offset, bo->size); bo->gtt_offset = 0ull; } } else { skip_cache: bo = bo_calloc(); if (!bo) goto err; bo->size = bo_size; bo->idle = true; struct drm_i915_gem_create create = { .size = bo_size }; /* All new BOs we get from the kernel are zeroed, so we don't need to * worry about that here. 
*/ ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create); if (ret != 0) { free(bo); goto err; } bo->gem_handle = create.handle; bo->bufmgr = bufmgr; bo->tiling_mode = I915_TILING_NONE; bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; bo->stride = 0; if (bo_set_tiling_internal(bo, tiling_mode, stride)) goto err_free; /* Calling set_domain() will allocate pages for the BO outside of the * struct mutex lock in the kernel, which is more efficient than waiting * to create them during the first execbuf that uses the BO. */ struct drm_i915_gem_set_domain sd = { .handle = bo->gem_handle, .read_domains = I915_GEM_DOMAIN_CPU, .write_domain = 0, }; if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) goto err_free; } bo->name = name; p_atomic_set(&bo->refcount, 1); bo->reusable = bucket && bufmgr->bo_reuse; bo->cache_coherent = bufmgr->has_llc; bo->index = -1; bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED; /* By default, capture all driver-internal buffers like shader kernels, * surface states, dynamic states, border colors, and so on. */ if (memzone < IRIS_MEMZONE_OTHER) bo->kflags |= EXEC_OBJECT_CAPTURE; if (bo->gtt_offset == 0ull) { bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1); if (bo->gtt_offset == 0ull) goto err_free; } mtx_unlock(&bufmgr->lock); if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) { struct drm_i915_gem_caching arg = { .handle = bo->gem_handle, .caching = 1, }; if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) { bo->cache_coherent = true; bo->reusable = false; } } DBG("bo_create: buf %d (%s) (%s memzone) %llub\n", bo->gem_handle, bo->name, memzone_name(memzone), (unsigned long long) size); return bo; err_free: bo_free(bo); err: mtx_unlock(&bufmgr->lock); return NULL; } struct iris_bo * iris_bo_alloc(struct iris_bufmgr *bufmgr, const char *name, uint64_t size, enum iris_memory_zone memzone) { return bo_alloc_internal(bufmgr, name, size, memzone, 0, I915_TILING_NONE, 0); } struct iris_bo * iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name, uint64_t size, enum iris_memory_zone memzone, uint32_t tiling_mode, uint32_t pitch, unsigned flags) { return bo_alloc_internal(bufmgr, name, size, memzone, flags, tiling_mode, pitch); } struct iris_bo * iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name, void *ptr, size_t size, enum iris_memory_zone memzone) { struct iris_bo *bo; bo = bo_calloc(); if (!bo) return NULL; struct drm_i915_gem_userptr arg = { .user_ptr = (uintptr_t)ptr, .user_size = size, }; if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg)) goto err_free; bo->gem_handle = arg.handle; /* Check the buffer for validity before we try and use it in a batch */ struct drm_i915_gem_set_domain sd = { .handle = bo->gem_handle, .read_domains = I915_GEM_DOMAIN_CPU, }; if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd)) goto err_close; bo->name = name; bo->size = size; bo->map_cpu = ptr; bo->bufmgr = bufmgr; bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED; bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1); if (bo->gtt_offset == 0ull) goto err_close; p_atomic_set(&bo->refcount, 1); bo->userptr = true; bo->cache_coherent = true; bo->index = -1; bo->idle = true; return bo; err_close: drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle); err_free: free(bo); return NULL; } /** * Returns a iris_bo wrapping the given buffer object handle. * * This can be used when one application needs to pass a buffer object * to another. 
*/ struct iris_bo * iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr, const char *name, unsigned int handle) { struct iris_bo *bo; /* At the moment most applications only have a few named bo. * For instance, in a DRI client only the render buffers passed * between X and the client are named. And since X returns the * alternating names for the front/back buffer a linear search * provides a sufficiently fast match. */ mtx_lock(&bufmgr->lock); bo = hash_find_bo(bufmgr->name_table, handle); if (bo) { iris_bo_reference(bo); goto out; } struct drm_gem_open open_arg = { .name = handle }; int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg); if (ret != 0) { DBG("Couldn't reference %s handle 0x%08x: %s\n", name, handle, strerror(errno)); bo = NULL; goto out; } /* Now see if someone has used a prime handle to get this * object from the kernel before by looking through the list * again for a matching gem_handle */ bo = hash_find_bo(bufmgr->handle_table, open_arg.handle); if (bo) { iris_bo_reference(bo); goto out; } bo = bo_calloc(); if (!bo) goto out; p_atomic_set(&bo->refcount, 1); bo->size = open_arg.size; bo->gtt_offset = 0; bo->bufmgr = bufmgr; bo->gem_handle = open_arg.handle; bo->name = name; bo->global_name = handle; bo->reusable = false; bo->external = true; bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED; bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1); _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo); _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo); struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle }; ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling); if (ret != 0) goto err_unref; bo->tiling_mode = get_tiling.tiling_mode; bo->swizzle_mode = get_tiling.swizzle_mode; /* XXX stride is unknown */ DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name); out: mtx_unlock(&bufmgr->lock); return bo; err_unref: bo_free(bo); mtx_unlock(&bufmgr->lock); return NULL; } static void bo_free(struct iris_bo *bo) { struct iris_bufmgr *bufmgr = bo->bufmgr; if (bo->map_cpu && !bo->userptr) { VG_NOACCESS(bo->map_cpu, bo->size); munmap(bo->map_cpu, bo->size); } if (bo->map_wc) { VG_NOACCESS(bo->map_wc, bo->size); munmap(bo->map_wc, bo->size); } if (bo->map_gtt) { VG_NOACCESS(bo->map_gtt, bo->size); munmap(bo->map_gtt, bo->size); } if (bo->external) { struct hash_entry *entry; if (bo->global_name) { entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name); _mesa_hash_table_remove(bufmgr->name_table, entry); } entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle); _mesa_hash_table_remove(bufmgr->handle_table, entry); } /* Close this object */ struct drm_gem_close close = { .handle = bo->gem_handle }; int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close); if (ret != 0) { DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", bo->gem_handle, bo->name, strerror(errno)); } vma_free(bo->bufmgr, bo->gtt_offset, bo->size); free(bo); } /** Frees all cached buffers significantly older than @time. 
*/ static void cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time) { int i; if (bufmgr->time == time) return; for (i = 0; i < bufmgr->num_buckets; i++) { struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i]; list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) { if (time - bo->free_time <= 1) break; list_del(&bo->head); bo_free(bo); } } bufmgr->time = time; } static void bo_unreference_final(struct iris_bo *bo, time_t time) { struct iris_bufmgr *bufmgr = bo->bufmgr; struct bo_cache_bucket *bucket; DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name); bucket = NULL; if (bo->reusable) bucket = bucket_for_size(bufmgr, bo->size); /* Put the buffer into our internal cache for reuse if we can. */ if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) { bo->free_time = time; bo->name = NULL; list_addtail(&bo->head, &bucket->head); } else { bo_free(bo); } } void iris_bo_unreference(struct iris_bo *bo) { if (bo == NULL) return; assert(p_atomic_read(&bo->refcount) > 0); if (atomic_add_unless(&bo->refcount, -1, 1)) { struct iris_bufmgr *bufmgr = bo->bufmgr; struct timespec time; clock_gettime(CLOCK_MONOTONIC, &time); mtx_lock(&bufmgr->lock); if (p_atomic_dec_zero(&bo->refcount)) { bo_unreference_final(bo, time.tv_sec); cleanup_bo_cache(bufmgr, time.tv_sec); } mtx_unlock(&bufmgr->lock); } } static void bo_wait_with_stall_warning(struct pipe_debug_callback *dbg, struct iris_bo *bo, const char *action) { bool busy = dbg && !bo->idle; double elapsed = unlikely(busy) ? -get_time() : 0.0; iris_bo_wait_rendering(bo); if (unlikely(busy)) { elapsed += get_time(); if (elapsed > 1e-5) /* 0.01ms */ { perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n", action, bo->name, elapsed * 1000); } } } static void print_flags(unsigned flags) { if (flags & MAP_READ) DBG("READ "); if (flags & MAP_WRITE) DBG("WRITE "); if (flags & MAP_ASYNC) DBG("ASYNC "); if (flags & MAP_PERSISTENT) DBG("PERSISTENT "); if (flags & MAP_COHERENT) DBG("COHERENT "); if (flags & MAP_RAW) DBG("RAW "); DBG("\n"); } static void * iris_bo_map_cpu(struct pipe_debug_callback *dbg, struct iris_bo *bo, unsigned flags) { struct iris_bufmgr *bufmgr = bo->bufmgr; /* We disallow CPU maps for writing to non-coherent buffers, as the * CPU map can become invalidated when a batch is flushed out, which * can happen at unpredictable times. You should use WC maps instead. */ assert(bo->cache_coherent || !(flags & MAP_WRITE)); if (!bo->map_cpu) { DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name); struct drm_i915_gem_mmap mmap_arg = { .handle = bo->gem_handle, .size = bo->size, }; int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); if (ret != 0) { DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno)); return NULL; } void *map = (void *) (uintptr_t) mmap_arg.addr_ptr; VG_DEFINED(map, bo->size); if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) { VG_NOACCESS(map, bo->size); munmap(map, bo->size); } } assert(bo->map_cpu); DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_cpu); print_flags(flags); if (!(flags & MAP_ASYNC)) { bo_wait_with_stall_warning(dbg, bo, "CPU mapping"); } if (!bo->cache_coherent && !bo->bufmgr->has_llc) { /* If we're reusing an existing CPU mapping, the CPU caches may * contain stale data from the last time we read from that mapping. * (With the BO cache, it might even be data from a previous buffer!) * Even if it's a brand new mapping, the kernel may have zeroed the * buffer via CPU writes. 
* * We need to invalidate those cachelines so that we see the latest * contents, and so long as we only read from the CPU mmap we do not * need to write those cachelines back afterwards. * * On LLC, the emprical evidence suggests that writes from the GPU * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU * cachelines. (Other reads, such as the display engine, bypass the * LLC entirely requiring us to keep dirty pixels for the scanout * out of any cache.) */ gen_invalidate_range(bo->map_cpu, bo->size); } return bo->map_cpu; } static void * iris_bo_map_wc(struct pipe_debug_callback *dbg, struct iris_bo *bo, unsigned flags) { struct iris_bufmgr *bufmgr = bo->bufmgr; if (!bo->map_wc) { DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name); struct drm_i915_gem_mmap mmap_arg = { .handle = bo->gem_handle, .size = bo->size, .flags = I915_MMAP_WC, }; int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); if (ret != 0) { DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno)); return NULL; } void *map = (void *) (uintptr_t) mmap_arg.addr_ptr; VG_DEFINED(map, bo->size); if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) { VG_NOACCESS(map, bo->size); munmap(map, bo->size); } } assert(bo->map_wc); DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc); print_flags(flags); if (!(flags & MAP_ASYNC)) { bo_wait_with_stall_warning(dbg, bo, "WC mapping"); } return bo->map_wc; } /** * Perform an uncached mapping via the GTT. * * Write access through the GTT is not quite fully coherent. On low power * systems especially, like modern Atoms, we can observe reads from RAM before * the write via GTT has landed. A write memory barrier that flushes the Write * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later * read after the write as the GTT write suffers a small delay through the GTT * indirection. The kernel uses an uncached mmio read to ensure the GTT write * is ordered with reads (either by the GPU, WB or WC) and unconditionally * flushes prior to execbuf submission. However, if we are not informing the * kernel about our GTT writes, it will not flush before earlier access, such * as when using the cmdparser. Similarly, we need to be careful if we should * ever issue a CPU read immediately following a GTT write. * * Telling the kernel about write access also has one more important * side-effect. Upon receiving notification about the write, it cancels any * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by * either SW_FINISH or DIRTYFB. The presumption is that we never write to the * actual scanout via a mmaping, only to a backbuffer and so all the FBC/PSR * tracking is handled on the buffer exchange instead. */ static void * iris_bo_map_gtt(struct pipe_debug_callback *dbg, struct iris_bo *bo, unsigned flags) { struct iris_bufmgr *bufmgr = bo->bufmgr; /* Get a mapping of the buffer if we haven't before. */ if (bo->map_gtt == NULL) { DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name); struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle }; /* Get the fake offset back... */ int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); if (ret != 0) { DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n", __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno)); return NULL; } /* and mmap it. 
*/ void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED, bufmgr->fd, mmap_arg.offset); if (map == MAP_FAILED) { DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno)); return NULL; } /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will * already intercept this mmap call. However, for consistency between * all the mmap paths, we mark the pointer as defined now and mark it * as inaccessible afterwards. */ VG_DEFINED(map, bo->size); if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) { VG_NOACCESS(map, bo->size); munmap(map, bo->size); } } assert(bo->map_gtt); DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt); print_flags(flags); if (!(flags & MAP_ASYNC)) { bo_wait_with_stall_warning(dbg, bo, "GTT mapping"); } return bo->map_gtt; } static bool can_map_cpu(struct iris_bo *bo, unsigned flags) { if (bo->cache_coherent) return true; /* Even if the buffer itself is not cache-coherent (such as a scanout), on * an LLC platform reads always are coherent (as they are performed via the * central system agent). It is just the writes that we need to take special * care to ensure that land in main memory and not stick in the CPU cache. */ if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc) return true; /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid * across batch flushes where the kernel will change cache domains of the * bo, invalidating continued access to the CPU mmap on non-LLC device. * * Similarly, ASYNC typically means that the buffer will be accessed via * both the CPU and the GPU simultaneously. Batches may be executed that * use the BO even while it is mapped. While OpenGL technically disallows * most drawing while non-persistent mappings are active, we may still use * the GPU for blits or other operations, causing batches to happen at * inconvenient times. * * If RAW is set, we expect the caller to be able to handle a WC buffer * more efficiently than the involuntary clflushes. */ if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC | MAP_RAW)) return false; return !(flags & MAP_WRITE); } void * iris_bo_map(struct pipe_debug_callback *dbg, struct iris_bo *bo, unsigned flags) { if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW)) return iris_bo_map_gtt(dbg, bo, flags); void *map; if (can_map_cpu(bo, flags)) map = iris_bo_map_cpu(dbg, bo, flags); else map = iris_bo_map_wc(dbg, bo, flags); /* Allow the attempt to fail by falling back to the GTT where necessary. * * Not every buffer can be mmaped directly using the CPU (or WC), for * example buffers that wrap stolen memory or are imported from other * devices. For those, we have little choice but to use a GTT mmapping. * However, if we use a slow GTT mmapping for reads where we expected fast * access, that order of magnitude difference in throughput will be clearly * expressed by angry users. * * We skip MAP_RAW because we want to avoid map_gtt's fence detiling. */ if (!map && !(flags & MAP_RAW)) { perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n", bo->name, flags); map = iris_bo_map_gtt(dbg, bo, flags); } return map; } /** Waits for all GPU rendering with the object to have completed. */ void iris_bo_wait_rendering(struct iris_bo *bo) { /* We require a kernel recent enough for WAIT_IOCTL support. * See intel_init_bufmgr() */ iris_bo_wait(bo, -1); } /** * Waits on a BO for the given amount of time. * * @bo: buffer object to wait for * @timeout_ns: amount of time to wait in nanoseconds. 
* If value is less than 0, an infinite wait will occur. * * Returns 0 if the wait was successful ie. the last batch referencing the * object has completed within the allotted time. Otherwise some negative return * value describes the error. Of particular interest is -ETIME when the wait has * failed to yield the desired result. * * Similar to iris_bo_wait_rendering except a timeout parameter allows * the operation to give up after a certain amount of time. Another subtle * difference is the internal locking semantics are different (this variant does * not hold the lock for the duration of the wait). This makes the wait subject * to a larger userspace race window. * * The implementation shall wait until the object is no longer actively * referenced within a batch buffer at the time of the call. The wait will * not guarantee that the buffer is re-issued via another thread, or an flinked * handle. Userspace must make sure this race does not occur if such precision * is important. * * Note that some kernels have broken the inifite wait for negative values * promise, upgrade to latest stable kernels if this is the case. */ int iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns) { struct iris_bufmgr *bufmgr = bo->bufmgr; /* If we know it's idle, don't bother with the kernel round trip */ if (bo->idle && !bo->external) return 0; struct drm_i915_gem_wait wait = { .bo_handle = bo->gem_handle, .timeout_ns = timeout_ns, }; int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait); if (ret != 0) return -errno; bo->idle = true; return ret; } void iris_bufmgr_destroy(struct iris_bufmgr *bufmgr) { mtx_destroy(&bufmgr->lock); /* Free any cached buffer objects we were going to reuse */ for (int i = 0; i < bufmgr->num_buckets; i++) { struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i]; list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) { list_del(&bo->head); bo_free(bo); } } _mesa_hash_table_destroy(bufmgr->name_table, NULL); _mesa_hash_table_destroy(bufmgr->handle_table, NULL); for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) { if (z != IRIS_MEMZONE_BINDER) util_vma_heap_finish(&bufmgr->vma_allocator[z]); } free(bufmgr); } static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode, uint32_t stride) { struct iris_bufmgr *bufmgr = bo->bufmgr; struct drm_i915_gem_set_tiling set_tiling; int ret; if (bo->global_name == 0 && tiling_mode == bo->tiling_mode && stride == bo->stride) return 0; memset(&set_tiling, 0, sizeof(set_tiling)); do { /* set_tiling is slightly broken and overwrites the * input on the error path, so we have to open code * drm_ioctl. 
*/ set_tiling.handle = bo->gem_handle; set_tiling.tiling_mode = tiling_mode; set_tiling.stride = stride; ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling); } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); if (ret == -1) return -errno; bo->tiling_mode = set_tiling.tiling_mode; bo->swizzle_mode = set_tiling.swizzle_mode; bo->stride = set_tiling.stride; return 0; } int iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode, uint32_t *swizzle_mode) { *tiling_mode = bo->tiling_mode; *swizzle_mode = bo->swizzle_mode; return 0; } struct iris_bo * iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd) { uint32_t handle; struct iris_bo *bo; mtx_lock(&bufmgr->lock); int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle); if (ret) { DBG("import_dmabuf: failed to obtain handle from fd: %s\n", strerror(errno)); mtx_unlock(&bufmgr->lock); return NULL; } /* * See if the kernel has already returned this buffer to us. Just as * for named buffers, we must not create two bo's pointing at the same * kernel object */ bo = hash_find_bo(bufmgr->handle_table, handle); if (bo) { iris_bo_reference(bo); goto out; } bo = bo_calloc(); if (!bo) goto out; p_atomic_set(&bo->refcount, 1); /* Determine size of bo. The fd-to-handle ioctl really should * return the size, but it doesn't. If we have kernel 3.12 or * later, we can lseek on the prime fd to get the size. Older * kernels will just fail, in which case we fall back to the * provided (estimated or guess size). */ ret = lseek(prime_fd, 0, SEEK_END); if (ret != -1) bo->size = ret; bo->bufmgr = bufmgr; bo->gem_handle = handle; _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo); bo->name = "prime"; bo->reusable = false; bo->external = true; bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED; bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1); struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle }; if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) goto err; bo->tiling_mode = get_tiling.tiling_mode; bo->swizzle_mode = get_tiling.swizzle_mode; /* XXX stride is unknown */ out: mtx_unlock(&bufmgr->lock); return bo; err: bo_free(bo); mtx_unlock(&bufmgr->lock); return NULL; } static void iris_bo_make_external_locked(struct iris_bo *bo) { if (!bo->external) { _mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo); bo->external = true; } } static void iris_bo_make_external(struct iris_bo *bo) { struct iris_bufmgr *bufmgr = bo->bufmgr; if (bo->external) return; mtx_lock(&bufmgr->lock); iris_bo_make_external_locked(bo); mtx_unlock(&bufmgr->lock); } int iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd) { struct iris_bufmgr *bufmgr = bo->bufmgr; iris_bo_make_external(bo); if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle, DRM_CLOEXEC, prime_fd) != 0) return -errno; bo->reusable = false; return 0; } uint32_t iris_bo_export_gem_handle(struct iris_bo *bo) { iris_bo_make_external(bo); return bo->gem_handle; } int iris_bo_flink(struct iris_bo *bo, uint32_t *name) { struct iris_bufmgr *bufmgr = bo->bufmgr; if (!bo->global_name) { struct drm_gem_flink flink = { .handle = bo->gem_handle }; if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink)) return -errno; mtx_lock(&bufmgr->lock); if (!bo->global_name) { iris_bo_make_external_locked(bo); bo->global_name = flink.name; _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo); } mtx_unlock(&bufmgr->lock); bo->reusable = false; } *name = bo->global_name; return 0; } static 
void
add_bucket(struct iris_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;

   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
}

static void
init_cache_buckets(struct iris_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, PAGE_SIZE);
   add_bucket(bufmgr, PAGE_SIZE * 2);
   add_bucket(bufmgr, PAGE_SIZE * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);
      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}
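/* For illustration only: a simple linear-scan version of the bucket lookup
 * that add_bucket()'s asserts exercise.  It relies on init_cache_buckets()
 * adding buckets in ascending size order; the real driver may compute the
 * bucket index in closed form instead, but the contract is the same:
 * return the smallest bucket able to hold `size`, or NULL if the request is
 * larger than anything we cache.
 */
static struct bo_cache_bucket *
bucket_for_size_linear(struct iris_bufmgr *bufmgr, uint64_t size)
{
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      if (bucket->size >= size)
         return bucket;
   }

   return NULL;
}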
static void
write_add_object(write_ctx *ctx, const void *obj)
{
   uintptr_t index = ctx->next_idx++;
   _mesa_hash_table_insert(ctx->remap_table, obj, (void *) index);
}
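/* Hedged sketch of the lookup counterpart to write_add_object(): recover the
 * index previously assigned to an object by unpacking it from the data
 * pointer.  The exact name and return type in the real serializer may
 * differ; this only shows how the uintptr_t value round-trips through the
 * table.
 */
static uintptr_t
write_lookup_object(write_ctx *ctx, const void *obj)
{
   struct hash_entry *entry = _mesa_hash_table_search(ctx->remap_table, obj);

   assert(entry);
   return (uintptr_t) entry->data;
}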
static void
add_remap(clone_state *state, void *nptr, const void *ptr)
{
   _mesa_hash_table_insert(state->remap_table, ptr, nptr);
}