/* Remove NODE from the assembler name hash, keeping intact the chain of
   other symbols sharing the same assembler name.  */

static void
unlink_from_assembler_name_hash (symtab_node node)
{
  if (assembler_name_hash)
    {
      if (node->symbol.next_sharing_asm_name)
	node->symbol.next_sharing_asm_name->symbol.previous_sharing_asm_name
	  = node->symbol.previous_sharing_asm_name;
      if (node->symbol.previous_sharing_asm_name)
	{
	  node->symbol.previous_sharing_asm_name->symbol.next_sharing_asm_name
	    = node->symbol.next_sharing_asm_name;
	}
      else
	{
	  tree name = DECL_ASSEMBLER_NAME (node->symbol.decl);
	  void **slot;
	  slot = htab_find_slot_with_hash (assembler_name_hash, name,
					   decl_assembler_name_hash (name),
					   NO_INSERT);
	  gcc_assert (*slot == node);
	  if (!node->symbol.next_sharing_asm_name)
	    htab_clear_slot (assembler_name_hash, slot);
	  else
	    *slot = node->symbol.next_sharing_asm_name;
	}
    }
}
/*! \brief Delete ENTRY from JOURNAL: unlink it from the journal's
    doubly-linked list, free its name and pool memory, and clear its
    hash table slot.  */
bool journal_delete_entry(journal_t journal, journal_entry entry)
{
    void **slot;

    CHECK_MUTEX_LOCKED(journal->mutex);

    slot = htab_find_slot_with_hash(journal->htab, entry,
                                    JOURNAL_HASH(entry), NO_INSERT);
    if (!slot)
        return false;

    if (entry->next)
        entry->next->prev = entry->prev;
    else
        journal->last = entry->prev;
    if (entry->prev)
        entry->prev->next = entry->next;
    else
        journal->first = entry->next;

    free(entry->name.str);
    zfsd_mutex_lock(&journal_mutex);
    pool_free(journal_pool, entry);
    zfsd_mutex_unlock(&journal_mutex);
    htab_clear_slot(journal->htab, slot);

    return true;
}
/* After CHILD_TASK has finished, remove its non-redundant dependency
   entries from the parent's dependency hash table.  */

static void
gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  size_t i;

  for (i = 0; i < child_task->depend_count; i++)
    if (!child_task->depend[i].redundant)
      {
	if (child_task->depend[i].next)
	  child_task->depend[i].next->prev = child_task->depend[i].prev;
	if (child_task->depend[i].prev)
	  child_task->depend[i].prev->next = child_task->depend[i].next;
	else
	  {
	    hash_entry_type *slot
	      = htab_find_slot (&parent->depend_hash, &child_task->depend[i],
				NO_INSERT);
	    if (*slot != &child_task->depend[i])
	      abort ();
	    if (child_task->depend[i].next)
	      *slot = child_task->depend[i].next;
	    else
	      htab_clear_slot (parent->depend_hash, slot);
	  }
      }
}
/*! \brief Delete the journal entry for operation OPER and file NAME from
    JOURNAL, unlinking it from the list and clearing its hash table slot.  */
bool journal_delete(journal_t journal, journal_operation_t oper, string *name)
{
    struct journal_entry_def entry;
    journal_entry del;
    void **slot;

    CHECK_MUTEX_LOCKED(journal->mutex);

    entry.oper = oper;
    entry.name = *name;
    slot = htab_find_slot_with_hash(journal->htab, &entry,
                                    JOURNAL_HASH(&entry), NO_INSERT);
    if (!slot)
        return false;

    del = (journal_entry) *slot;
    if (del->next)
        del->next->prev = del->prev;
    else
        journal->last = del->prev;
    if (del->prev)
        del->prev->next = del->next;
    else
        journal->first = del->next;

    free(del->name.str);
    zfsd_mutex_lock(&journal_mutex);
    pool_free(journal_pool, del);
    zfsd_mutex_unlock(&journal_mutex);
    htab_clear_slot(journal->htab, slot);

    return true;
}
/* Remove BASE's slot from HTAB; the entry must already be present.  */

void
gdbscm_clear_eqable_gsmob_ptr_slot (htab_t htab, eqable_gdb_smob *base)
{
  void **slot = htab_find_slot (htab, base, NO_INSERT);

  gdb_assert (slot != NULL);
  htab_clear_slot (htab, slot);
}
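None of the snippets here is self-contained, so the following is a minimal,
hypothetical sketch of the delete idiom they all share: look the element up
with NO_INSERT and pass the returned slot to htab_clear_slot, which marks the
slot deleted and invokes the table's del_f callback if one was registered.
It assumes libiberty's hashtab.h is on the include path; hash_str and eq_str
are invented names, not part of any of the projects above.

#include <stdio.h>
#include <string.h>
#include "hashtab.h"

static hashval_t
hash_str (const void *p)
{
  return htab_hash_string (p);
}

static int
eq_str (const void *a, const void *b)
{
  return strcmp ((const char *) a, (const char *) b) == 0;
}

int
main (void)
{
  /* No del_f callback, so htab_clear_slot only marks the slot deleted.  */
  htab_t tab = htab_create (16, hash_str, eq_str, NULL);
  void **slot;

  /* Insert one key; with INSERT the returned slot is where the element
     pointer is stored.  */
  slot = htab_find_slot (tab, "answer", INSERT);
  *slot = (void *) "answer";

  /* Delete it: NO_INSERT returns NULL when the key is absent, otherwise
     the live slot, which is then cleared in place.  */
  slot = htab_find_slot (tab, "answer", NO_INSERT);
  if (slot)
    htab_clear_slot (tab, slot);

  printf ("elements left: %lu\n", (unsigned long) htab_elements (tab));
  htab_delete (tab);
  return 0;
}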
/* Notice that the pointer has been freed.  */

static void
free_overhead (struct vec_prefix *ptr)
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, ptr,
					htab_hash_pointer (ptr),
					NO_INSERT);
  struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
  p->loc->allocated -= p->allocated;
  htab_clear_slot (ptr_hash, slot);
  free (p);
}
/* Drop the memory-overhead accounting for this vector and release its
   hash table entry.  */

void
vec_prefix::release_overhead (void)
{
  PTR *slot = htab_find_slot_with_hash (ptr_hash, this,
					htab_hash_pointer (this),
					NO_INSERT);
  struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot;
  p->loc->allocated -= p->allocated;
  htab_clear_slot (ptr_hash, slot);
  ::free (p);
}
/* Traversal callback for GC cache hash tables: clear slots whose contents
   are no longer marked, otherwise run the table's callback on them.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}
/* Remove the original address entry recorded for tree T.  */

void
lto_orig_address_remove (tree t)
{
  struct tree_hash_entry ent;
  struct tree_hash_entry **slot;

  ent.key = t;
  slot = (struct tree_hash_entry **) htab_find_slot (tree_htab, &ent,
						     NO_INSERT);
  gcc_assert (slot);
  free (*slot);
  htab_clear_slot (tree_htab, (PTR *) slot);
}
/* Remove node from the varpool.  */

void
varpool_remove_node (struct varpool_node *node)
{
  void **slot;

  slot = htab_find_slot (varpool_hash, node, NO_INSERT);
  gcc_assert (*slot == node);
  htab_clear_slot (varpool_hash, slot);
  gcc_assert (!varpool_assembled_nodes_queue);

  if (node->next)
    node->next->prev = node->prev;
  if (node->prev)
    node->prev->next = node->next;
  else
    {
      gcc_assert (varpool_nodes == node);
      varpool_nodes = node->next;
    }

  if (varpool_first_unanalyzed_node == node)
    varpool_first_unanalyzed_node = node->next_needed;

  if (node->next_needed)
    node->next_needed->prev_needed = node->prev_needed;
  else if (node->prev_needed)
    {
      gcc_assert (varpool_last_needed_node);
      varpool_last_needed_node = node->prev_needed;
    }
  if (node->prev_needed)
    node->prev_needed->next_needed = node->next_needed;
  else if (node->next_needed)
    {
      gcc_assert (varpool_nodes_queue == node);
      varpool_nodes_queue = node->next_needed;
    }

  if (node->same_comdat_group)
    {
      struct varpool_node *prev;
      for (prev = node->same_comdat_group;
	   prev->same_comdat_group != node;
	   prev = prev->same_comdat_group)
	;
      if (node->same_comdat_group == prev)
	prev->same_comdat_group = NULL;
      else
	prev->same_comdat_group = node->same_comdat_group;
      node->same_comdat_group = NULL;
    }

  ipa_remove_all_references (&node->ref_list);
  ipa_remove_all_refering (&node->ref_list);
  ggc_free (node);
}
/* Remove NODE from the symbol table lists and hashes; in the decl hash,
   a replacement node takes its slot when one exists.  */

void
symtab_unregister_node (symtab_node *node)
{
  void **slot;

  ipa_remove_all_references (&node->ref_list);
  ipa_remove_all_referring (&node->ref_list);

  if (node->same_comdat_group)
    {
      symtab_node *prev;
      for (prev = node->same_comdat_group;
	   prev->same_comdat_group != node;
	   prev = prev->same_comdat_group)
	;
      if (node->same_comdat_group == prev)
	prev->same_comdat_group = NULL;
      else
	prev->same_comdat_group = node->same_comdat_group;
      node->same_comdat_group = NULL;
    }

  if (node->previous)
    node->previous->next = node->next;
  else
    symtab_nodes = node->next;
  if (node->next)
    node->next->previous = node->previous;
  node->next = NULL;
  node->previous = NULL;

  slot = htab_find_slot (symtab_hash, node, NO_INSERT);

  /* During LTO symtab merging we temporarily corrupt decl to symtab node
     hash.  */
  gcc_assert ((slot && *slot) || in_lto_p);
  if (slot && *slot && *slot == node)
    {
      symtab_node *replacement_node = NULL;
      if (cgraph_node *cnode = dyn_cast <cgraph_node> (node))
	replacement_node = cgraph_find_replacement_node (cnode);
      if (!replacement_node)
	htab_clear_slot (symtab_hash, slot);
      else
	*slot = replacement_node;
    }
  if (!is_a <varpool_node> (node) || !DECL_HARD_REGISTER (node->decl))
    unlink_from_assembler_name_hash (node, false);
}
/* Traversal callback: record INSN as the register kill for a load whose
   destination register it modifies (if none was recorded yet), and drop
   loads whose memory INSN invalidates.  */

static int
find_reg_kill_and_mem_invalidate (void **slot, void *arg)
{
  struct load *load = (struct load *) *slot;
  rtx insn = (rtx) arg;

  /* Record the farthest one from the load.  Ignore the effect of a store
     we just added.  */
  if (!load->reg_kill && modified_in_p (load->reg, insn))
    load->reg_kill = insn;

  if (modified_in_p (load->mem, insn))
    htab_clear_slot (htab_load, slot);

  return 1;
}
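The GC cache callback and the load-table callback above share the same
traversal shape, so here is a hypothetical, self-contained sketch of that
pattern (hash_val, eq_val and drop_odd are invented names; libiberty's
hashtab.h is assumed): htab_traverse_noresize walks every live slot and the
callback may delete the current element in place with htab_clear_slot.

#include <stdio.h>
#include <stddef.h>
#include "hashtab.h"

/* Identity hash and pointer equality for small integers stored directly as
   element pointers.  Values 0 and 1 are avoided because they collide with
   the empty/deleted slot markers.  */
static hashval_t
hash_val (const void *p)
{
  return (hashval_t) (size_t) p;
}

static int
eq_val (const void *a, const void *b)
{
  return a == b;
}

/* Traversal callback: the table is passed through INFO here (the callbacks
   above reach it through a global or their INFO structure instead) so the
   current slot can be cleared in place.  Returning 1 continues the walk.  */
static int
drop_odd (void **slot, void *info)
{
  htab_t tab = (htab_t) info;

  if (((size_t) *slot) & 1)
    htab_clear_slot (tab, slot);
  return 1;
}

int
main (void)
{
  htab_t tab = htab_create (16, hash_val, eq_val, NULL);
  size_t v;

  for (v = 10; v < 18; v++)
    *htab_find_slot (tab, (void *) v, INSERT) = (void *) v;

  /* The noresize variant is used so deleting entries during the walk
     cannot trigger a rehash.  */
  htab_traverse_noresize (tab, drop_odd, tab);

  printf ("remaining: %lu\n", (unsigned long) htab_elements (tab));
  htab_delete (tab);
  return 0;
}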
/* Set the section of this symtab node to SECTION, maintaining the
   reference-counted section name hash table.  */

void
symtab_node::set_section_for_node (const char *section)
{
  const char *current = get_section ();
  void **slot;

  if (current == section
      || (current && section && !strcmp (current, section)))
    return;

  if (current)
    {
      x_section->ref_count--;
      if (!x_section->ref_count)
	{
	  slot = htab_find_slot_with_hash (symtab->section_hash,
					   x_section->name,
					   htab_hash_string (x_section->name),
					   INSERT);
	  ggc_free (x_section);
	  htab_clear_slot (symtab->section_hash, slot);
	}
      x_section = NULL;
    }
  if (!section)
    {
      implicit_section = false;
      return;
    }
  if (!symtab->section_hash)
    symtab->section_hash = htab_create_ggc (10, hash_section_hash_entry,
					    eq_sections, NULL);
  slot = htab_find_slot_with_hash (symtab->section_hash, section,
				   htab_hash_string (section), INSERT);
  if (*slot)
    x_section = (section_hash_entry *) *slot;
  else
    {
      int len = strlen (section);
      *slot = x_section = ggc_cleared_alloc<section_hash_entry> ();
      x_section->name = ggc_vec_alloc<char> (len + 1);
      memcpy (x_section->name, section, len + 1);
    }
  x_section->ref_count++;
}
/* Remove NODE from the symbol table lists and hashes; in the decl hash,
   a replacement node takes its slot when one exists.  */

void
symtab_unregister_node (symtab_node node)
{
  void **slot;

  ipa_remove_all_references (&node->symbol.ref_list);
  ipa_remove_all_referring (&node->symbol.ref_list);

  if (node->symbol.same_comdat_group)
    {
      symtab_node prev;
      for (prev = node->symbol.same_comdat_group;
	   prev->symbol.same_comdat_group != node;
	   prev = prev->symbol.same_comdat_group)
	;
      if (node->symbol.same_comdat_group == prev)
	prev->symbol.same_comdat_group = NULL;
      else
	prev->symbol.same_comdat_group = node->symbol.same_comdat_group;
      node->symbol.same_comdat_group = NULL;
    }

  if (node->symbol.previous)
    node->symbol.previous->symbol.next = node->symbol.next;
  else
    symtab_nodes = node->symbol.next;
  if (node->symbol.next)
    node->symbol.next->symbol.previous = node->symbol.previous;
  node->symbol.next = NULL;
  node->symbol.previous = NULL;

  slot = htab_find_slot (symtab_hash, node, NO_INSERT);
  if (*slot == node)
    {
      symtab_node replacement_node = NULL;
      if (symtab_function_p (node))
	replacement_node
	  = (symtab_node) cgraph_find_replacement_node (cgraph (node));
      if (!replacement_node)
	htab_clear_slot (symtab_hash, slot);
      else
	*slot = replacement_node;
    }
  unlink_from_assembler_name_hash (node);
}
/* Remove NODE (and, if WITH_CLONES, its inline clones sharing the same
   decl) from the assembler name hash.  */

void
symbol_table::unlink_from_assembler_name_hash (symtab_node *node,
					       bool with_clones)
{
  if (assembler_name_hash)
    {
      cgraph_node *cnode;
      tree decl = node->decl;

      if (node->next_sharing_asm_name)
	node->next_sharing_asm_name->previous_sharing_asm_name
	  = node->previous_sharing_asm_name;
      if (node->previous_sharing_asm_name)
	{
	  node->previous_sharing_asm_name->next_sharing_asm_name
	    = node->next_sharing_asm_name;
	}
      else
	{
	  tree name = DECL_ASSEMBLER_NAME (node->decl);
	  void **slot;
	  slot = htab_find_slot_with_hash (assembler_name_hash, name,
					   decl_assembler_name_hash (name),
					   NO_INSERT);
	  gcc_assert (*slot == node);
	  if (!node->next_sharing_asm_name)
	    htab_clear_slot (assembler_name_hash, slot);
	  else
	    *slot = node->next_sharing_asm_name;
	}
      node->next_sharing_asm_name = NULL;
      node->previous_sharing_asm_name = NULL;

      /* Update also possible inline clones sharing a decl.  */
      cnode = dyn_cast <cgraph_node *> (node);
      if (cnode && cnode->clones && with_clones)
	for (cnode = cnode->clones; cnode; cnode = cnode->next_sibling_clone)
	  if (cnode->decl == decl)
	    unlink_from_assembler_name_hash (cnode, true);
    }
}
/*! \brief Return versions during readdir.

    Return version file names from the hash table during readdir.  The hash
    table is first filled in by version_readdir_fill_dirhtab.  All parameters
    are taken from the readdir call.

    \param list
    \param dentry
    \param cookie
    \param data
    \param filldir

    \see version_readdir_from_dirhtab */
int32_t version_readdir_from_dirhtab(dir_list *list, internal_dentry dentry,
                                     int32_t cookie, readdir_data *data,
                                     filldir_f filldir)
{
    unsigned int i;

    // retrieve from hash table
    for (i = 0; i < dentry->dirhtab->size; i++)
    {
        struct dirhtab_item_def *e = dentry->dirhtab->table[i];

        if ((e == EMPTY_ENTRY) || (e == DELETED_ENTRY))
            continue;

        if (!(*filldir) (e->ino, cookie, e->name, strlen(e->name), list,
                         data))
            break;

        htab_clear_slot(dentry->dirhtab, &dentry->dirhtab->table[i]);
    }

    RETURN_INT(ZFS_OK);
}