/* Delete the journal entry with operation OPER and file name NAME from
   JOURNAL.  Return true if a matching entry was found and deleted, false
   if no such entry exists.  Caller must hold JOURNAL->mutex.  */
bool journal_delete(journal_t journal, journal_operation_t oper, string * name)
{
	struct journal_entry_def entry;	/* stack-allocated lookup key */
	journal_entry del;
	void **slot;

	CHECK_MUTEX_LOCKED(journal->mutex);

	/* Look the entry up by (operation, name); with NO_INSERT a missing
	   entry yields a NULL slot.  */
	entry.oper = oper;
	entry.name = *name;
	slot = htab_find_slot_with_hash(journal->htab, &entry, JOURNAL_HASH(&entry), NO_INSERT);
	if (!slot)
		return false;

	del = (journal_entry) * slot;

	/* Unlink DEL from the doubly linked list, fixing up the journal's
	   first/last pointers when DEL sits at either end.  */
	if (del->next)
		del->next->prev = del->prev;
	else
		journal->last = del->prev;
	if (del->prev)
		del->prev->next = del->next;
	else
		journal->first = del->next;

	/* The entry owns its name string.  */
	free(del->name.str);

	/* journal_pool is shared between journals, so it is protected by the
	   global journal_mutex rather than by this journal's mutex.  */
	zfsd_mutex_lock(&journal_mutex);
	pool_free(journal_pool, del);
	zfsd_mutex_unlock(&journal_mutex);

	htab_clear_slot(journal->htab, slot);
	return true;
}
/* Return the struct load record for the memory reference moved by SET,
   creating a fresh zero-initialized record the first time this memory
   rtx is seen.  Whichever side of SET is not the MEM (or the
   destination when neither side is) is remembered as the register.
   On a repeat sighting the pending kill is reset.  */
static struct load *
alloc_load (rtx set)
{
  rtx mem = SET_DEST (set);
  rtx reg;
  struct load **slot;
  struct load *rec;

  if (MEM_P (mem))
    reg = SET_SRC (set);
  else
    {
      reg = mem;
      mem = SET_SRC (set);
    }

  slot = (struct load **) htab_find_slot_with_hash (htab_load, mem,
						    load_rtx_hash (mem),
						    INSERT);
  rec = *slot;
  if (rec == NULL)
    {
      rec = (struct load *) xcalloc (1, sizeof (struct load));
      rec->mem = mem;
      *slot = rec;
    }
  else
    rec->reg_kill = 0;

  rec->reg = reg;
  return rec;
}
/* Delete ENTRY from JOURNAL.  ENTRY must be an entry currently stored in
   JOURNAL's hash table.  Return true if it was found and deleted.
   Caller must hold JOURNAL->mutex.  */
bool journal_delete_entry(journal_t journal, journal_entry entry)
{
	void **slot;

	CHECK_MUTEX_LOCKED(journal->mutex);

	/* Locate the slot holding ENTRY; NO_INSERT yields NULL when absent.  */
	slot = htab_find_slot_with_hash(journal->htab, entry, JOURNAL_HASH(entry), NO_INSERT);
	if (!slot)
		return false;

	/* Unlink ENTRY from the journal's doubly linked list.  */
	if (entry->next)
		entry->next->prev = entry->prev;
	else
		journal->last = entry->prev;
	if (entry->prev)
		entry->prev->next = entry->next;
	else
		journal->first = entry->next;

	/* The entry owns its name string.  */
	free(entry->name.str);

	/* The shared entry pool is guarded by the global journal_mutex.  */
	zfsd_mutex_lock(&journal_mutex);
	pool_free(journal_pool, entry);
	zfsd_mutex_unlock(&journal_mutex);

	htab_clear_slot(journal->htab, slot);
	return true;
}
/* Remove NODE from the assembler-name hash chain it is linked on.
   Symbols sharing one assembler name form a doubly linked list threaded
   through next/previous_sharing_asm_name; the hash table slot points at
   the head of that list.  No-op when the hash does not exist.  */
static void
unlink_from_assembler_name_hash (symtab_node node)
{
  if (assembler_name_hash)
    {
      if (node->symbol.next_sharing_asm_name)
	node->symbol.next_sharing_asm_name->symbol.previous_sharing_asm_name
	  = node->symbol.previous_sharing_asm_name;
      if (node->symbol.previous_sharing_asm_name)
	{
	  node->symbol.previous_sharing_asm_name->symbol.next_sharing_asm_name
	    = node->symbol.next_sharing_asm_name;
	}
      else
	{
	  /* NODE is the head of its chain, so the hash slot itself must
	     be repointed.  */
	  tree name = DECL_ASSEMBLER_NAME (node->symbol.decl);
	  void **slot;
	  slot = htab_find_slot_with_hash (assembler_name_hash, name,
					   decl_assembler_name_hash (name),
					   NO_INSERT);
	  gcc_assert (*slot == node);
	  if (!node->symbol.next_sharing_asm_name)
	    /* NODE was the only symbol with this name; drop the slot.  */
	    htab_clear_slot (assembler_name_hash, slot);
	  else
	    *slot = node->symbol.next_sharing_asm_name;
	}
    }
}
/* Insert NODE into assembler_name_hash, pushing it onto the front of the
   chain of symbols that share the same assembler name.  When WITH_CLONES
   is true, recursively insert inline clones that share NODE's decl.  */
static void
insert_to_assembler_name_hash (symtab_node node, bool with_clones)
{
  /* Variables bound to hard registers are not tracked by assembler name.  */
  if (is_a <varpool_node> (node) && DECL_HARD_REGISTER (node->symbol.decl))
    return;
  gcc_checking_assert (!node->symbol.previous_sharing_asm_name
		       && !node->symbol.next_sharing_asm_name);
  if (assembler_name_hash)
    {
      void **aslot;
      struct cgraph_node *cnode;
      tree decl = node->symbol.decl;
      tree name = DECL_ASSEMBLER_NAME (node->symbol.decl);

      aslot = htab_find_slot_with_hash (assembler_name_hash, name,
					decl_assembler_name_hash (name),
					INSERT);
      gcc_assert (*aslot != node);
      /* Push NODE at the head of the sharing chain.  */
      node->symbol.next_sharing_asm_name = (symtab_node)*aslot;
      if (*aslot != NULL)
	((symtab_node)*aslot)->symbol.previous_sharing_asm_name = node;
      *aslot = node;

      /* Update also possible inline clones sharing a decl.  */
      cnode = dyn_cast <cgraph_node> (node);
      if (cnode && cnode->clones && with_clones)
	for (cnode = cnode->clones; cnode; cnode = cnode->next_sibling_clone)
	  if (cnode->symbol.decl == decl)
	    insert_to_assembler_name_hash ((symtab_node) cnode, true);
    }
}
/* Find the coalesce pair for SSA versions P1 and P2 in coalesce list CL,
   creating it with zero cost when CREATE is true and no pair exists yet.
   Pairs are stored normalized with the smaller version first.
   NOTE(review): this excerpt ends inside the function -- the trailing
   return statement is not visible here.  */
static coalesce_pair_p
find_coalesce_pair (coalesce_list_p cl, int p1, int p2, bool create)
{
  struct coalesce_pair p, *pair;
  void **slot;
  unsigned int hash;

  /* Normalize so that p1 is the smaller value.  */
  if (p2 < p1)
    {
      p.first_element = p2;
      p.second_element = p1;
    }
  else
    {
      p.first_element = p1;
      p.second_element = p2;
    }

  hash = coalesce_pair_map_hash (&p);
  pair = (struct coalesce_pair *) htab_find_with_hash (cl->list, &p, hash);

  if (create && !pair)
    {
      /* Adding a pair would invalidate any previously sorted view.  */
      gcc_assert (cl->sorted == NULL);
      pair = XNEW (struct coalesce_pair);
      pair->first_element = p.first_element;
      pair->second_element = p.second_element;
      pair->cost = 0;
      slot = htab_find_slot_with_hash (cl->list, pair, hash, INSERT);
      *(struct coalesce_pair **)slot = pair;
    }
/* Return the symtab node registered under assembler name ASMNAME, or
   NULL when no symbol uses that name.  The assembler name hash is built
   lazily on the first query.  */
symtab_node
symtab_node_for_asm (const_tree asmname)
{
  void **slot;
  symtab_node node;

  if (!assembler_name_hash)
    {
      assembler_name_hash = htab_create_ggc (10, hash_node_by_assembler_name,
					     eq_assembler_name, NULL);
      FOR_EACH_SYMBOL (node)
	insert_to_assembler_name_hash (node);
    }

  slot = htab_find_slot_with_hash (assembler_name_hash, asmname,
				   decl_assembler_name_hash (asmname),
				   NO_INSERT);
  if (!slot)
    return NULL;
  return (symtab_node) *slot;
}
/* Set the section of this node to SECTION, maintaining the ref-counted
   section_hash_entry records shared through symtab->section_hash.
   A NULL SECTION clears the node's section (and its implicit flag).  */
void
symtab_node::set_section_for_node (const char *section)
{
  const char *current = get_section ();
  void **slot;

  /* Nothing to do if the section is unchanged, by identity or by
     string content.  */
  if (current == section
      || (current && section && !strcmp (current, section)))
    return;

  if (current)
    {
      /* Drop our reference to the old entry; when it was the last one,
	 remove the entry from the hash and free it.  */
      x_section->ref_count--;
      if (!x_section->ref_count)
	{
	  slot = htab_find_slot_with_hash (symtab->section_hash,
					   x_section->name,
					   htab_hash_string (x_section->name),
					   INSERT);
	  ggc_free (x_section);
	  htab_clear_slot (symtab->section_hash, slot);
	}
      x_section = NULL;
    }
  if (!section)
    {
      implicit_section = false;
      return;
    }
  if (!symtab->section_hash)
    symtab->section_hash = htab_create_ggc (10, hash_section_hash_entry,
					    eq_sections, NULL);
  /* Reuse an existing entry for this section name, or intern a new one
     with its own copy of the string.  */
  slot = htab_find_slot_with_hash (symtab->section_hash, section,
				   htab_hash_string (section), INSERT);
  if (*slot)
    x_section = (section_hash_entry *)*slot;
  else
    {
      int len = strlen (section);
      *slot = x_section = ggc_cleared_alloc<section_hash_entry> ();
      x_section->name = ggc_vec_alloc<char> (len + 1);
      memcpy (x_section->name, section, len + 1);
    }
  x_section->ref_count++;
}
/* Notice that the pointer has been freed: subtract its bytes from the
   owning descriptor, drop its entry from ptr_hash, and release the
   bookkeeping record.  PTR must have been registered previously.  */
static void
free_overhead (struct vec_prefix *ptr)
{
  PTR *entry_slot;
  struct ptr_hash_entry *entry;

  entry_slot = htab_find_slot_with_hash (ptr_hash, ptr,
					 htab_hash_pointer (ptr),
					 NO_INSERT);
  entry = (struct ptr_hash_entry *) *entry_slot;
  entry->loc->allocated -= entry->allocated;
  htab_clear_slot (ptr_hash, entry_slot);
  free (entry);
}
void vec_prefix::release_overhead (void) { PTR *slot = htab_find_slot_with_hash (ptr_hash, this, htab_hash_pointer (this), NO_INSERT); struct ptr_hash_entry *p = (struct ptr_hash_entry *) *slot; p->loc->allocated -= p->allocated; htab_clear_slot (ptr_hash, slot); ::free (p); }
/* Record in shadowed_var_for_decl that variable FROM is shadowed by TO.
   Any previous mapping for FROM is overwritten.  */
void
decl_shadowed_for_var_insert (tree from, tree to)
{
  void **loc;
  struct tree_map *entry = GGC_NEW (struct tree_map);

  entry->hash = htab_hash_pointer (from);
  entry->from = from;
  entry->to = to;
  loc = htab_find_slot_with_hash (shadowed_var_for_decl, entry,
				  entry->hash, INSERT);
  *(struct tree_map **) loc = entry;
}
/* Record in shadowed_var_for_decl that variable FROM is shadowed by TO.
   The mapping is keyed by DECL_UID (FROM); any previous mapping for the
   same decl is overwritten.  */
void
decl_shadowed_for_var_insert (tree from, tree to)
{
  void **loc;
  struct tree_decl_map *entry = ggc_alloc<tree_decl_map> ();

  entry->base.from = from;
  entry->to = to;
  loc = htab_find_slot_with_hash (shadowed_var_for_decl, entry,
				  DECL_UID (from), INSERT);
  *(struct tree_decl_map **) loc = entry;
}
/* Map TYPE (keyed by TYPE_UID) to DECL in the decl_tree_for_type table,
   overwriting any previous mapping for the same type.  */
static void
decl_for_type_insert (tree type, tree decl)
{
  void **slot;
  struct tree_type_map *entry = ggc_alloc<tree_type_map> ();

  entry->type.from = type;
  entry->decl = decl;
  slot = htab_find_slot_with_hash (decl_tree_for_type, entry,
				   TYPE_UID (type), INSERT);
  *(struct tree_type_map **) slot = entry;
}
/* Map UID to the tree TO in the complex_variable_components table,
   overwriting any previous mapping for the same uid.  */
static void
cvc_insert (unsigned int uid, tree to)
{
  void **loc;
  struct int_tree_map *entry = XNEW (struct int_tree_map);

  entry->uid = uid;
  entry->to = to;
  loc = htab_find_slot_with_hash (complex_variable_components, entry,
				  uid, INSERT);
  *(struct int_tree_map **) loc = entry;
}
void register_attribute (const struct attribute_spec *attr) { struct substring str; const void **slot; str.str = attr->name; str.length = strlen (str.str); slot = (const void **)htab_find_slot_with_hash (attribute_hash, &str, substring_hash (str.str, str.length), INSERT); gcc_assert (!*slot); *slot = attr; }
/* Return the file_hash_entry for STRING from file_table, creating a
   fresh zeroed entry (with its own copy of the key) on first lookup.  */
static struct file_hash_entry *
file_hash_lookup (const char *string)
{
  void **slot = htab_find_slot_with_hash (file_table, string,
					  (*htab_hash_string) (string),
					  INSERT);
  struct file_hash_entry *entry = (struct file_hash_entry *) *slot;

  if (entry == NULL)
    {
      entry = XCNEW (struct file_hash_entry);
      entry->key = xstrdup (string);
      *slot = entry;
    }
  return entry;
}
/* Look up the demangled name STRING in demangled_table.  When CREATE is
   nonzero, insert a fresh zeroed entry (with a copied key) if none
   exists; otherwise return NULL for a missing entry.  */
static struct demangled_hash_entry *
demangled_hash_lookup (const char *string, int create)
{
  void **slot;
  struct demangled_hash_entry *entry;

  slot = htab_find_slot_with_hash (demangled_table, string,
				   (*htab_hash_string) (string),
				   create ? INSERT : NO_INSERT);
  if (slot == NULL)
    return NULL;
  entry = (struct demangled_hash_entry *) *slot;
  if (entry == NULL)
    {
      entry = XCNEW (struct demangled_hash_entry);
      entry->key = xstrdup (string);
      *slot = entry;
    }
  return entry;
}
/* Look up symbol STRING in symbol_table.  When CREATE is nonzero,
   insert a fresh zeroed entry (with a copied key) if none exists;
   otherwise return NULL for a missing entry.  */
static struct symbol_hash_entry *
symbol_hash_lookup (const char *string, int create)
{
  void **slot;

  slot = htab_find_slot_with_hash (symbol_table, string,
				   (*htab_hash_string) (string),
				   create ? INSERT : NO_INSERT);
  if (slot == NULL)
    return NULL;
  if (*slot == NULL)
    {
      struct symbol_hash_entry *entry = xcalloc (1, sizeof (*entry));
      entry->key = xstrdup (string);
      *slot = entry;
    }
  return *slot;
}
/* For given name, return descriptor, create new if needed. */ static struct alloc_pool_descriptor * alloc_pool_descriptor (const char *name) { struct alloc_pool_descriptor **slot; if (!alloc_pool_hash) alloc_pool_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL); slot = (struct alloc_pool_descriptor **) htab_find_slot_with_hash (alloc_pool_hash, name, htab_hash_pointer (name), 1); if (*slot) return *slot; *slot = xcalloc (sizeof (**slot), 1); (*slot)->name = name; return *slot; }
/* Return the symtab node registered under assembler name ASMNAME, or
   NULL when no symbol uses that name.  */
symtab_node *
symtab_node::get_for_asmname (const_tree asmname)
{
  void **slot;

  symtab->symtab_initialize_asm_name_hash ();
  slot = htab_find_slot_with_hash (symtab->assembler_name_hash, asmname,
				   symtab->decl_assembler_name_hash (asmname),
				   NO_INSERT);
  if (!slot)
    return NULL;
  return (symtab_node *) *slot;
}
/* For given name, return descriptor, create new if needed. */ static struct varray_descriptor * varray_descriptor (const char *name) { struct varray_descriptor **slot; if (!varray_hash) varray_hash = htab_create (10, hash_descriptor, eq_descriptor, NULL); slot = (struct varray_descriptor **) htab_find_slot_with_hash (varray_hash, name, htab_hash_pointer (name), INSERT); if (*slot) return *slot; *slot = XCNEW (struct varray_descriptor); (*slot)->name = name; return *slot; }
/* Return the symtab node registered under assembler name ASMNAME, or
   NULL when no symbol uses that name.  Ensures the assembler name hash
   has been initialized first.  */
symtab_node
symtab_node_for_asm (const_tree asmname)
{
  void **slot;

  symtab_initialize_asm_name_hash ();
  slot = htab_find_slot_with_hash (assembler_name_hash, asmname,
				   decl_assembler_name_hash (asmname),
				   NO_INSERT);
  if (!slot)
    return NULL;
  return (symtab_node) *slot;
}
/* Decide whether SET (taken from INSN) is a register load from a memory
   reference we have previously recorded, suitable for the second-load
   optimization.  On success store the recorded load's hash slot in
   *LOAD and return true.  */
static bool
interesting_second_load (rtx set, struct load ***load, rtx insn)
{
  rtx mem, reg;

  if (!set)
    return false;

  mem = SET_SRC (set);
  reg = SET_DEST (set);
  /* Only plain register loads from non-volatile memory qualify.  */
  if (!MEM_P (mem) || MEM_VOLATILE_P (mem) || !REG_P (reg))
    return false;

  *load = (struct load **) htab_find_slot_with_hash (htab_load, mem,
						     load_rtx_hash (mem),
						     NO_INSERT);
  if (!*load)
    return false;

  /* Don't work on cases that never happen: if there is no kill, we
     would have inherited the reload; if the store and load regs are the
     same we would need to find an available register.  If the kill insn
     was already replaced by a move this information is stale, disregard
     it.  */
  if (rtx_equal_p (reg, (**load)->reg)
      || !(**load)->reg_kill
      || INSN_DELETED_P ((**load)->reg_kill)
      || reg_used_between_p (reg, PREV_INSN ((**load)->reg_kill),
			     NEXT_INSN (insn))
      || reg_set_between_p (reg, PREV_INSN ((**load)->reg_kill), insn))
    {
      if (dump_file)
	{
	  fputs ("\nCan't insert the move before the kill for this load:\n ",
		 dump_file);
	  print_inline_rtx (dump_file, insn, 2);
	  fputs ("\n\n", dump_file);
	}
      return false;
    }

  return true;
}
/* Remove NODE from the assembler-name hash chain it is linked on, and,
   when WITH_CLONES is true, do the same for inline clones that share
   NODE's decl.  Clears NODE's sharing links afterwards.  */
void
symbol_table::unlink_from_assembler_name_hash (symtab_node *node,
					       bool with_clones)
{
  if (assembler_name_hash)
    {
      cgraph_node *cnode;
      tree decl = node->decl;

      if (node->next_sharing_asm_name)
	node->next_sharing_asm_name->previous_sharing_asm_name
	  = node->previous_sharing_asm_name;
      if (node->previous_sharing_asm_name)
	{
	  node->previous_sharing_asm_name->next_sharing_asm_name
	    = node->next_sharing_asm_name;
	}
      else
	{
	  /* NODE heads its chain, so the hash slot itself must be
	     repointed (or cleared when NODE was the only entry).  */
	  tree name = DECL_ASSEMBLER_NAME (node->decl);
	  void **slot;
	  slot = htab_find_slot_with_hash (assembler_name_hash, name,
					   decl_assembler_name_hash (name),
					   NO_INSERT);
	  gcc_assert (*slot == node);
	  if (!node->next_sharing_asm_name)
	    htab_clear_slot (assembler_name_hash, slot);
	  else
	    *slot = node->next_sharing_asm_name;
	}
      node->next_sharing_asm_name = NULL;
      node->previous_sharing_asm_name = NULL;

      /* Update also possible inline clones sharing a decl.  */
      cnode = dyn_cast <cgraph_node *> (node);
      if (cnode && cnode->clones && with_clones)
	for (cnode = cnode->clones; cnode; cnode = cnode->next_sibling_clone)
	  if (cnode->decl == decl)
	    unlink_from_assembler_name_hash (cnode, true);
    }
}
/* Insert NODE at the head of the chain of symbols sharing its assembler
   name in assembler_name_hash.  No-op when the hash does not exist.
   NODE must not already be linked on a sharing chain.  */
static void
insert_to_assembler_name_hash (symtab_node node)
{
  gcc_checking_assert (!node->symbol.previous_sharing_asm_name
		       && !node->symbol.next_sharing_asm_name);
  if (assembler_name_hash)
    {
      void **aslot;
      tree name = DECL_ASSEMBLER_NAME (node->symbol.decl);

      aslot = htab_find_slot_with_hash (assembler_name_hash, name,
					decl_assembler_name_hash (name),
					INSERT);
      gcc_assert (*aslot != node);
      /* Link NODE in front of the current chain head, if any.  */
      node->symbol.next_sharing_asm_name = (symtab_node)*aslot;
      if (*aslot != NULL)
	((symtab_node)*aslot)->symbol.previous_sharing_asm_name = node;
      *aslot = node;
    }
}
/* Account the overhead: record that vector PTR of SIZE bytes was
   allocated at NAME:LINE in FUNCTION.  Registers PTR in ptr_hash
   (asserting it is not already present) and charges SIZE to the
   per-location descriptor, updating its peak.  */
static void
register_overhead (struct vec_prefix *ptr, size_t size,
		   const char *name, int line, const char *function)
{
  struct vec_descriptor *loc = vec_descriptor (name, line, function);
  struct ptr_hash_entry *p = XNEW (struct ptr_hash_entry);
  PTR *slot;

  p->ptr = ptr;
  p->loc = loc;
  p->allocated = size;
  if (!ptr_hash)
    ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
  slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr),
				   INSERT);
  gcc_assert (!*slot);
  *slot = p;

  loc->allocated += size;
  /* Track the high-water mark.  The previous code did
     "loc->peak += loc->allocated", which accumulates instead of
     recording a maximum and lets peak grow far past the largest
     actual allocation; a peak is assigned, not summed.  */
  if (loc->peak < loc->allocated)
    loc->peak = loc->allocated;
  loc->times++;
}
/* Read i386-opc.tbl and emit the i386_optab[] initializer to TABLE.
   Lines are grouped by mnemonic via a hash table so that all templates
   for one name are emitted together, in input order, followed by a
   sentinel entry.  */
static void
process_i386_opcodes (FILE *table)
{
  FILE *fp;
  char buf[2048];
  unsigned int i, j;
  char *str, *p, *last, *name;
  struct opcode_hash_entry **hash_slot, **entry, *next;
  htab_t opcode_hash_table;
  struct opcode_hash_entry **opcode_array;
  unsigned int opcode_array_size = 1024;
  int lineno = 0;

  filename = "i386-opc.tbl";
  fp = fopen (filename, "r");

  if (fp == NULL)
    fail (_("can't find i386-opc.tbl for reading, errno = %s\n"),
	  xstrerror (errno));

  i = 0;
  opcode_array = (struct opcode_hash_entry **)
    xmalloc (sizeof (*opcode_array) * opcode_array_size);

  opcode_hash_table = htab_create_alloc (16, opcode_hash_hash,
					 opcode_hash_eq, NULL,
					 xcalloc, free);

  fprintf (table, "\n/* i386 opcode table. */\n\n");
  fprintf (table, "const insn_template i386_optab[] =\n{\n");

  /* Put everything on opcode array.  */
  while (!feof (fp))
    {
      if (fgets (buf, sizeof (buf), fp) == NULL)
	break;

      lineno++;

      p = remove_leading_whitespaces (buf);

      /* Skip comments.  */
      str = strstr (p, "//");
      if (str != NULL)
	str[0] = '\0';

      /* Remove trailing white spaces.  */
      remove_trailing_whitespaces (p);

      switch (p[0])
	{
	case '#':
	  /* Ignore comments.  */
	case '\0':
	  continue;
	  break;
	default:
	  break;
	}

      last = p + strlen (p);

      /* Find name.  */
      name = next_field (p, ',', &str, last);

      /* Get the slot in hash table.  */
      hash_slot = (struct opcode_hash_entry **)
	htab_find_slot_with_hash (opcode_hash_table, name,
				  htab_hash_string (name), INSERT);

      if (*hash_slot == NULL)
	{
	  /* It is the new one.  Put it on opcode array.  */
	  if (i >= opcode_array_size)
	    {
	      /* Grow the opcode array when needed.  */
	      opcode_array_size += 1024;
	      opcode_array = (struct opcode_hash_entry **)
		xrealloc (opcode_array,
			  sizeof (*opcode_array) * opcode_array_size);
	    }

	  opcode_array[i] = (struct opcode_hash_entry *)
	    xmalloc (sizeof (struct opcode_hash_entry));
	  opcode_array[i]->next = NULL;
	  opcode_array[i]->name = xstrdup (name);
	  opcode_array[i]->opcode = xstrdup (str);
	  opcode_array[i]->lineno = lineno;
	  *hash_slot = opcode_array[i];
	  i++;
	}
      else
	{
	  /* Append it to the existing one.  */
	  entry = hash_slot;
	  while ((*entry) != NULL)
	    entry = &(*entry)->next;
	  *entry = (struct opcode_hash_entry *)
	    xmalloc (sizeof (struct opcode_hash_entry));
	  (*entry)->next = NULL;
	  (*entry)->name = (*hash_slot)->name;
	  (*entry)->opcode = xstrdup (str);
	  (*entry)->lineno = lineno;
	}
    }

  /* Process opcode array.  */
  for (j = 0; j < i; j++)
    {
      for (next = opcode_array[j]; next; next = next->next)
	{
	  name = next->name;
	  str = next->opcode;
	  lineno = next->lineno;
	  last = str + strlen (str);
	  output_i386_opcode (table, name, str, last, lineno);
	}
    }

  fclose (fp);

  /* Emit the terminating sentinel template.  */
  fprintf (table, " { NULL, 0, 0, 0, 0,\n");

  process_i386_cpu_flag (table, "0", 0, ",", " ", -1);

  process_i386_opcode_modifier (table, "0", -1);

  fprintf (table, " { ");
  process_i386_operand_type (table, "0", 0, "\t ", -1);
  fprintf (table, " } }\n");
  fprintf (table, "};\n");
}
/* Insert EXPR into the value set for value VAL.  VUSES represents the
   virtual use operands associated with EXPR.  It is used when computing
   a hash value for EXPR.  */

void
vn_add_with_vuses (tree expr, tree val, VEC (tree, gc) *vuses)
{
  void **slot;
  val_expr_pair_t new_pair;

  new_pair = XNEW (struct val_expr_pair_d);
  new_pair->e = expr;
  new_pair->v = val;
  new_pair->vuses = vuses;
  new_pair->hashcode = vn_compute (expr, 0);

  slot = htab_find_slot_with_hash (value_table, new_pair, new_pair->hashcode,
				   INSERT);
  /* Replace any existing pair for the same expression.  */
  if (*slot)
    free (*slot);
  *slot = (void *) new_pair;

  set_value_handle (expr, val);
  if (TREE_CODE (val) == VALUE_HANDLE)
    add_to_value (val, expr);
}

/* Search in VALUE_TABLE for an existing instance of expression EXPR,
   and return its value, or NULL if none has been set.  STMT represents
   the stmt associated with EXPR.  It is used when computing the hash
   value for EXPR.  */
/* Parse the .eh_frame input section SEC of ABFD: record every CIE and
   FDE, mark FDEs against discarded sections and duplicate or unused
   CIEs as removed, and assign new output offsets to the surviving
   entries.  RELOC_SYMBOL_DELETED_P reports whether the relocation at a
   given offset refers to a discarded symbol; COOKIE carries the
   relocation/symbol state.  Returns TRUE when the section size changed.
   On a parse error, jumps to free_no_table, emits a diagnostic and
   disables the .eh_frame_hdr table.  */
bfd_boolean
_bfd_elf_discard_section_eh_frame
  (bfd *abfd, struct bfd_link_info *info, asection *sec,
   bfd_boolean (*reloc_symbol_deleted_p) (bfd_vma, void *),
   struct elf_reloc_cookie *cookie)
{
  /* Bail out to the shared error path whenever a structural check fails.  */
#define REQUIRE(COND)				\
  do						\
    if (!(COND))				\
      goto free_no_table;			\
  while (0)

  bfd_byte *ehbuf = NULL, *buf;
  bfd_byte *last_fde;
  struct eh_cie_fde *ent, *this_inf;
  unsigned int hdr_length, hdr_id;
  struct extended_cie
    {
      struct cie cie;
      unsigned int offset;
      unsigned int usage_count;
      unsigned int entry;
    } *ecies = NULL, *ecie;
  unsigned int ecie_count = 0, ecie_alloced = 0;
  struct cie *cie;
  struct elf_link_hash_table *htab;
  struct eh_frame_hdr_info *hdr_info;
  struct eh_frame_sec_info *sec_info = NULL;
  unsigned int offset;
  unsigned int ptr_size;
  unsigned int entry_alloced;

  if (sec->size == 0)
    {
      /* This file does not contain .eh_frame information.  */
      return FALSE;
    }

  if (bfd_is_abs_section (sec->output_section))
    {
      /* At least one of the sections is being discarded from the
	 link, so we should just ignore them.  */
      return FALSE;
    }

  htab = elf_hash_table (info);
  hdr_info = &htab->eh_info;

  if (hdr_info->cies == NULL && !info->relocatable)
    hdr_info->cies = htab_try_create (1, cie_hash, cie_eq, free);

  /* Read the frame unwind information from abfd.  */

  REQUIRE (bfd_malloc_and_get_section (abfd, sec, &ehbuf));

  if (sec->size >= 4
      && bfd_get_32 (abfd, ehbuf) == 0
      && cookie->rel == cookie->relend)
    {
      /* Empty .eh_frame section.  */
      free (ehbuf);
      return FALSE;
    }

  /* If .eh_frame section size doesn't fit into int, we cannot handle
     it (it would need to use 64-bit .eh_frame format anyway).  */
  REQUIRE (sec->size == (unsigned int) sec->size);

  ptr_size = (get_elf_backend_data (abfd)
	      ->elf_backend_eh_frame_address_size (abfd, sec));
  REQUIRE (ptr_size != 0);

  buf = ehbuf;
  sec_info = bfd_zmalloc (sizeof (struct eh_frame_sec_info)
			  + 99 * sizeof (struct eh_cie_fde));
  REQUIRE (sec_info);

  entry_alloced = 100;

  /* Relocation helpers, all relative to the current parse position.  */
#define ENSURE_NO_RELOCS(buf)				\
  REQUIRE (!(cookie->rel < cookie->relend		\
	     && (cookie->rel->r_offset			\
		 < (bfd_size_type) ((buf) - ehbuf))	\
	     && cookie->rel->r_info != 0))

#define SKIP_RELOCS(buf)				\
  while (cookie->rel < cookie->relend			\
	 && (cookie->rel->r_offset			\
	     < (bfd_size_type) ((buf) - ehbuf)))	\
    cookie->rel++

#define GET_RELOC(buf)					\
  ((cookie->rel < cookie->relend			\
    && (cookie->rel->r_offset				\
	== (bfd_size_type) ((buf) - ehbuf)))		\
   ? cookie->rel : NULL)

  /* Walk the section one CIE/FDE at a time.  */
  for (;;)
    {
      char *aug;
      bfd_byte *start, *end, *insns, *insns_end;
      bfd_size_type length;
      unsigned int set_loc_count;

      if (sec_info->count == entry_alloced)
	{
	  sec_info = bfd_realloc (sec_info,
				  sizeof (struct eh_frame_sec_info)
				  + ((entry_alloced + 99)
				     * sizeof (struct eh_cie_fde)));
	  REQUIRE (sec_info);

	  memset (&sec_info->entry[entry_alloced], 0,
		  100 * sizeof (struct eh_cie_fde));
	  entry_alloced += 100;
	}

      this_inf = sec_info->entry + sec_info->count;
      last_fde = buf;

      if ((bfd_size_type) (buf - ehbuf) == sec->size)
	break;

      /* Read the length of the entry.  */
      REQUIRE (skip_bytes (&buf, ehbuf + sec->size, 4));
      hdr_length = bfd_get_32 (abfd, buf - 4);

      /* 64-bit .eh_frame is not supported.  */
      REQUIRE (hdr_length != 0xffffffff);

      /* The CIE/FDE must be fully contained in this input section.  */
      REQUIRE ((bfd_size_type) (buf - ehbuf) + hdr_length <= sec->size);
      end = buf + hdr_length;

      this_inf->offset = last_fde - ehbuf;
      this_inf->size = 4 + hdr_length;

      if (hdr_length == 0)
	{
	  /* A zero-length CIE should only be found at the end of
	     the section.  */
	  REQUIRE ((bfd_size_type) (buf - ehbuf) == sec->size);
	  ENSURE_NO_RELOCS (buf);
	  sec_info->count++;
	  break;
	}

      REQUIRE (skip_bytes (&buf, end, 4));
      hdr_id = bfd_get_32 (abfd, buf - 4);

      if (hdr_id == 0)
	{
	  unsigned int initial_insn_length;

	  /* CIE  */
	  this_inf->cie = 1;

	  if (ecie_count == ecie_alloced)
	    {
	      ecies = bfd_realloc (ecies,
				   (ecie_alloced + 20) * sizeof (*ecies));
	      REQUIRE (ecies);
	      memset (&ecies[ecie_alloced], 0, 20 * sizeof (*ecies));
	      ecie_alloced += 20;
	    }

	  cie = &ecies[ecie_count].cie;
	  ecies[ecie_count].offset = this_inf->offset;
	  ecies[ecie_count++].entry = sec_info->count;
	  cie->length = hdr_length;
	  start = buf;
	  REQUIRE (read_byte (&buf, end, &cie->version));

	  /* Cannot handle unknown versions.  */
	  REQUIRE (cie->version == 1 || cie->version == 3);
	  REQUIRE (strlen ((char *) buf) < sizeof (cie->augmentation));

	  strcpy (cie->augmentation, (char *) buf);
	  buf = (bfd_byte *) strchr ((char *) buf, '\0') + 1;
	  ENSURE_NO_RELOCS (buf);
	  if (buf[0] == 'e' && buf[1] == 'h')
	    {
	      /* GCC < 3.0 .eh_frame CIE */
	      /* We cannot merge "eh" CIEs because __EXCEPTION_TABLE__
		 is private to each CIE, so we don't need it for
		 anything.  Just skip it.  */
	      REQUIRE (skip_bytes (&buf, end, ptr_size));
	      SKIP_RELOCS (buf);
	    }
	  REQUIRE (read_uleb128 (&buf, end, &cie->code_align));
	  REQUIRE (read_sleb128 (&buf, end, &cie->data_align));
	  if (cie->version == 1)
	    {
	      REQUIRE (buf < end);
	      cie->ra_column = *buf++;
	    }
	  else
	    REQUIRE (read_uleb128 (&buf, end, &cie->ra_column));
	  ENSURE_NO_RELOCS (buf);
	  cie->lsda_encoding = DW_EH_PE_omit;
	  cie->fde_encoding = DW_EH_PE_omit;
	  cie->per_encoding = DW_EH_PE_omit;
	  aug = cie->augmentation;
	  if (aug[0] != 'e' || aug[1] != 'h')
	    {
	      /* Decode the augmentation string characters.  */
	      if (*aug == 'z')
		{
		  aug++;
		  REQUIRE (read_uleb128 (&buf, end,
					 &cie->augmentation_size));
		  ENSURE_NO_RELOCS (buf);
		}
	      while (*aug != '\0')
		switch (*aug++)
		  {
		  case 'L':
		    REQUIRE (read_byte (&buf, end, &cie->lsda_encoding));
		    ENSURE_NO_RELOCS (buf);
		    REQUIRE (get_DW_EH_PE_width (cie->lsda_encoding,
						 ptr_size));
		    break;
		  case 'R':
		    REQUIRE (read_byte (&buf, end, &cie->fde_encoding));
		    ENSURE_NO_RELOCS (buf);
		    REQUIRE (get_DW_EH_PE_width (cie->fde_encoding,
						 ptr_size));
		    break;
		  case 'S':
		    break;
		  case 'P':
		    {
		      int per_width;

		      REQUIRE (read_byte (&buf, end, &cie->per_encoding));
		      per_width = get_DW_EH_PE_width (cie->per_encoding,
						      ptr_size);
		      REQUIRE (per_width);
		      if ((cie->per_encoding & 0xf0) == DW_EH_PE_aligned)
			{
			  length = -(buf - ehbuf) & (per_width - 1);
			  REQUIRE (skip_bytes (&buf, end, length));
			}
		      ENSURE_NO_RELOCS (buf);

		      /* Ensure we have a reloc here.  */
		      if (GET_RELOC (buf) != NULL)
			{
			  unsigned long r_symndx;

#ifdef BFD64
			  if (ptr_size == 8)
			    r_symndx = ELF64_R_SYM (cookie->rel->r_info);
			  else
#endif
			    r_symndx = ELF32_R_SYM (cookie->rel->r_info);
			  if (r_symndx >= cookie->locsymcount
			      || ELF_ST_BIND (cookie->locsyms[r_symndx]
					      .st_info) != STB_LOCAL)
			    {
			      struct elf_link_hash_entry *h;

			      r_symndx -= cookie->extsymoff;
			      h = cookie->sym_hashes[r_symndx];

			      while (h->root.type == bfd_link_hash_indirect
				     || h->root.type == bfd_link_hash_warning)
				h = (struct elf_link_hash_entry *)
				    h->root.u.i.link;

			      cie->personality.h = h;
			    }
			  else
			    {
			      Elf_Internal_Sym *sym;
			      asection *sym_sec;
			      bfd_vma val;

			      sym = &cookie->locsyms[r_symndx];
			      sym_sec = (bfd_section_from_elf_index
					 (abfd, sym->st_shndx));
			      if (sym_sec != NULL)
				{
				  if (sym_sec->kept_section != NULL)
				    sym_sec = sym_sec->kept_section;
				  if (sym_sec->output_section != NULL)
				    {
				      val = (sym->st_value
					     + sym_sec->output_offset
					     + sym_sec->output_section->vma);
				      cie->personality.val = val;
				      cie->local_personality = 1;
				    }
				}
			    }

			  /* Cope with MIPS-style composite relocations.  */
			  do
			    cookie->rel++;
			  while (GET_RELOC (buf) != NULL);
			}
		      REQUIRE (skip_bytes (&buf, end, per_width));
		      REQUIRE (cie->local_personality
			       || cie->personality.h);
		    }
		    break;
		  default:
		    /* Unrecognized augmentation. Better bail out.  */
		    goto free_no_table;
		  }
	    }

	  /* For shared libraries, try to get rid of as many RELATIVE
	     relocs as possible.  */
	  if (info->shared
	      && (get_elf_backend_data (abfd)
		  ->elf_backend_can_make_relative_eh_frame
		  (abfd, info, sec)))
	    {
	      if ((cie->fde_encoding & 0xf0) == DW_EH_PE_absptr)
		cie->make_relative = 1;
	      /* If the CIE doesn't already have an 'R' entry, it's
		 fairly easy to add one, provided that there's no
		 aligned data after the augmentation string.  */
	      else if (cie->fde_encoding == DW_EH_PE_omit
		       && (cie->per_encoding & 0xf0) != DW_EH_PE_aligned)
		{
		  if (*cie->augmentation == 0)
		    this_inf->add_augmentation_size = 1;
		  this_inf->add_fde_encoding = 1;
		  cie->make_relative = 1;
		}
	    }

	  if (info->shared
	      && (get_elf_backend_data (abfd)
		  ->elf_backend_can_make_lsda_relative_eh_frame
		  (abfd, info, sec))
	      && (cie->lsda_encoding & 0xf0) == DW_EH_PE_absptr)
	    cie->make_lsda_relative = 1;

	  /* If FDE encoding was not specified, it defaults to
	     DW_EH_absptr.  */
	  if (cie->fde_encoding == DW_EH_PE_omit)
	    cie->fde_encoding = DW_EH_PE_absptr;

	  initial_insn_length = end - buf;
	  if (initial_insn_length <= sizeof (cie->initial_instructions))
	    {
	      cie->initial_insn_length = initial_insn_length;
	      memcpy (cie->initial_instructions, buf,
		      initial_insn_length);
	    }
	  insns = buf;
	  buf += initial_insn_length;
	  ENSURE_NO_RELOCS (buf);
	}
      else
	{
	  /* Find the corresponding CIE.  */
	  unsigned int cie_offset = this_inf->offset + 4 - hdr_id;
	  for (ecie = ecies; ecie < ecies + ecie_count; ++ecie)
	    if (cie_offset == ecie->offset)
	      break;

	  /* Ensure this FDE references one of the CIEs in this input
	     section.  */
	  REQUIRE (ecie != ecies + ecie_count);
	  cie = &ecie->cie;

	  ENSURE_NO_RELOCS (buf);
	  REQUIRE (GET_RELOC (buf));

	  if ((*reloc_symbol_deleted_p) (buf - ehbuf, cookie))
	    /* This is a FDE against a discarded section.  It should
	       be deleted.  */
	    this_inf->removed = 1;
	  else
	    {
	      if (info->shared
		  && (((cie->fde_encoding & 0xf0) == DW_EH_PE_absptr
		       && cie->make_relative == 0)
		      || (cie->fde_encoding & 0xf0) == DW_EH_PE_aligned))
		{
		  /* If a shared library uses absolute pointers
		     which we cannot turn into PC relative,
		     don't create the binary search table,
		     since it is affected by runtime relocations.  */
		  hdr_info->table = FALSE;
		  (*info->callbacks->einfo)
		    (_("%P: fde encoding in %B(%A) prevents .eh_frame_hdr"
		       " table being created.\n"), abfd, sec);
		}
	      ecie->usage_count++;
	      hdr_info->fde_count++;
	      this_inf->cie_inf = (void *) (ecie - ecies);
	    }

	  /* Skip the initial location and address range.  */
	  start = buf;
	  length = get_DW_EH_PE_width (cie->fde_encoding, ptr_size);
	  REQUIRE (skip_bytes (&buf, end, 2 * length));

	  /* Skip the augmentation size, if present.  */
	  if (cie->augmentation[0] == 'z')
	    REQUIRE (read_uleb128 (&buf, end, &length));
	  else
	    length = 0;

	  /* Of the supported augmentation characters above, only 'L'
	     adds augmentation data to the FDE.  This code would need to
	     be adjusted if any future augmentations do the same thing.  */
	  if (cie->lsda_encoding != DW_EH_PE_omit)
	    {
	      this_inf->lsda_offset = buf - start;
	      /* If there's no 'z' augmentation, we don't know where the
		 CFA insns begin.  Assume no padding.  */
	      if (cie->augmentation[0] != 'z')
		length = end - buf;
	    }

	  /* Skip over the augmentation data.  */
	  REQUIRE (skip_bytes (&buf, end, length));
	  insns = buf;

	  buf = last_fde + 4 + hdr_length;
	  SKIP_RELOCS (buf);
	}

      /* Try to interpret the CFA instructions and find the first
	 padding nop.  Shrink this_inf's size so that it doesn't
	 include the padding.  */
      length = get_DW_EH_PE_width (cie->fde_encoding, ptr_size);
      set_loc_count = 0;
      insns_end = skip_non_nops (insns, end, length, &set_loc_count);
      /* If we don't understand the CFA instructions, we can't know
	 what needs to be adjusted there.  */
      if (insns_end == NULL
	  /* For the time being we don't support DW_CFA_set_loc in
	     CIE instructions.  */
	  || (set_loc_count && this_inf->cie))
	goto free_no_table;
      this_inf->size -= end - insns_end;
      if (insns_end != end && this_inf->cie)
	{
	  cie->initial_insn_length -= end - insns_end;
	  cie->length -= end - insns_end;
	}
      if (set_loc_count
	  && ((cie->fde_encoding & 0xf0) == DW_EH_PE_pcrel
	      || cie->make_relative))
	{
	  /* Record the offsets of DW_CFA_set_loc operands so they can
	     be adjusted later.  */
	  unsigned int cnt;
	  bfd_byte *p;

	  this_inf->set_loc = bfd_malloc ((set_loc_count + 1)
					  * sizeof (unsigned int));
	  REQUIRE (this_inf->set_loc);
	  this_inf->set_loc[0] = set_loc_count;
	  p = insns;
	  cnt = 0;
	  while (p < end)
	    {
	      if (*p == DW_CFA_set_loc)
		this_inf->set_loc[++cnt] = p + 1 - start;
	      REQUIRE (skip_cfa_op (&p, end, length));
	    }
	}

      this_inf->fde_encoding = cie->fde_encoding;
      this_inf->lsda_encoding = cie->lsda_encoding;
      sec_info->count++;
    }

  elf_section_data (sec)->sec_info = sec_info;
  sec->sec_info_type = ELF_INFO_TYPE_EH_FRAME;

  /* Look at all CIEs in this section and determine which can be
     removed as unused, which can be merged with previous duplicate
     CIEs and which need to be kept.  */
  for (ecie = ecies; ecie < ecies + ecie_count; ++ecie)
    {
      if (ecie->usage_count == 0)
	{
	  sec_info->entry[ecie->entry].removed = 1;
	  continue;
	}
      ecie->cie.output_sec = sec->output_section;
      ecie->cie.cie_inf = sec_info->entry + ecie->entry;
      cie_compute_hash (&ecie->cie);
      if (hdr_info->cies != NULL)
	{
	  void **loc = htab_find_slot_with_hash (hdr_info->cies, &ecie->cie,
						 ecie->cie.hash, INSERT);
	  if (loc != NULL)
	    {
	      /* A matching CIE was merged earlier: reuse it and drop
		 this one.  */
	      if (*loc != HTAB_EMPTY_ENTRY)
		{
		  sec_info->entry[ecie->entry].removed = 1;
		  ecie->cie.cie_inf = ((struct cie *) *loc)->cie_inf;
		  continue;
		}

	      *loc = malloc (sizeof (struct cie));
	      if (*loc == NULL)
		*loc = HTAB_DELETED_ENTRY;
	      else
		memcpy (*loc, &ecie->cie, sizeof (struct cie));
	    }
	}
      ecie->cie.cie_inf->make_relative = ecie->cie.make_relative;
      ecie->cie.cie_inf->make_lsda_relative = ecie->cie.make_lsda_relative;
      ecie->cie.cie_inf->per_encoding_relative
	= (ecie->cie.per_encoding & 0x70) == DW_EH_PE_pcrel;
    }

  /* Ok, now we can assign new offsets.  */
  offset = 0;
  for (ent = sec_info->entry; ent < sec_info->entry + sec_info->count; ++ent)
    if (!ent->removed)
      {
	if (!ent->cie)
	  {
	    /* Resolve the FDE's CIE index into the surviving CIE.  */
	    ecie = ecies + (bfd_hostptr_t) ent->cie_inf;
	    ent->cie_inf = ecie->cie.cie_inf;
	  }
	ent->new_offset = offset;
	offset += size_of_output_cie_fde (ent, ptr_size);
      }

  /* Resize the sec as needed.  */
  sec->rawsize = sec->size;
  sec->size = offset;

  free (ehbuf);
  if (ecies)
    free (ecies);
  return offset != sec->rawsize;

free_no_table:
  (*info->callbacks->einfo)
    (_("%P: error in %B(%A); no .eh_frame_hdr table will be created.\n"),
     abfd, sec);
  if (ehbuf)
    free (ehbuf);
  if (sec_info)
    free (sec_info);
  if (ecies)
    free (ecies);
  hdr_info->table = FALSE;
  return FALSE;

#undef REQUIRE
}
/* Insert an entry describing operation OPER on file NAME (local handle
   LOCAL_FH, master handle MASTER_FH, master version MASTER_VERSION)
   into JOURNAL.  A DEL entry first tries to cancel out a pending ADD
   entry with the same name.  When COPY is true the name string is
   duplicated; otherwise ownership of NAME->str passes to the journal.
   Always returns true.  Caller must hold JOURNAL->mutex.  */
bool journal_insert(journal_t journal, journal_operation_t oper,
		    zfs_fh * local_fh, zfs_fh * master_fh,
		    uint64_t master_version, string * name, bool copy)
{
	journal_entry entry;
	void **slot;

	CHECK_MUTEX_LOCKED(journal->mutex);

	if (oper == JOURNAL_OPERATION_DEL)
	{
		/* If we are adding a DEL entry try to anihilate ;-) it with the
		   corresponding ADD entry.  */
		if (journal_delete(journal, JOURNAL_OPERATION_ADD, name))
		{
			if (!copy)
			{
				/* If we shall not copy NAME the NAME is dynamically
				   allocated and caller does not free it so we have to
				   free it now.  */
				free(name->str);
			}
			return true;
		}
	}

	/* The shared entry pool is guarded by the global journal_mutex.  */
	zfsd_mutex_lock(&journal_mutex);
	entry = (journal_entry) pool_alloc(journal_pool);
	zfsd_mutex_unlock(&journal_mutex);
	entry->oper = oper;
	entry->name = *name;
	slot = htab_find_slot_with_hash(journal->htab, entry, JOURNAL_HASH(entry), INSERT);
	if (*slot)
	{
		journal_entry old = (journal_entry) * slot;

		/* When there already is an entry with the same operation and
		   name in the journal, zfsd has crashed and left the journal
		   in inconsistent state.  In this case, delete the old entry
		   and add a new one.  */
		if (old->next)
			old->next->prev = old->prev;
		else
			journal->last = old->prev;
		if (old->prev)
			old->prev->next = old->next;
		else
			journal->first = old->next;
		free(old->name.str);
		zfsd_mutex_lock(&journal_mutex);
		pool_free(journal_pool, old);
		zfsd_mutex_unlock(&journal_mutex);
	}

	entry->dev = local_fh->dev;
	entry->ino = local_fh->ino;
	entry->gen = local_fh->gen;
	entry->master_fh = *master_fh;
	entry->master_version = master_version;
	if (copy)
		entry->name.str = (char *)xmemdup(name->str, entry->name.len + 1);

	/* Publish the entry in the hash table and append it to the list.  */
	*slot = entry;
	entry->next = NULL;
	entry->prev = journal->last;
	if (journal->last)
		journal->last->next = entry;
	journal->last = entry;
	if (journal->first == NULL)
		journal->first = entry;

	return true;
}