/* Attribute hook: attach attributes recorded by Solaris-specific pragmas
   (#pragma align / #pragma init / #pragma fini) to DECL once the matching
   declaration is seen.  Each pending list pairs a DECL_NAME identifier
   (TREE_PURPOSE) with pragma data (TREE_VALUE); a matched entry is
   unlinked from its list and returned to the GC with ggc_free.  */
void
solaris_insert_attributes (tree decl, tree *attributes)
{
  tree *x, next;

  /* #pragma align applies to variables only.  */
  if (solaris_pending_aligns != NULL && TREE_CODE (decl) == VAR_DECL)
    for (x = &solaris_pending_aligns; *x; x = &TREE_CHAIN (*x))
      {
        tree name = TREE_PURPOSE (*x);
        tree value = TREE_VALUE (*x);
        if (DECL_NAME (decl) == name)
          {
            /* An explicit "aligned" attribute on the declaration wins
               over the pragma; warn instead of silently overriding.  */
            if (lookup_attribute ("aligned", DECL_ATTRIBUTES (decl))
                || lookup_attribute ("aligned", *attributes))
              warning (0, "ignoring %<#pragma align%> for explicitly "
                       "aligned %q+D", decl);
            else
              *attributes = tree_cons (get_identifier ("aligned"), value,
                                       *attributes);
            /* Unlink and free the consumed pending entry.  */
            next = TREE_CHAIN (*x);
            ggc_free (*x);
            *x = next;
            break;
          }
      }

  /* #pragma init applies to functions; also add "used" so the function
     is not discarded as unreferenced.  */
  if (solaris_pending_inits != NULL && TREE_CODE (decl) == FUNCTION_DECL)
    for (x = &solaris_pending_inits; *x; x = &TREE_CHAIN (*x))
      {
        tree name = TREE_PURPOSE (*x);
        if (DECL_NAME (decl) == name)
          {
            *attributes = tree_cons (get_identifier ("init"), NULL,
                                     *attributes);
            *attributes = tree_cons (get_identifier ("used"), NULL,
                                     *attributes);
            /* Unlink and free the consumed pending entry.  */
            next = TREE_CHAIN (*x);
            ggc_free (*x);
            *x = next;
            break;
          }
      }

  /* Likewise for #pragma fini.  */
  if (solaris_pending_finis != NULL && TREE_CODE (decl) == FUNCTION_DECL)
    for (x = &solaris_pending_finis; *x; x = &TREE_CHAIN (*x))
      {
        tree name = TREE_PURPOSE (*x);
        if (DECL_NAME (decl) == name)
          {
            *attributes = tree_cons (get_identifier ("fini"), NULL,
                                     *attributes);
            *attributes = tree_cons (get_identifier ("used"), NULL,
                                     *attributes);
            /* Unlink and free the consumed pending entry.  */
            next = TREE_CHAIN (*x);
            ggc_free (*x);
            *x = next;
            break;
          }
      }
}
/* Resize MAP to hold at least NEW_NUMBER_OF_SLOTS slots (bumped to twice
   the current population if the request is smaller, then rounded up to a
   power of two so the hash can be reduced with a mask), and rehash every
   occupied slot from the old tables into the new ones.  The old
   slot/value arrays are released with ggc_free at the end.  */
static void
objc_map_private_resize (objc_map_t map, size_t new_number_of_slots)
{
  tree *old_slots = map->slots;
  tree *old_values = map->values;
  size_t i, old_number_of_slots = map->number_of_slots;

  /* Never shrink below the number of occupied slots.  */
  if (new_number_of_slots < (map->number_of_non_empty_slots))
    new_number_of_slots = 2 * map->number_of_non_empty_slots;

  new_number_of_slots = next_power_of_two (new_number_of_slots);

  map->number_of_slots = new_number_of_slots;
  map->mask = map->number_of_slots - 1;
  /* Recompute the resize threshold from the load factor (a percentage).  */
  map->max_number_of_non_empty_slots
    = (map->number_of_slots * map->maximum_load_factor) / 100;

  map->slots = (tree *)ggc_internal_cleared_vec_alloc (map->number_of_slots,
                                                       sizeof (tree));
  map->values = (tree *)ggc_internal_cleared_vec_alloc (map->number_of_slots,
                                                        sizeof (tree));

  if (map->slots == NULL)
    OUT_OF_MEMORY;

  if (map->values == NULL)
    OUT_OF_MEMORY;

  /* Reinsert every old entry.  On collision, probe with a step that
     grows by one on each attempt (offsets 1, 3, 6, ... from the home
     slot); the loop must terminate because the new table has room.  */
  for (i = 0; i < old_number_of_slots; i++)
    if (old_slots[i] != OBJC_MAP_PRIVATE_EMPTY_SLOT)
      {
        size_t k = IDENTIFIER_HASH_VALUE (old_slots[i]) & map->mask;

        if (map->slots[k] == OBJC_MAP_PRIVATE_EMPTY_SLOT)
          {
            map->slots[k] = old_slots[i];
            map->values[k] = old_values[i];
          }
        else
          {
            size_t j = 1;
            while (1)
              {
                k = (k + j) & map->mask;
                if (map->slots[k] == OBJC_MAP_PRIVATE_EMPTY_SLOT)
                  {
                    map->slots[k] = old_slots[i];
                    map->values[k] = old_values[i];
                    break;
                  }
                j++;
              }
          }
      }

  ggc_free (old_slots);
  ggc_free (old_values);
}
/* Ensure there is space for RESERVE more elements in the GC-allocated
   vector VEC, whose header occupies VEC_OFFSET bytes and whose elements
   are ELT_SIZE bytes each.  EXACT requests no over-allocation.  Returns
   the (possibly moved) vector, or NULL when nothing needs to be kept,
   in which case the old storage is released.  */
void *
vec_gc_o_reserve_1 (void *vec, int reserve, size_t vec_offset, size_t elt_size,
		    bool exact MEM_STAT_DECL)
{
  struct vec_prefix *prefix = (struct vec_prefix *) vec;
  unsigned nelems = calculate_allocation (prefix, reserve, exact);
  size_t nbytes;
  void *newvec;

  if (nelems == 0)
    {
      /* Nothing to hold: drop any existing storage.  */
      if (prefix)
	ggc_free (prefix);
      return NULL;
    }

  /* Space we would like ...  */
  nbytes = vec_offset + nelems * elt_size;
  /* ... rounded up to what the allocator will really hand out ...  */
  nbytes = ggc_round_alloc_size (nbytes);
  /* ... converted back into an element count, and then into the exact
     number of bytes that count occupies.  */
  nelems = (nbytes - vec_offset) / elt_size;
  nbytes = vec_offset + nelems * elt_size;

  newvec = ggc_realloc_stat (vec, nbytes PASS_MEM_STAT);
  ((struct vec_prefix *) newvec)->alloc = nelems;
  /* A freshly created vector starts out empty.  */
  if (prefix == NULL)
    ((struct vec_prefix *) newvec)->num = 0;

  return newvec;
}
/* Recompute TREE_OPTIMIZATION_OPTABS for OPTNODE, recording a set of
   optabs only when it differs from this_target_optabs.  Any previously
   recorded set is reused as scratch space or returned to the GC.  */
void
init_tree_optimization_optabs (tree optnode)
{
  /* Quick exit if we have already computed optabs for this target.  */
  if (TREE_OPTIMIZATION_BASE_OPTABS (optnode) == this_target_optabs)
    return;

  /* Forget any previous information and set up for the current target.  */
  TREE_OPTIMIZATION_BASE_OPTABS (optnode) = this_target_optabs;

  /* Reuse the node's old optabs block as scratch if it has one,
     otherwise allocate a fresh zeroed block.  */
  struct target_optabs *scratch
    = (struct target_optabs *) TREE_OPTIMIZATION_OPTABS (optnode);
  if (scratch != NULL)
    memset (scratch, 0, sizeof (struct target_optabs));
  else
    scratch = ggc_alloc<target_optabs> ();

  /* Generate a new set of optabs into the scratch block.  */
  init_all_optabs (scratch);

  /* Keep the result only if it actually differs from the target's
     defaults; otherwise record nothing and free the scratch block.  */
  if (memcmp (scratch, this_target_optabs,
	      sizeof (struct target_optabs)) == 0)
    {
      TREE_OPTIMIZATION_OPTABS (optnode) = NULL;
      ggc_free (scratch);
    }
  else
    TREE_OPTIMIZATION_OPTABS (optnode) = scratch;
}
/* Release all memory held by FN's SSA operand caches.  When the last
   initialized user goes away (n_initialized drops to zero), the shared
   build state and the operands bitmap obstack are released as well.  */
void
fini_ssa_operands (struct function *fn)
{
  struct ssa_operand_memory_d *ptr;

  /* Shared (non-per-function) build state: only tear it down once the
     final user is finished.  */
  if (!--n_initialized)
    {
      build_uses.release ();
      build_vdef = NULL_TREE;
      build_vuse = NULL_TREE;
    }

  gimple_ssa_operands (fn)->free_uses = NULL;

  /* Walk the chain of operand memory chunks, returning each to the GC.  */
  while ((ptr = gimple_ssa_operands (fn)->operand_memory) != NULL)
    {
      gimple_ssa_operands (fn)->operand_memory
        = gimple_ssa_operands (fn)->operand_memory->next;
      ggc_free (ptr);
    }

  gimple_ssa_operands (fn)->ops_active = false;

  if (!n_initialized)
    bitmap_obstack_release (&operands_bitmap_obstack);

  fn->gimple_df->vop = NULL_TREE;
}
/* Hash-table traits hook: destroy the entry V when it is removed.
   Releases the vectors and the optional pointer set V owns, then
   returns the node itself to the GC.  */
inline void
odr_hasher::remove (value_type *v)
{
  v->bases.release ();
  v->derived_types.release ();
  /* types_set is created lazily, so it may be NULL.  */
  if (v->types_set)
    pointer_set_destroy (v->types_set);
  ggc_free (v);
}
/* Remove node from the varpool.  Unregisters NODE from the symbol
   table, discards its initializer where that is safe, and frees it.  */
void
varpool_remove_node (struct varpool_node *node)
{
  symtab_unregister_node ((symtab_node)node);

  /* Replace the initializer with error_mark_node unless it may still
     be needed later.  */
  if (DECL_INITIAL (node->symbol.decl)
      && !DECL_IN_CONSTANT_POOL (node->symbol.decl)
      /* Keep vtables for BINFO folding.  */
      && !DECL_VIRTUAL_P (node->symbol.decl)
      /* FIXME: http://gcc.gnu.org/PR55395 */
      && debug_info_level == DINFO_LEVEL_NONE)
    DECL_INITIAL (node->symbol.decl) = error_mark_node;

  ggc_free (node);
}
/* Remove node from the varpool.  Unlinks NODE from the hash table, the
   main node list, and the needed/unanalyzed queue, detaches it from its
   comdat group ring, drops its IPA references, and frees it.  */
void
varpool_remove_node (struct varpool_node *node)
{
  void **slot;

  /* Drop NODE from the lookup hash; it must be present.  */
  slot = htab_find_slot (varpool_hash, node, NO_INSERT);
  gcc_assert (*slot == node);
  htab_clear_slot (varpool_hash, slot);
  gcc_assert (!varpool_assembled_nodes_queue);

  /* Unlink from the doubly-linked list of all varpool nodes, updating
     the list head when NODE is first.  */
  if (node->next)
    node->next->prev = node->prev;
  if (node->prev)
    node->prev->next = node->next;
  else
    {
      gcc_assert (varpool_nodes == node);
      varpool_nodes = node->next;
    }

  /* Unlink from the needed queue, fixing up the unanalyzed pointer and
     the queue head/tail when NODE sits at either end.  */
  if (varpool_first_unanalyzed_node == node)
    varpool_first_unanalyzed_node = node->next_needed;
  if (node->next_needed)
    node->next_needed->prev_needed = node->prev_needed;
  else if (node->prev_needed)
    {
      gcc_assert (varpool_last_needed_node);
      varpool_last_needed_node = node->prev_needed;
    }
  if (node->prev_needed)
    node->prev_needed->next_needed = node->next_needed;
  else if (node->next_needed)
    {
      gcc_assert (varpool_nodes_queue == node);
      varpool_nodes_queue = node->next_needed;
    }

  /* Remove NODE from its circular same_comdat_group ring; when only
     one other member remains, break the ring entirely.  */
  if (node->same_comdat_group)
    {
      struct varpool_node *prev;
      /* Find the ring member that points at NODE.  */
      for (prev = node->same_comdat_group;
           prev->same_comdat_group != node;
           prev = prev->same_comdat_group)
        ;
      if (node->same_comdat_group == prev)
        prev->same_comdat_group = NULL;
      else
        prev->same_comdat_group = node->same_comdat_group;
      node->same_comdat_group = NULL;
    }

  ipa_remove_all_references (&node->ref_list);
  ipa_remove_all_refering (&node->ref_list);
  ggc_free (node);
}
/* Set the section of this node to SECTION, maintaining the
   reference-counted section_hash_entry objects shared through
   symtab->section_hash.  A NULL SECTION clears the section (and the
   implicit_section flag).  */
void
symtab_node::set_section_for_node (const char *section)
{
  const char *current = get_section ();
  void **slot;

  /* Nothing to do if the section is unchanged (same pointer or same
     string contents).  */
  if (current == section
      || (current && section
          && !strcmp (current, section)))
    return;

  /* Drop the reference to the old entry; free it (and remove it from
     the hash) when this was the last user.  */
  if (current)
    {
      x_section->ref_count--;
      if (!x_section->ref_count)
        {
          slot = htab_find_slot_with_hash (symtab->section_hash,
                                           x_section->name,
                                           htab_hash_string (x_section->name),
                                           INSERT);
          ggc_free (x_section);
          htab_clear_slot (symtab->section_hash, slot);
        }
      x_section = NULL;
    }

  if (!section)
    {
      implicit_section = false;
      return;
    }

  /* Look up — or lazily create the hash and a new entry for — SECTION,
     then take a reference to the shared entry.  */
  if (!symtab->section_hash)
    symtab->section_hash = htab_create_ggc (10, hash_section_hash_entry,
                                            eq_sections, NULL);
  slot = htab_find_slot_with_hash (symtab->section_hash, section,
                                   htab_hash_string (section), INSERT);
  if (*slot)
    x_section = (section_hash_entry *)*slot;
  else
    {
      int len = strlen (section);
      *slot = x_section = ggc_cleared_alloc<section_hash_entry> ();
      x_section->name = ggc_vec_alloc<char> (len + 1);
      memcpy (x_section->name, section, len + 1);
    }
  x_section->ref_count++;
}
/* Release all memory held by the SSA operand caches of CFUN and, with
   TDF_STATS dumping enabled, report the collected clobber statistics.
   When the last initialized user goes away (n_initialized reaches
   zero), the shared build vectors and bitmap obstack are released
   as well.  */
void
fini_ssa_operands (void)
{
  struct ssa_operand_memory_d *ptr;

  /* Shared build state: only tear it down for the final user.  */
  if (!--n_initialized)
    {
      VEC_free (tree, heap, build_defs);
      VEC_free (tree, heap, build_uses);
      build_vdef = NULL_TREE;
      build_vuse = NULL_TREE;
    }

  gimple_ssa_operands (cfun)->free_defs = NULL;
  gimple_ssa_operands (cfun)->free_uses = NULL;

  /* Return every chunk of operand memory to the GC.  */
  while ((ptr = gimple_ssa_operands (cfun)->operand_memory) != NULL)
    {
      gimple_ssa_operands (cfun)->operand_memory
        = gimple_ssa_operands (cfun)->operand_memory->next;
      ggc_free (ptr);
    }

  gimple_ssa_operands (cfun)->ops_active = false;

  if (!n_initialized)
    bitmap_obstack_release (&operands_bitmap_obstack);

  cfun->gimple_df->vop = NULL_TREE;

  /* Dump statistics about clobbering operands.  */
  if (dump_file && (dump_flags & TDF_STATS))
    {
      fprintf (dump_file, "Original clobbered vars: %d\n",
               clobber_stats.clobbered_vars);
      fprintf (dump_file, "Static write clobbers avoided: %d\n",
               clobber_stats.static_write_clobbers_avoided);
      fprintf (dump_file, "Static read clobbers avoided: %d\n",
               clobber_stats.static_read_clobbers_avoided);
      fprintf (dump_file, "Unescapable clobbers avoided: %d\n",
               clobber_stats.unescapable_clobbers_avoided);
      fprintf (dump_file, "Original read-only clobbers: %d\n",
               clobber_stats.readonly_clobbers);
      fprintf (dump_file, "Static read-only clobbers avoided: %d\n",
               clobber_stats.static_readonly_clobbers_avoided);
    }
}
/* Tear down the loop optimizer.  If loop structure must be preserved
   for later passes (PROP_loops set on CFUN), only downgrade the
   recorded loop-state flags; otherwise free all loop data structures
   and detach every basic block from its loop.  */
void
loop_optimizer_finalize (void)
{
  struct loop *loop;
  basic_block bb;

  timevar_push (TV_LOOP_FINI);

  if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
    release_recorded_exits ();

  free_numbers_of_iterations_estimates ();

  /* If we should preserve loop structure, do not free it but clear
     flags that advanced properties are there as we are not preserving
     that in full.  */
  if (cfun->curr_properties & PROP_loops)
    {
      loops_state_clear (LOOP_CLOSED_SSA
                         | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS
                         | LOOPS_HAVE_PREHEADERS
                         | LOOPS_HAVE_SIMPLE_LATCHES
                         | LOOPS_HAVE_FALLTHRU_PREHEADERS);
      loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
      goto loop_fini_done;
    }

  gcc_assert (current_loops != NULL);

  /* Release the per-loop simple-desc data before freeing the tree.  */
  FOR_EACH_LOOP (loop, 0)
    free_simple_loop_desc (loop);

  /* Clean up.  */
  flow_loops_free (current_loops);
  ggc_free (current_loops);
  current_loops = NULL;

  FOR_ALL_BB_FN (bb, cfun)
    {
      bb->loop_father = NULL;
    }

loop_fini_done:
  timevar_pop (TV_LOOP_FINI);
}
/* Remove node from the varpool.  Runs the removal hooks, unregisters
   NODE from the symbol table, fixes up its initializer, and frees the
   node.  */
void
varpool_remove_node (varpool_node *node)
{
  varpool_call_node_removal_hooks (node);
  symtab_unregister_node (node);

  /* Because we remove references from external functions before final
     compilation, we may end up removing useful constructors.
     FIXME: We probably want to trace boundaries better.
     During LTO streaming the initializer is left untouched.  */
  if (cgraph_state != CGRAPH_LTO_STREAMING)
    {
      tree init = ctor_for_folding (node->decl);
      if (init == error_mark_node)
	varpool_remove_initializer (node);
      else
	DECL_INITIAL (node->decl) = init;
    }

  ggc_free (node);
}
/* Resize a block of memory, possibly re-allocating it.
   X may be NULL, in which case this degenerates to a plain allocation
   of SIZE bytes.  Returns the (possibly moved) block; when a move
   happens the old block is released with ggc_free.  */
void *
ggc_realloc_stat (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc_stat (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  /* Shrinking (or fitting within the existing allocation) is done in
     place.  */
  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size
         of the pool, not the size of the individually allocated object,
         the size which was previously made accessible.  Unfortunately,
         we don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
                                                old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc_stat (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
/* Ensure there is space for RESERVE more elements in the GC-allocated
   vector VEC, whose header occupies VEC_OFFSET bytes and whose elements
   are ELT_SIZE bytes each.  EXACT requests no over-allocation.  Returns
   the (possibly moved) vector, or NULL when nothing needs to be kept,
   in which case any existing storage is released.  */
static void *
vec_gc_o_reserve_1 (void *vec, int reserve, size_t vec_offset, size_t elt_size,
		    bool exact MEM_STAT_DECL)
{
  struct vec_prefix *base = (struct vec_prefix *) vec;
  unsigned want = calculate_allocation (base, reserve, exact);
  void *grown;

  if (want == 0)
    {
      /* Nothing to hold: drop any existing storage.  */
      if (base != NULL)
	ggc_free (base);
      return NULL;
    }

  grown = ggc_realloc_stat (vec, vec_offset + want * elt_size PASS_MEM_STAT);
  ((struct vec_prefix *) grown)->alloc = want;
  /* A freshly created vector starts out empty.  */
  if (base == NULL)
    ((struct vec_prefix *) grown)->num = 0;

  return grown;
}
/* Remove node from the varpool.  Unregisters NODE from the symbol
   table, unlinks it from the needed/unanalyzed queue, and frees it.  */
void
varpool_remove_node (struct varpool_node *node)
{
  gcc_assert (!varpool_assembled_nodes_queue);
  symtab_unregister_node ((symtab_node)node);

  /* Fix up the queue pointers around NODE: the unanalyzed pointer and
     the queue head/tail when NODE sits at either end.  */
  if (varpool_first_unanalyzed_node == node)
    x_varpool_first_unanalyzed_node = (symtab_node)node->next_needed;
  if (node->next_needed)
    node->next_needed->prev_needed = node->prev_needed;
  else if (node->prev_needed)
    {
      gcc_assert (varpool_last_needed_node);
      x_varpool_last_needed_node = (symtab_node)node->prev_needed;
    }
  if (node->prev_needed)
    node->prev_needed->next_needed = node->next_needed;
  else if (node->next_needed)
    {
      gcc_assert (varpool_nodes_queue == node);
      x_varpool_nodes_queue = (symtab_node)node->next_needed;
    }
  ggc_free (node);
}
/* Return edge E of function FN to the GC and keep FN's edge count in
   sync.  */
static void
free_edge (function *fn, edge e)
{
  ggc_free (e);
  --n_edges_for_fn (fn);
}
/* Return edge E to the GC and keep the current function's edge count
   in sync.  */
static void
free_edge (edge e)
{
  ggc_free (e);
  --n_edges_for_fn (cfun);
}
/* Return edge E to the GC and keep the global edge count in sync.  */
static void
free_edge (edge e)
{
  ggc_free (e);
  --n_edges;
}