/*
 * big_realloc - reallocate the block q to hold n elements of memsize bytes
 * each, on behalf of the allocation site named by p (used in logging only).
 *
 * In debug_memory mode PATTERN_SIZE extra elements are reserved for the
 * debug guard pattern. The chunk-tracking entry for q is removed before the
 * realloc (the pointer may move) and re-registered for the new block.
 *
 * Exits the process via mrexit() if realloc fails, so the return value is
 * always a valid pointer.
 */
void *big_realloc(char *p, void *q, size_t n)
{
	void *a;
	size_t m;

	if (debug_memory) {
		/* Bug fix: n is size_t but was passed for %ld unconverted,
		 * which is undefined behavior on platforms where size_t is
		 * not long. Cast explicitly, matching the (long)m below. */
		mrlog("big_realloc(%s, %p, %ld)", p, q, (long)n);
		m = memsize*(n+PATTERN_SIZE);
	} else {
		m = memsize*n;
	}
	/* Unregister before realloc: the old pointer may become invalid. */
	remove_chunk(q, p);
	a = realloc(q, m);
	if (debug > 2) {
		mrlog("Reallocating %ld bytes (%p => %p) on behalf of %s",
		      (long)m, q, a, p);
	}
	if (a == NULL) {
		mrlog("Allocation '%s' failed, exiting", p);
		mrexit("Out of memory", EXIT_FAILURE);
	}
	store_chunk(a, m, p);
	return a;
}
/*
 * mem_enter_free - bookkeeping hook run when a free() of address `base` is
 * entered, operating on either the kernel or the user heap state of ls.
 *
 * Detects re-entrancy (free entered while a malloc/free is in flight),
 * tolerates free(NULL), and distinguishes a double free (address found in
 * the freed-chunk history) from other invalid frees.
 *
 * NOTE(review): this definition is truncated in this view -- the final
 * `else` branch (address found in neither the live heap nor the freed
 * history) continues past the visible source.
 */
static void mem_enter_free(struct ls_state *ls, bool in_kernel, unsigned int base)
{
	/* Select kernel vs. user heap bookkeeping. */
	struct mem_state *m = in_kernel ? &ls->kern_mem : &ls->user_mem;
	struct chunk *chunk;
	assert(!m->in_mm_init);
	/* Entering free() while already inside malloc()/free() is a bug. */
	if (m->in_alloc || m->in_free) {
		FOUND_A_BUG(ls, "Free (in %s) reentered %s!", K_STR(in_kernel),
			    m->in_alloc ? "Malloc" : "Free");
	}
	chunk = remove_chunk(&m->heap, base);
	if (base == 0) {
		/* free(NULL): nothing should ever be tracked at address 0. */
		assert(chunk == NULL);
		lsprintf(INFO, "Free() NULL (in %s); ok, I guess...\n",
			 K_STR(in_kernel));
	} else if (chunk == NULL) {
		/* Not in the live heap -- consult the freed-chunk history to
		 * tell a double free apart from a wild free. */
		struct hax *before;
		struct hax *after;
		chunk = find_freed_chunk(ls, base, in_kernel, &before, &after);
		if (chunk != NULL) {
			/* Double free: report, including where/when the chunk
			 * was previously freed. */
			print_freed_chunk_info(chunk, before, after, NULL);
			char buf[BUF_SIZE];
			int len = scnprintf(buf, BUF_SIZE, "DOUBLE FREE (in %s)"
					    " of 0x%x!", K_STR(in_kernel), base);
			FOUND_A_BUG_HTML_INFO(ls, buf, len, html_env,
					      print_freed_chunk_info(chunk, before, after, html_env);
					      );
		} else {
/*
 * big_free - release the block q allocated on behalf of the site named by
 * p (used only for log messages), removing its chunk-tracking entry first.
 * A NULL q is accepted and only (optionally) logged.
 */
void big_free(char *p, void *q)
{
	if (q != NULL) {
		if (debug > 2) {
			mrlog("Freeing %p on behalf of %s", q, p);
		}
		/* Drop the tracking entry, then hand the block back. */
		remove_chunk(q, p);
		free(q);
		return;
	}
	if (debug) {
		mrlog("No need to free %p on behalf of %s", q, p);
	}
}
/**
 ** smMemFree - free a shared-memory zone.
 ** (Original header: "Liberation d'une zone".)
 **
 ** ptr must be a pointer previously returned by the matching allocator;
 ** the chunk header sits immediately before it and is validated via the
 ** MALLOC_MAGIC sentinel stored in its `next` field. The freed chunk is
 ** inserted into the free list and coalesced with its physical neighbors
 ** when they are free. Returns OK on success, ERROR on NULL or on a
 ** pointer that was not produced by the allocator.
 **/
STATUS smMemFree(void *ptr)
{
    SM_MALLOC_CHUNK *oc, *c;

    LOGDBG(("comLib:smMemLib: free 0x%x\n", (unsigned)ptr));
    if (ptr == NULL) {
        LOGDBG(("comLib:smMemLib: free(NULL)\n"));
        return ERROR;
    }
    /* Get a pointer to the header (it immediately precedes the data). */
    oc = (SM_MALLOC_CHUNK *)ptr - 1;
    /* Check that this block was allocated: allocated chunks carry
     * MALLOC_MAGIC in `next`. */
    if (oc->next != MALLOC_MAGIC) {
        /* what to do ? */
        LOGDBG(("comLib:smMemLib: free(something not returned by malloc)\n"));
        return ERROR;
    }
    /* Insert the free chunk into the free list (sets prev/next links). */
    insert_after(&smMemFreeList, oc);
    /* Try to merge with the preceding chunk: prev is a global (shared)
     * address, so translate it to a local pointer first. */
    c = smObjGlobalToLocal(oc->prev);
    if (c != NULL && oc == (SM_MALLOC_CHUNK *)((char *)c + REAL_SIZE(c->length))) {
        /* Physically adjacent: merge oc into its predecessor. */
        c->length += REAL_SIZE(oc->length);
        remove_chunk(&smMemFreeList, oc);
        oc = c;
    }
    /* Try to merge with the following chunk. */
    c = smObjGlobalToLocal(oc->next);
    if (c == (SM_MALLOC_CHUNK *)((char *)oc + REAL_SIZE(oc->length))) {
        /* Merge (implies oc->next != NULL). */
        oc->length += REAL_SIZE(c->length);
        remove_chunk(&smMemFreeList, c);
    }
    return OK;
}
/**
 ** internal_malloc - first-fit allocator over the shared free list.
 **
 ** Rounds the request up to a multiple of sizeof(double) with a floor of
 ** MALLOC_MIN_CHUNK, scans the free list for the first chunk big enough
 ** (EVERY_FREE leaves the match, or NULL, in c), and either splits the
 ** chunk or unlinks it whole. Returns a pointer just past the chunk
 ** header, or NULL with errno set to ENOMEM.
 **/
static void *
internal_malloc(size_t size)
{
    SM_MALLOC_CHUNK *c, *nc;

#ifdef MALLOC_ZERO_RETURNS_NULL
    if (size == 0) {
        return(NULL);
    }
#endif
    /* Allocate at least MALLOC_MIN_CHUNK bytes and a multiple of
     * sizeof(double). */
    size = ROUNDUP(size, sizeof(double));
    if (size < MALLOC_MIN_CHUNK) {
        size = MALLOC_MIN_CHUNK;
    }
    /* Look for a free chunk with length > size (first fit). */
    EVERY_FREE(c->length >= size);
    if (c != NULL) {
        /* Found a chunk. */
        assert(c->signature == SIGNATURE);
        if (c->length > size + REAL_SIZE(MALLOC_MIN_CHUNK)) {
            /* Split it: carve the tail off for the caller and shrink the
             * free chunk in place.
             * NOTE(review): the tail offset uses c->length - size while
             * the shrink uses REAL_SIZE(size); this presumably accounts
             * for the header via REAL_SIZE -- verify against the
             * SM_MALLOC_CHUNK / REAL_SIZE definitions. */
            nc = (SM_MALLOC_CHUNK *)((char *)c + c->length - size);
            nc->length = size;
            c->length -= REAL_SIZE(size);
            /* Mark the new chunk as allocated. */
            nc->next = MALLOC_MAGIC;
            nc->signature = SIGNATURE;
            return((void *)(nc+1));
        } else {
            /* Too small to split: remove it from the free list whole. */
            remove_chunk(&smMemFreeList, c);
            c->next = MALLOC_MAGIC;
            return((void *)(c+1));
        }
    } else {
        /* No chunk large enough. */
        errnoSet(ENOMEM);
        return NULL;
    }
} /* malloc */
/*
 * sys$mem_init - build the boot-time memory map from the L4 kernel
 * interface page.
 *
 * Populates mem_info with three descriptor arrays (physical RAM, mapped
 * IO, virtual memory) discovered via sys$find_memory_region, plus the
 * initial objects (kernel, boot info, modules) from
 * sys$find_initial_objects. Punches a guard page out of low virtual
 * memory, removes the physical footprint of every initial object from
 * the free RAM regions, computes swapper_base as one past the highest
 * RAM region, and logs the resulting map. All backing storage is static,
 * so this must only be called once.
 */
void sys$mem_init(L4_KernelInterfacePage_t *kip, struct vms$meminfo *mem_info,
        vms$pointer pagesize)
{
    /* Static backing store for the descriptor arrays handed to callers
     * via mem_info (no heap exists this early in boot). */
    static struct initial_obj static_objects[NUM_MI_OBJECTS];
    static struct memdesc static_regions[NUM_MI_REGIONS];
    static struct memdesc static_io_regions[NUM_MI_IOREGIONS];
    static struct memdesc static_vm_regions[NUM_MI_VMREGIONS];
    unsigned int i;

    notice(SYSBOOT_I_SYSBOOT "initializing memory\n");

    /* Physical RAM regions (IO areas excluded). */
    mem_info->regions = static_regions;
    mem_info->max_regions = NUM_MI_REGIONS;
    mem_info->num_regions = sys$find_memory_region(kip, NUM_MI_REGIONS,
            VMS$MEM_RAM, VMS$MEM_IO, static_regions);

    /* Mapped-IO regions (RAM excluded). */
    mem_info->io_regions = static_io_regions;
    mem_info->max_io_regions = NUM_MI_IOREGIONS;
    mem_info->num_io_regions = sys$find_memory_region(kip, NUM_MI_IOREGIONS,
            VMS$MEM_IO, VMS$MEM_RAM, static_io_regions);

    /* Virtual-memory regions (nothing excluded). */
    mem_info->vm_regions = static_vm_regions;
    mem_info->max_vm_regions = NUM_MI_VMREGIONS;
    mem_info->num_vm_regions = sys$find_memory_region(kip, NUM_MI_VMREGIONS,
            VMS$MEM_VM, 0, static_vm_regions);

    // Create a guard page: remove [0, pagesize-1] from virtual memory so
    // NULL dereferences fault.
    mem_info->num_vm_regions = sys$remove_chunk(mem_info->vm_regions,
            mem_info->num_vm_regions, NUM_MI_VMREGIONS, 0, pagesize - 1);

    mem_info->objects = static_objects;
    mem_info->max_objects = NUM_MI_OBJECTS;
    mem_info->num_objects = sys$find_initial_objects(kip, NUM_MI_OBJECTS,
            static_objects);

    // Remove any initial objects from free physical memory
    for(i = 0; i < mem_info->num_objects; i++) {
        if (mem_info->objects[i].flags & VMS$IOF_PHYS) {
            /* Round the object out to whole pages before carving it out
             * of the RAM region list. */
            mem_info->num_regions = sys$remove_chunk(mem_info->regions,
                    mem_info->num_regions, NUM_MI_REGIONS,
                    sys$page_round_down(mem_info->objects[i].base, pagesize),
                    sys$page_round_up(mem_info->objects[i].end, pagesize) - 1);
        }
    }

    sys$set_flags(mem_info, VMS$IOF_APP, VMS$IOF_VIRT);

    /* swapper_base ends up one past the highest physical region. */
    mem_info->swapper_base = 0;
    for(i = 0; i < mem_info->num_regions; i++) {
        notice(MEM_I_AREA "$%016lX - $%016lX: physical memory\n",
                mem_info->regions[i].base, mem_info->regions[i].end);
        if (mem_info->swapper_base < mem_info->regions[i].end) {
            mem_info->swapper_base = mem_info->regions[i].end + 1;
        }
    }

    for(i = 0; i < mem_info->num_vm_regions; i++) {
        notice(MEM_I_AREA "$%016lX - $%016lX: virtual memory\n",
                mem_info->vm_regions[i].base, mem_info->vm_regions[i].end);
    }

    for(i = 0; i < mem_info->num_io_regions; i++) {
        notice(MEM_I_AREA "$%016lX - $%016lX: mapped IO\n",
                mem_info->io_regions[i].base, mem_info->io_regions[i].end);
    }

    /* Describe each initial object by its flag category. */
    for(i = 0; i < mem_info->num_objects; i++) {
        if (mem_info->objects[i].flags & VMS$IOF_ROOT) {
            notice(MEM_I_AREA "$%016lX - $%016lX: kernel\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        } else if (mem_info->objects[i].flags & VMS$IOF_RESERVED) {
            notice(MEM_I_AREA "$%016lX - $%016lX: reserved by kernel\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        } else if (mem_info->objects[i].flags & VMS$IOF_BOOT) {
            notice(MEM_I_AREA "$%016lX - $%016lX: boot information\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        } else {
            notice(MEM_I_AREA "$%016lX - $%016lX: modules\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
    }
    return;
}
/*
 * sys$find_memory_region - collect memory descriptors of a given class
 * from the L4 kernel interface page.
 *
 * Walks every KIP memory descriptor, classifies it (RAM / IO / OTHER for
 * physical entries, VM for non-physical), and accumulates descriptors
 * matching memory_type into mem_desc (at most max entries), merging or
 * skipping ranges that overlap entries already collected. Descriptors
 * matching except_type are instead carved OUT of what has been collected
 * so far via sys$remove_chunk. Returns the number of descriptors stored.
 *
 * NOTE(review): the guard is PANIC(pos >= (max - 1)), so the last array
 * slot is never used -- possibly a deliberate spare for sys$remove_chunk
 * splits, possibly an off-by-one; confirm against sys$remove_chunk.
 */
static unsigned int sys$find_memory_region(L4_KernelInterfacePage_t *kip,
        unsigned int max, int memory_type, int except_type,
        struct memdesc *mem_desc)
{
    int covered;
    int mem_desc_type;
    L4_Word_t high;
    L4_Word_t low;
    L4_Word_t type;
    unsigned int i;
    unsigned int j;
    unsigned int pos;

    pos = 0;
    for(i = 0; i < kip->MemoryInfo.n; i++) {
        if (sys$find_memory_info(kip, i, &low, &high, &type)) {
            // Physical memory
            switch(type) {
                case L4_ConventionalMemoryType:
                    mem_desc_type = VMS$MEM_RAM;
                    break;
                case L4_SharedMemoryType:
                case L4_DedicatedMemoryType:
                    mem_desc_type = VMS$MEM_IO;
                    break;
                default:
                    mem_desc_type = VMS$MEM_OTHER;
                    break;
            }
        } else {
            // No physical memory
            mem_desc_type = VMS$MEM_VM;
        }

        if (mem_desc_type & memory_type) {
            // Add it to the array
            covered = 0;
            PANIC(pos >= (max - 1));
            /* Merge with an existing entry when the new range overlaps
             * it; three cases: extends the end, extends the start, or is
             * wholly contained (then nothing to store). */
            for(j = 0; j < pos; j++) {
                if ((low >= mem_desc[j].base) && (low < mem_desc[j].end)
                        && (high >= mem_desc[j].end)) {
                    /* Overlaps the tail: extend the entry upward. */
                    mem_desc[j].end = high;
                    covered = 1;
                    break;
                } else if ((low <= mem_desc[j].base) && (high <= mem_desc[j].end)
                        && (high > mem_desc[j].base)) {
                    /* Overlaps the head.
                     * NOTE(review): this sets base = high + 1, shrinking
                     * the entry instead of extending it down to `low` --
                     * verify intent; looks asymmetric with the first case. */
                    mem_desc[j].base = high + 1;
                    covered = 1;
                    break;
                } else if ((low > mem_desc[j].base) && (low < mem_desc[j].end)
                        && (high < mem_desc[j].end) && (high > mem_desc[j].base)) {
                    /* Fully inside an existing entry: nothing to add. */
                    covered = 1;
                    break;
                }
            }
            if (covered == 0) {
                /* Disjoint from everything collected so far: new entry. */
                mem_desc[pos].base = low;
                mem_desc[pos].end = high;
                pos++;
            }
        } else if (mem_desc_type & except_type) {
            /* Excluded class: punch this range out of what we have. */
            pos = sys$remove_chunk(mem_desc, pos, max, low, high);
        }
    }
    // Return number of actual descriptors we have copied.
    return(pos);
}