/*
 * do_free - release the region beginning at the specified address.
 *
 * The address must name the exact start of an allocated (non-free)
 * region in the map. The hardware translation is torn down first;
 * the backing physical pages are returned to the allocator only when
 * this region is their sole user.
 *
 * Returns 0 on success, EINVAL if the address does not identify an
 * allocated region start.
 */
static int
do_free(vm_map_t map, void *addr)
{
	struct region *r;

	addr = (void *)PAGE_TRUNC(addr);

	/* The address must match the start of an allocated region. */
	r = region_find(&map->head, addr, 1);
	if (r == NULL)
		return EINVAL;
	if (r->addr != addr)
		return EINVAL;
	if (r->flags & REG_FREE)
		return EINVAL;

	/* Remove the translation for the whole region. */
	mmu_map(map->pgd, r->phys, r->addr, r->size, PG_UNMAP);

	/*
	 * Give back the physical pages only when no other region
	 * refers to them: neither shared nor a mapping of another
	 * task's memory.
	 */
	if ((r->flags & (REG_SHARED | REG_MAPPED)) == 0)
		page_free(r->phys, r->size);

	region_free(&map->head, r);
	return 0;
}
/*
 * do_allocate - allocate a zero-filled, readable/writable memory
 * region in the target map.
 *
 * map:      address map to allocate in.
 * addr:     in/out. When !anywhere, *addr is the requested virtual
 *           address (truncated to a page boundary). On success *addr
 *           receives the actual start address of the region.
 * size:     requested length in bytes; rounded up to whole pages.
 * anywhere: nonzero lets the allocator pick any free virtual range;
 *           zero demands the range containing *addr.
 *
 * Returns 0 on success, EINVAL for a zero size or an unavailable
 * fixed range, ENOMEM when virtual or physical space is exhausted.
 */
static int do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
	struct region *reg;
	char *start, *end, *phys;

	if (size == 0)
		return EINVAL;

	/*
	 * Allocate region
	 */
	if (anywhere) {
		/* Any address will do: just grab a free region of the size. */
		size = (size_t)PAGE_ALIGN(size);
		if ((reg = region_alloc(&map->head, size)) == NULL)
			return ENOMEM;
	} else {
		/* Fixed address: round the request out to page boundaries. */
		start = (char *)PAGE_TRUNC(*addr);
		end = (char *)PAGE_ALIGN(start + size);
		size = (size_t)(end - start);

		/* The whole range must lie inside one free region. */
		reg = region_find(&map->head, start, size);
		if (reg == NULL || !(reg->flags & REG_FREE))
			return EINVAL;

		/* Carve the exact range out of the free region. */
		reg = region_split(&map->head, reg, start, size);
		if (reg == NULL)
			return ENOMEM;
	}
	reg->flags = REG_READ | REG_WRITE;

	/*
	 * Allocate physical pages, and map them into virtual address
	 */
	if ((phys = page_alloc(size)) == 0)
		goto err1;

	if (mmu_map(map->pgd, phys, reg->addr, size, PG_WRITE))
		goto err2;

	reg->phys = phys;

	/* Zero fill */
	memset(phys_to_virt(phys), 0, reg->size);
	*addr = reg->addr;
	return 0;

	/* Unwind in reverse order of acquisition. */
 err2:
	page_free(phys, size);
 err1:
	region_free(&map->head, reg);
	return ENOMEM;
}
void *__ast_realloc(void *ptr, size_t size, const char *file, int lineno, const char *func) { size_t len; struct ast_region *found; void *new_mem; if (ptr) { ast_mutex_lock(®lock); found = region_find(ptr); if (!found) { ast_mutex_unlock(®lock); astmm_log("WARNING: Realloc of unregistered memory %p by %s %s() line %d\n", ptr, file, func, lineno); my_do_crash(); return NULL; } len = found->len; ast_mutex_unlock(®lock); } else { found = NULL; len = 0; } if (!size) { __ast_free_region(ptr, file, lineno, func); return NULL; } new_mem = __ast_alloc_region(size, FUNC_REALLOC, file, lineno, func, 0); if (new_mem) { if (found) { /* Copy the old data to the new malloced memory. */ if (size <= len) { memcpy(new_mem, ptr, size); } else { memcpy(new_mem, ptr, len); /* Make sure that the added memory is not zero. */ memset(new_mem + len, MALLOC_FILLER, size - len); } __ast_free_region(ptr, file, lineno, func); } else { /* Make sure that the malloced memory is not zero. */ memset(new_mem, MALLOC_FILLER, size); } } return new_mem; }
/*
 * do_map - make another task's memory range visible to the current
 * task by creating a new region over the same addresses.
 *
 * NOTE(review): no mmu_map() call appears here, unlike the variant
 * that remaps physical pages — presumably this is the build where all
 * tasks share one address space, so the mapped address equals the
 * source address. Confirm against the build configuration.
 *
 * Returns 0 on success; EINVAL for a zero size or an unallocated
 * source range, EFAULT if *alloc is not writable, ENOMEM if the
 * region cannot be created.
 */
static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
	struct region *src, *new_reg;
	vm_map_t dst_map;
	task_t cur;
	char *start, *end;
	void *probe = NULL;

	if (size == 0)
		return EINVAL;

	/* Probe the user result pointer first so a bad one faults early. */
	if (umem_copyout(&probe, alloc, sizeof(probe)))
		return EFAULT;

	/* Round the request out to whole pages. */
	start = (char *)PAGE_TRUNC(addr);
	end = (char *)PAGE_ALIGN((char *)addr + size);
	size = (size_t)(end - start);

	/* The source range must lie inside an allocated region of "map". */
	src = region_find(&map->head, start, size);
	if (src == NULL || (src->flags & REG_FREE))
		return EINVAL;

	/* Create the mirror region in the current task's map. */
	cur = cur_task();
	dst_map = cur->map;
	new_reg = region_create(&dst_map->head, start, size);
	if (new_reg == NULL)
		return ENOMEM;
	new_reg->flags = src->flags | REG_MAPPED;

	/* Hand the (identical) address back; buffer was probed above. */
	umem_copyout(&addr, alloc, sizeof(addr));
	return 0;
}
/*
 * do_free - release the region beginning at the specified address.
 *
 * The address must be the exact start of an allocated region. The
 * backing pages are freed only when no other region refers to them.
 *
 * Returns 0 on success, EINVAL if no allocated region starts there.
 */
static int
do_free(vm_map_t map, void *addr)
{
	struct region *r;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * The target must be an allocated region and addr must be
	 * its first byte.
	 */
	r = region_find(&map->head, addr, 1);
	if (r == NULL || r->addr != addr || (r->flags & REG_FREE))
		return EINVAL;	/* not allocated */

	/*
	 * Pages are relinquished only when this region is their sole
	 * user — neither shared nor a mapping of other memory.
	 */
	if ((r->flags & (REG_SHARED | REG_MAPPED)) == 0)
		page_free(r->addr, r->size);

	region_free(&map->head, r);
	return 0;
}
/*
 * do_attribute - change the protection attribute of a region.
 *
 * Only the write permission can change: a writable region becomes
 * read-only when VMA_WRITE is absent from attr, and a read-only
 * region becomes read/write when VMA_WRITE is present. Mapped or
 * shared regions may not be changed.
 *
 * Returns 0 on success (including "already has that attribute"),
 * EINVAL if the address is not an allocated region start or the
 * region is mapped/shared.
 */
static int
do_attribute(vm_map_t map, void *addr, int attr)
{
	struct region *r;
	int have_write, want_write;

	addr = (void *)PAGE_TRUNC(addr);

	/* The address must name the start of an allocated region. */
	r = region_find(&map->head, addr, 1);
	if (r == NULL || r->addr != addr || (r->flags & REG_FREE))
		return EINVAL;	/* not allocated */

	/* Attributes of mapped or shared regions are immutable. */
	if (r->flags & (REG_MAPPED | REG_SHARED))
		return EINVAL;

	/* Only act when the write permission actually flips. */
	have_write = (r->flags & REG_WRITE) != 0;
	want_write = (attr & VMA_WRITE) != 0;
	if (have_write == want_write)
		return 0;	/* same attribute */

	r->flags = want_write ? (REG_READ | REG_WRITE) : REG_READ;
	return 0;
}
/*
 * do_map - map a range of another task's memory into the current
 * task's address space.
 *
 * map:   source task's address map.
 * addr:  start of the source range (any byte offset; page-rounded
 *        internally, the sub-page offset is preserved in the result).
 * size:  length of the range in bytes.
 * alloc: user pointer receiving the address at which the range is
 *        visible in the current task.
 *
 * Returns 0 on success; EINVAL for a zero size or unallocated source
 * range, EFAULT if *alloc is unwritable, ENOMEM on region/mapping
 * exhaustion.
 */
static int do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
	vm_map_t curmap;
	char *start, *end, *phys;
	size_t offset;
	struct region *reg, *cur, *tgt;
	task_t self;
	int map_type;
	void *tmp;

	if (size == 0)
		return EINVAL;

	/* check fault */
	/* Probe the user result pointer up front so the final copyout
	 * below cannot fail after the mapping is already installed. */
	tmp = NULL;
	if (umem_copyout(&tmp, alloc, sizeof(tmp)))
		return EFAULT;

	/* Page-round the range; remember the sub-page offset so the
	 * caller gets back the address of the same byte, not the page. */
	start = (char *)PAGE_TRUNC(addr);
	end = (char *)PAGE_ALIGN((char *)addr + size);
	size = (size_t)(end - start);
	offset = (size_t)((char *)addr - start);

	/*
	 * Find the region that includes target address
	 */
	/* NOTE(review): assumes the found region covers the whole
	 * [start, start+size) range — confirm region_find guarantees it. */
	reg = region_find(&map->head, start, size);
	if (reg == NULL || (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */
	tgt = reg;

	/*
	 * Find the free region in current task
	 */
	self = cur_task();
	curmap = self->map;
	if ((reg = region_alloc(&curmap->head, size)) == NULL)
		return ENOMEM;
	cur = reg;

	/*
	 * Try to map into current memory
	 */
	/* The new mapping inherits the source's writability. */
	if (tgt->flags & REG_WRITE)
		map_type = PG_WRITE;
	else
		map_type = PG_READ;

	/* Physical address of the source range: the target region's
	 * physical base plus the offset of start within that region. */
	phys = (char *)tgt->phys + (start - (char *)tgt->addr);
	if (mmu_map(curmap->pgd, phys, cur->addr, size, map_type)) {
		region_free(&curmap->head, reg);
		return ENOMEM;
	}

	cur->flags = tgt->flags | REG_MAPPED;
	cur->phys = phys;

	/* Return the mapped address of the original (unrounded) byte;
	 * the buffer was verified writable by the probe above. */
	tmp = (char *)cur->addr + offset;
	umem_copyout(&tmp, alloc, sizeof(tmp));
	return 0;
}
/*
 * do_attribute - change the protection attribute of a region.
 *
 * Only the write permission can change: REG_WRITE is dropped when
 * attr lacks VMA_WRITE and added when attr has it. Mapped regions
 * cannot be changed. A shared region is first privatized
 * (copy-on-write style: new pages are allocated, the contents copied,
 * and the region unlinked from its sharing ring) so the new attribute
 * affects only this map.
 *
 * Returns 0 on success (including no-op), EINVAL if the address is
 * not an allocated region start or the region is mapped, ENOMEM if
 * pages or mappings cannot be obtained.
 */
static int do_attribute(vm_map_t map, void *addr, int attr)
{
	struct region *reg;
	int new_flags = 0;
	void *old_addr, *new_addr = NULL;
	int map_type;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE)) {
		return EINVAL;	/* not allocated */
	}

	/*
	 * The attribute of the mapped region can not be changed.
	 */
	if (reg->flags & REG_MAPPED)
		return EINVAL;

	/*
	 * Check new and old flag.
	 */
	if (reg->flags & REG_WRITE) {
		if (!(attr & VMA_WRITE))
			new_flags = REG_READ;
	} else {
		if (attr & VMA_WRITE)
			new_flags = REG_READ | REG_WRITE;
	}
	/* new_flags stays 0 when the write permission does not change. */
	if (new_flags == 0)
		return 0;	/* same attribute */

	map_type = (new_flags & REG_WRITE) ? PG_WRITE : PG_READ;

	/*
	 * If it is shared region, duplicate it.
	 */
	if (reg->flags & REG_SHARED) {

		old_addr = reg->phys;

		/* Allocate new physical page. */
		if ((new_addr = page_alloc(reg->size)) == 0)
			return ENOMEM;

		/* Copy source page */
		memcpy(phys_to_virt(new_addr), phys_to_virt(old_addr),
		       reg->size);

		/* Map new region */
		if (mmu_map(map->pgd, new_addr, reg->addr, reg->size,
			    map_type)) {
			page_free(new_addr, reg->size);
			return ENOMEM;
		}
		reg->phys = new_addr;

		/* Unlink from shared list */
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		/* If only one region remains in the ring, it is no
		 * longer shared with anyone — clear its flag too. */
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
		/* Make this region a singleton ring of itself. */
		reg->sh_next = reg->sh_prev = reg;
	} else {
		/* Private region: just remap in place with the new
		 * protection. */
		if (mmu_map(map->pgd, reg->phys, reg->addr, reg->size,
			    map_type))
			return ENOMEM;
	}
	reg->flags = new_flags;
	return 0;
}