/*
** Allocate `size` bytes from the TINY zone list.
**
** Lazily maps the first zone on first use, scans every existing zone for
** a free slot via find_alloc(), and maps a fresh zone at the tail of the
** list when all current zones are full.
**
** Fix: the original never checked mmap() against MAP_FAILED, so an
** allocation failure made init_mem() dereference MAP_FAILED. We now
** return NULL (and keep the list consistent) when the kernel refuses
** the mapping.
**
** Returns a pointer to the user block, or NULL on mmap failure.
*/
void	*tiny_malloc(size_t size)
{
	void	*ret;
	t_sm	*bws_tiny;
	t_sm	*keep;

	if (!g_pool.tiny_m)
	{
		g_pool.tiny_m = (t_sm *)mmap(0, 101 * TINY_M,
				PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
		if (g_pool.tiny_m == (t_sm *)MAP_FAILED)
			return (g_pool.tiny_m = NULL);
		init_mem(g_pool.tiny_m);
	}
	bws_tiny = g_pool.tiny_m;
	while (bws_tiny)
	{
		if ((ret = find_alloc(bws_tiny, size, TINY_M)))
			return (ret);
		/* remember the tail so a new zone can be linked after the loop */
		keep = bws_tiny;
		bws_tiny = bws_tiny->next;
	}
	keep->next = (t_sm *)mmap(0, 101 * TINY_M,
			PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (keep->next == (t_sm *)MAP_FAILED)
		return (keep->next = NULL);
	init_mem(keep->next);
	return (find_alloc(keep->next, size, TINY_M));
}
/*
** Allocate `size` bytes from the SMALL zone list.
**
** Mirrors tiny_malloc(): lazily maps the first SMALL zone, scans the
** zone list for a free slot, and appends a fresh zone when every
** existing one is full.
**
** Fix: the original never checked mmap() against MAP_FAILED, so an
** allocation failure made init_mem() dereference MAP_FAILED. We now
** return NULL (and keep the list consistent) when the kernel refuses
** the mapping.
**
** Returns a pointer to the user block, or NULL on mmap failure.
*/
void	*small_malloc(size_t size)
{
	void	*ret;
	t_sm	*bws_small;
	t_sm	*keep;

	if (!g_pool.small_m)
	{
		g_pool.small_m = (t_sm *)mmap(0, 101 * SMALL_M,
				PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
		if (g_pool.small_m == (t_sm *)MAP_FAILED)
			return (g_pool.small_m = NULL);
		init_mem(g_pool.small_m);
	}
	bws_small = g_pool.small_m;
	while (bws_small)
	{
		if ((ret = find_alloc(bws_small, size, SMALL_M)))
			return (ret);
		/* remember the tail so a new zone can be linked after the loop */
		keep = bws_small;
		bws_small = bws_small->next;
	}
	keep->next = (t_sm *)mmap(0, 101 * SMALL_M,
			PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	if (keep->next == (t_sm *)MAP_FAILED)
		return (keep->next = NULL);
	init_mem(keep->next);
	return (find_alloc(keep->next, size, SMALL_M));
}
/*
 * Release the tracked allocation that backs @vaddr.
 *
 * Looks up the bookkeeping node for @vaddr; silently returns when the
 * address is not tracked. When @unmap is set (and we are not building
 * for UML, where there is no real MMIO), the virtual mapping is torn
 * down first, then the backing range is returned to the owning
 * gen_pool, the pool's free-byte counter is credited, and the node is
 * unlinked and freed.
 *
 * NOTE(review): the order here matters — iounmap before gen_pool_free,
 * accounting before remove_alloc/kfree — so the statements must not be
 * reordered.
 */
static void __free(void *vaddr, bool unmap)
{
	struct alloc *node = find_alloc((unsigned long)vaddr);

	if (!node)
		return;
#ifndef CONFIG_UML
	if (unmap)
		/*
		 * We need the double cast because otherwise gcc complains about
		 * cast to pointer of different size. This is technically a down
		 * cast but if unmap is being called, this had better be an
		 * actual 32-bit pointer anyway.
		 */
		iounmap((void *)(unsigned long)node->vaddr);
#endif
	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
	node->mpool->free += node->len;
	remove_alloc(node);
	kfree(node);
}
/*
 * memory_pool_node_len - length in bytes of the allocation backing @vaddr.
 *
 * Fix: the sibling helpers (__free, memory_pool_node_paddr) call
 * find_alloc((unsigned long)vaddr); this one passed the raw pointer,
 * an implicit pointer-to-integer conversion. Cast explicitly for
 * consistency with the rest of the file.
 *
 * Returns the node length, or -EINVAL (cast to unsigned long, per the
 * file's existing error convention) when @vaddr is not tracked.
 */
unsigned long memory_pool_node_len(void *vaddr)
{
	struct alloc *node = find_alloc((unsigned long)vaddr);

	if (!node)
		return -EINVAL;
	return node->len;
}
/*
 * memory_pool_node_paddr - physical address backing @vaddr.
 *
 * Returns the tracked node's physical address, or -EINVAL (cast to
 * phys_addr_t, per the file's existing error convention) when @vaddr
 * does not belong to a tracked allocation.
 */
phys_addr_t memory_pool_node_paddr(void *vaddr)
{
	struct alloc *entry = find_alloc((unsigned long)vaddr);

	return entry ? entry->paddr : -EINVAL;
}
/*
 * Release the tracked allocation that backs @vaddr.
 *
 * Looks up the bookkeeping node for @vaddr; silently returns when the
 * address is not tracked. When @unmap is set, the virtual mapping is
 * torn down first, then the backing range is returned to the owning
 * gen_pool, the pool's free-byte counter is credited, and the node is
 * unlinked and freed.
 *
 * NOTE(review): unlike the other variant of __free in this file, this
 * one passes the raw pointer to find_alloc() and node->vaddr straight
 * to iounmap() — presumably find_alloc takes void* in this revision;
 * verify against its declaration. Statement order is load-bearing:
 * iounmap before gen_pool_free, accounting before remove_alloc/kfree.
 */
static void __free(void *vaddr, bool unmap)
{
	struct alloc *node = find_alloc(vaddr);

	if (!node)
		return;
	if (unmap)
		iounmap(node->vaddr);
	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
	node->mpool->free += node->len;
	remove_alloc(node);
	kfree(node);
}
/*
 * Debug-heap hook run on every free().
 *
 * With buffer protection enabled, @ptr is rewound past the leading
 * checksum to the true start of the allocation. With debugging enabled,
 * the allocation record is looked up and the call is classified as
 * non-heap / double-free / exact match / misaligned; on an exact match
 * the leading and trailing checksums are verified before the record is
 * recycled. Only valid (or debugging-disabled) pointers reach free().
 *
 * NOTE(review): assumes the allocator placed one checksum immediately
 * before the user block and one at user block + x->len — confirm
 * against the matching allocation hook.
 */
inline static void deleted_ptr(void* ptr)
{
  if (enable_buffer_protection) {
    // Calculate where the real allocation starts (at our first checksum)
    ptr = reinterpret_cast<void*>(reinterpret_cast<char*>(ptr)
          - sizeof(buffer_protection_checksum));
  }
  if (enable_debugging) {
    auto* x = find_alloc((char*) ptr);
    if (x == nullptr) {
      // No record: either never ours, or already freed and recycled.
      if (ptr < heap_begin || ptr > heap_end) {
        DPRINTF("[ERROR] Free on invalid non-heap address: %p\n", ptr);
      } else {
        DPRINTF("[ERROR] Possible double free on address: %p\n", ptr);
      }
      print_backtrace();
      return; // do not forward an invalid pointer to free()
    } else if (x->addr == ptr) {
      if (enable_debugging_verbose) {
        DPRINTF("free(%p) == %llu bytes\n", x->addr, (unsigned long long) x->len);
        safe_print_symbol(1, __builtin_return_address(1));
        safe_print_symbol(2, __builtin_return_address(2));
      }
      // This is the only place where we can verify the buffer protection
      // checksums, since we need to know the length of the allocation
      if (enable_buffer_protection) {
        auto* temp = reinterpret_cast<char*>(ptr);
        // Leading checksum sits at the start of the real allocation.
        auto underflow = memcmp(temp, &buffer_protection_checksum,
                                sizeof(buffer_protection_checksum));
        // Trailing checksum sits just past the user data (x->len bytes).
        auto overflow = memcmp(temp + sizeof(buffer_protection_checksum) + x->len,
                               &buffer_protection_checksum,
                               sizeof(buffer_protection_checksum));
        if (underflow) {
          DPRINTF("[ERROR] Buffer underflow found on address: %p\n", ptr);
          // TODO: print stacktrace
        }
        if (overflow) {
          DPRINTF("[ERROR] Buffer overflow found on address: %p\n", ptr);
          // TODO: print stacktrace
        }
      }
      // perfect match
      x->addr = nullptr;
      x->len = 0;
      free_allocs.add(x); // recycle the record for reuse
    } else if (x->addr != ptr) {
      // Pointer lands inside a live allocation but not at its start.
      DPRINTF("[ERROR] Free on misaligned address: %p inside %p:%llu",
              ptr, x->addr, (unsigned long long) x->len);
      print_backtrace();
      return; // do not free from the middle of a block
    }
  }
  free(ptr);
}