/*
 * Driver for the buddy allocator: creates a 32-unit pool, performs four
 * allocations of varying sizes, and dumps the pool state after each step.
 */
int main(int argc, char** argv)
{
    struct buddy* pool = buddy_create(32);
    buddy_dump(pool);

    /* Same request sequence as before: 4, 9, 3, 7 units. */
    static const int requests[] = { 4, 9, 3, 7 };
    for (size_t i = 0; i < sizeof requests / sizeof requests[0]; i++) {
        int offset = buddy_alloc(pool, requests[i]);
        buddy_dump(pool);
        printf("malloc value: %d\n", offset);
    }

    buddy_destroy(pool);
    return 0;
}
/*
 * Driver for the linked-list buddy system: builds a tree of depth `size`,
 * allocates three blocks, then recombines them in reverse order, printing
 * the tree after every operation.
 */
int main()
{
    int size = 10;
    struct HeadNode * head = buddy_new(size);

    if (head != NULL) {
        /* Note: the largest allocatable block is 2^k - 1, not 2^k. */
        struct Node * blockA = buddy_alloc(head, size, 511);
        buddy_print(head, size);

        struct Node * blockB = buddy_alloc(head, size, 10);
        buddy_print(head, size);

        struct Node * blockC = buddy_alloc(head, size, 10);
        buddy_print(head, size);

        /* Release in the reverse order of allocation. */
        buddy_combine(head, size, blockC);
        buddy_print(head, size);

        buddy_combine(head, size, blockB);
        buddy_print(head, size);

        buddy_combine(head, size, blockA);
        buddy_print(head, size);
    }

    buddy_free(head);
    getchar();
    return 0;
}
/*
 * Smoke test for the buddy allocator: create a pool, allocate three blocks
 * of different sizes, free the first one, print the pool, then tear down.
 */
void mem_buddy_test()
{
    as_mem_buddy_t *pool = buddy_new(4);

    unsigned first = buddy_alloc(pool, 4);
    buddy_alloc(pool, 8);
    buddy_alloc(pool, 2);

    buddy_free(pool, first);
    buddy_print(pool);
    buddy_destroy(pool);
}
/** * Allocates pages of memory from the kernel memory pool. The number of pages * requested must be a power of two and the returned pages will be contiguous * in physical memory. The memory returned is zeroed. * * \returns Pointer to the start of the allocated memory on succcess * or NULL for failure.. */ void * kmem_get_pages( /** Number of pages to allocated, 2^order: * - 0 = 1 page * - 1 = 2 pages * - 2 = 4 pages * - 3 = 8 pages * - ... */ unsigned long order ) { unsigned long block_order; void *addr; unsigned long flags; /* Calculate the block size needed; convert page order to byte order */ block_order = order + ilog2(PAGE_SIZE); /* Allocate memory from the underlying buddy system */ spin_lock_irqsave(&kmem_lock, flags); addr = buddy_alloc(kmem, block_order); if (addr) kmem_bytes_allocated += (1UL << block_order); spin_unlock_irqrestore(&kmem_lock, flags); if (addr == NULL) return NULL; /* Zero the block and return its address */ memset(addr, 0, (1UL << block_order)); return addr; }
/** * Allocates memory from the kernel memory pool. This will return a memory * region that is at least 16-byte aligned. The memory returned is zeroed. * * Arguments: * [IN] size: Amount of memory to allocate in bytes. * * Returns: * Success: Pointer to the start of the allocated memory. * Failure: NULL */ void * kmem_alloc(size_t size) { unsigned long order; struct kmem_block_hdr *hdr; unsigned long flags; /* Make room for block header */ size += sizeof(struct kmem_block_hdr); /* Calculate the block order needed */ order = ilog2(roundup_pow_of_two(size)); if (order < MIN_ORDER) order = MIN_ORDER; /* Allocate memory from the underlying buddy system */ spin_lock_irqsave(&kmem_lock, flags); hdr = buddy_alloc(kmem, order); if (hdr) kmem_bytes_allocated += (1UL << order); spin_unlock_irqrestore(&kmem_lock, flags); if (hdr == NULL) return NULL; /* Zero the block */ memset(hdr, 0, (1UL << order)); /* Initialize the block header */ hdr->order = order; /* kmem_free() needs this to free the block */ hdr->magic = KMEM_MAGIC; /* used for sanity check */ /* Return address of first byte after block header to caller */ return hdr + 1; }
/*
 * Allocates `size` bytes from the buddy-system pool.
 *
 * Requests larger than one page are rejected; requests larger than half a
 * page get a dedicated page; anything else is served from the free lists.
 * The central structure is initialized lazily on first use.
 *
 * Returns a pointer to the allocated buffer, or NULL on failure.
 *
 * FIX: removed the unreachable `return NULL;` that followed the if/else —
 * both branches already return.
 */
void*
kma_malloc(kma_size_t size)
{
  /* Cannot allocate more than one page. */
  if (size > PAGESIZE) {
    error("requested size is larger than PAGESIZE.", "");
    return NULL;
  }

  /* Lazily initialize the central free-list structure. */
  if (budfls == NULL) {
    /* init() fails when the size leaves no room for a page header. */
    if (!init(size)) {
      return NULL;
    }
  }

  if (PAGESIZE / 2 < size) {
    /* The requested size needs a page of its own. */
    return big_size_alloc(size);
  }

  /* The requested size might fit in an existing free buffer. */
  return buddy_alloc(size);
}
/** * Parses an allocation instruction * * @param cmd String representing an allocation command in the program * @returns Status of read and execute */ static status_t parse_alloc(char* cmd) { assert(cmd != NULL); assert(cmd[0] != '\0'); char var_name; int size; char alter_size; int matched; errno = 0; matched = sscanf(cmd, "%c=alloc(%d%c)", &var_name, &size, &alter_size); // Error check sprintf if (matched == 3 && errno == 0) { // Check what the alter_size variable actually contains switch (alter_size) { case 'k': case 'K': size *= 1024; case ')': break; default: return parse_error(cmd); } } else { return parse_error(cmd); } // Resolve variable var_t* var = get_var(var_name); if (var == NULL) return parse_error(cmd); // Allocate variable var->mem = buddy_alloc(size); if (var->mem == NULL) { print_fault(cmd, "buddy_alloc returned NULL", WARNING); printf("Out of memory\n"); return OUTOFMEMORY; } var->in_use = true; return SUCCESS; }
int main() { setbuf(stdout, NULL); memarea ma; unsigned int sizepow2 = 25; /* 32 Mb */ blockinfo *blocks = malloc(buddy_nblocks(sizepow2) * sizeof(blockinfo)); char *membase = malloc(1 << sizepow2); memset(membase, 0, 1 << sizepow2); buddy_init(&ma, sizepow2, membase, blocks); srand(1); int iterations = 40000; void *allocated[iterations]; int got = 0; int malloc_probability = 50; int i; for (i = 0; i < iterations; i++) { if (((rand() % 100) < malloc_probability) || (0 == got)) { int nbytes = rand() % (2 * 1024 * 1024); allocated[got] = buddy_alloc(&ma, nbytes); if (NULL != allocated[got]) got++; } else { int index = rand() % got; buddy_free(&ma, allocated[index]); memmove(&allocated[index], &allocated[index + 1], (got - index - 1) * sizeof(void *)); got--; } print_mem_line(&ma, 128); if (0 < DELAYMS) { struct timespec st; st.tv_sec = DELAYMS / 1000; st.tv_nsec = (DELAYMS % 1000) * 1000000; nanosleep(&st, NULL); } } return 0; }
/*
 * Reserve `sz` bytes of virtual address space from vms's buddy allocator,
 * optionally backing the range with freshly-allocated physical pages.
 *
 * `alloc_phys` doubles as the flags argument passed through to map() —
 * any nonzero value triggers physical backing.
 *
 * NOTE(review): as visible in this chunk the function acquires vms->lock
 * but no release or `return addr` is shown — the tail of the definition
 * appears truncated here; confirm against the full source.
 */
uintptr_t vmspace_alloc(vmspace_t *vms, unsigned sz, int alloc_phys) {
  /* FIXME: Assert sz is page aligned. */
  spinlock_acquire(&vms->lock);
  /* ~0ULL is the allocator's failure sentinel. */
  uint64_t addr = buddy_alloc(&vms->allocator, sz);
  if (alloc_phys && addr != ~0ULL) {
    size_t npages = sz >> get_page_shift();
    uintptr_t phys_pages = alloc_pages(PAGE_REQ_NONE, npages);
#if CPUBITS == 64
    assert(phys_pages != ~0UL && phys_pages != ~0ULL && "Out of memory!");
#elif CPUBITS == 32
    assert(phys_pages != ~0UL && "Out of memory!");
#endif
    int ok = map(addr, phys_pages, npages, alloc_phys);
    assert(ok == 0 && "vmspace_alloc: map failed!");
  }
/*
 * Allocates a buffer of at least `reqSize` bytes from the buddy free lists.
 *
 * The request is rounded up to a buffer class; the free lists are searched
 * from that class toward larger buffers (lower index). An exact-fit buffer
 * is used directly; a larger one is split down; if none exists, a new page
 * is obtained and the allocation is retried.
 *
 * Returns a pointer to the allocated buffer.
 *
 * FIX: the search loop tested fl[bufClass].ptr BEFORE bufClass >= 0, so
 * once every list from reqBufClass down to 0 was empty it dereferenced
 * fl[-1] — an out-of-bounds read. The operands are now swapped so the
 * bound check short-circuits the array access.
 */
void* buddy_alloc(kma_size_t reqSize)
{
  kma_size_t reqBufSize = get_roundup(reqSize);
  int reqBufClass = get_buf_class(reqBufSize);
  int bufClass = reqBufClass;
  void* bufPtr;

  /* find available buffer on free lists (bound check must come first) */
  while (bufClass >= 0 && (budfls->fl[bufClass]).ptr == NULL) {
    bufClass--;
  }

  if (bufClass == reqBufClass) {
    /* exact-fit buffer found on its own free list */
    bufPtr = (budfls->fl[bufClass]).ptr;
    remove_buffer_from_free_list((budfls->fl[bufClass]).ptr, bufClass);
    update_bitmap(((bufferHeader_t*)bufPtr)->pagePtr, bufPtr, reqBufSize, USED);
  } else {
    if (bufClass < 0) {
      /* no buffer large enough for requested size: grab a new page,
       * record its self-pointer, carve out the page header, and retry
       * against the replenished free lists */
      kpage_t* page = get_page();
      *((kpage_t**)page->ptr) = page;
      budfls->pagesUsed++;
      header_alloc(page->ptr, PAGEHEADERSIZE);
      return buddy_alloc(reqSize);
    } else {
      /* need to split a larger buffer down to the requested size */
      bufPtr = (budfls->fl[bufClass]).ptr;
      remove_buffer_from_free_list((budfls->fl[bufClass]).ptr, bufClass);
      get_buffer_from_large_buffer(((bufferHeader_t*)bufPtr)->pagePtr,
                                   reqBufSize,
                                   (budfls->fl[bufClass]).size,
                                   (kma_size_t)(bufPtr - ((bufferHeader_t*)bufPtr)->pagePtr));
      update_bitmap(((bufferHeader_t*)bufPtr)->pagePtr, bufPtr, reqBufSize, USED);
    }
  }

  /* update metadata: account the allocation against its owning page */
  ((pageHeader_t*)(((bufferHeader_t*)bufPtr)->pagePtr))->spaceUsed += reqBufSize;
  return bufPtr;
}
/* ioctl callback function */ long ioctl(struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) { char ch; /* current character to or from userspace */ int size; /* size of page being written to */ int bytes; /* current number of bytes being read or written */ switch (ioctl_num) { case IOCTL_ALLOC: printk(KERN_INFO "vmm: allocating %d bytes\n", (int)ioctl_param); return buddy_alloc((int)ioctl_param); case IOCTL_FREE: printk(KERN_INFO "vmm: freeing idx %d\n", (int)ioctl_param); return buddy_free((int)ioctl_param); case IOCTL_SET_IDX: current_idx = (int)ioctl_param; printk(KERN_INFO "vmm: setting idx to %d\n", current_idx); return 0; case IOCTL_SET_READ_SIZE: read_size = (int)ioctl_param; printk(KERN_INFO "vmm: setting read size to %d\n", read_size); return 0; case IOCTL_WRITE: /* reset the number of bytes written */ bytes = 0; /* get the page size */ size = buddy_size(current_idx); /* keep writing until some condition breaks us out */ while (1) { /* get the next byte from userspace */ get_user(ch, (char*)ioctl_param+bytes); /* if it's a null terminator we are finished writing */ if (ch == '\0') break; /* if we are about to write outside the page error out */ if (bytes > size) { printk(KERN_INFO "vmm: writing out of allocated area\n"); return -1; } /* write the byte into the pool */ *(buddy_pool+current_idx+bytes) = ch; printk(KERN_INFO "wrote %c to %d\n", ch, current_idx+bytes); /* go to the next byte */ bytes++; } printk(KERN_INFO "vmm: wrote %d bytes\n", bytes); /* return how many bytes were written */ return bytes; case IOCTL_READ: /* return error if trying to read more than the page size */ if (read_size > buddy_size(current_idx)) { printk(KERN_INFO "vmm: read bigger than allocated area\n"); return -1; } /* reset the number of bytes read */ bytes = 0; /* keep reading bytes until we've satisfied the request */ while (bytes < read_size) { /* get byte out of pool and send it to user */ put_user(*(buddy_pool+current_idx+bytes), 
(char*)ioctl_param+bytes); /* go to the next byte */ bytes++; } printk(KERN_INFO "vmm: read %d bytes\n", bytes); /* return how many bytes were read */ return bytes; default: printk(KERN_INFO "vmm: unknown ioctl call\n"); } return 0; }