static void test02(void)
{
	size_t alloc_size0 = get_allocsize();

	tst_resm(TINFO, "read allocated file size '%zu'", alloc_size0);
	tst_resm(TINFO, "make a hole with FALLOC_FL_PUNCH_HOLE");

	if (tst_kvercmp(2, 6, 38) < 0) {
		tst_brkm(TCONF, cleanup,
			 "FALLOC_FL_PUNCH_HOLE needs Linux 2.6.38 or newer");
	}

	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      block_size, block_size) == -1) {
		if (errno == EOPNOTSUPP) {
			tst_brkm(TCONF, cleanup,
				 "FALLOC_FL_PUNCH_HOLE not supported");
		}
		tst_brkm(TFAIL | TERRNO, cleanup, "fallocate() failed");
	}

	tst_resm(TINFO, "check that file has a hole with lseek(,,SEEK_HOLE)");
	off_t ret = lseek(fd, 0, SEEK_HOLE);

	if (ret != (ssize_t)block_size) {
		/* exclude error when kernel doesn't have SEEK_HOLE support */
		if (errno != EINVAL) {
			tst_brkm(TFAIL | TERRNO, cleanup,
				 "fallocate() or lseek() failed");
		}
		if (tst_kvercmp(3, 1, 0) < 0) {
			tst_resm(TINFO, "lseek() doesn't support SEEK_HOLE, "
				 "this is expected for < 3.1 kernels");
		} else {
			tst_brkm(TBROK | TERRNO, cleanup,
				 "lseek() doesn't support SEEK_HOLE");
		}
	} else {
		tst_resm(TINFO, "found a hole at '%ld' offset", (long)ret);
	}

	size_t alloc_size1 = get_allocsize();

	tst_resm(TINFO, "allocated file size before '%zu' and after '%zu'",
		 alloc_size0, alloc_size1);
	if ((alloc_size0 - block_size) != alloc_size1)
		tst_brkm(TFAIL, cleanup, "unexpected allocated size");

	char exp_buf[buf_size];

	fill_tst_buf(exp_buf);
	memset(exp_buf + block_size, 0, block_size);
	check_file_data(exp_buf, buf_size);

	tst_resm(TPASS, "test-case succeeded");
}
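/*
 * The tests above and below lean on a get_allocsize() helper defined
 * elsewhere in this test file.  A minimal sketch of what it plausibly
 * looks like, assuming the global test file descriptor 'fd' and the LTP
 * SAFE_FSTAT() wrapper: st_blocks is reported in 512-byte units
 * regardless of the filesystem block size, so the on-disk allocation is
 * st_blocks * 512.  This is an illustrative reconstruction, not the
 * test's verbatim helper.
 */
static size_t get_allocsize(void)
{
	struct stat file_stat;

	/* flush pending writes so st_blocks reflects the real allocation */
	fsync(fd);

	SAFE_FSTAT(cleanup, fd, &file_stat);

	return file_stat.st_blocks * 512;
}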
static void test03(void)
{
	tst_resm(TINFO, "zeroing file space with FALLOC_FL_ZERO_RANGE");

	if (tst_kvercmp(3, 15, 0) < 0) {
		tst_brkm(TCONF, cleanup,
			 "FALLOC_FL_ZERO_RANGE needs Linux 3.15 or newer");
	}

	size_t alloc_size0 = get_allocsize();

	tst_resm(TINFO, "read current allocated file size '%zu'", alloc_size0);

	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, block_size - 1,
		      block_size + 2) == -1) {
		if (errno == EOPNOTSUPP) {
			tst_brkm(TCONF, cleanup,
				 "FALLOC_FL_ZERO_RANGE not supported");
		}
		tst_brkm(TFAIL | TERRNO, cleanup, "fallocate failed");
	}

	/* The file hole in the specified range must be allocated and
	 * filled with zeros. Check it.
	 */
	size_t alloc_size1 = get_allocsize();

	tst_resm(TINFO, "allocated file size before '%zu' and after '%zu'",
		 alloc_size0, alloc_size1);
	if ((alloc_size0 + block_size) != alloc_size1)
		tst_brkm(TFAIL, cleanup, "unexpected allocated size");

	char exp_buf[buf_size];

	fill_tst_buf(exp_buf);
	memset(exp_buf + block_size - 1, 0, block_size + 2);
	check_file_data(exp_buf, buf_size);

	tst_resm(TPASS, "test-case succeeded");
}
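/*
 * Sketch of the fill_tst_buf() helper the tests call before building
 * their expected buffers.  Inferred from how the expected buffers are
 * patched afterwards: each block presumably gets its own letter so that
 * holes, zeroed ranges and collapsed ranges are easy to localise on
 * readback.  Only the globals already used above (buf_size, block_size)
 * are assumed.
 */
static void fill_tst_buf(char buf[])
{
	size_t i;
	const size_t blocks = buf_size / block_size;

	/* fill each block with its own letter: 'a', 'b', 'c', ... */
	for (i = 0; i < blocks; ++i)
		memset(buf + i * block_size, 'a' + (int)i, block_size);
}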
static void test04(void)
{
	tst_resm(TINFO, "collapsing file space with FALLOC_FL_COLLAPSE_RANGE");

	size_t alloc_size0 = get_allocsize();

	tst_resm(TINFO, "read current allocated file size '%zu'", alloc_size0);

	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, block_size,
		      block_size) == -1) {
		if (errno == EOPNOTSUPP) {
			tst_brkm(TCONF, cleanup,
				 "FALLOC_FL_COLLAPSE_RANGE not supported");
		}
		tst_brkm(TFAIL | TERRNO, cleanup, "fallocate failed");
	}

	size_t alloc_size1 = get_allocsize();

	tst_resm(TINFO, "allocated file size before '%zu' and after '%zu'",
		 alloc_size0, alloc_size1);
	if ((alloc_size0 - block_size) != alloc_size1)
		tst_brkm(TFAIL, cleanup, "unexpected allocated size");

	size_t size = buf_size - block_size;
	char tmp_buf[buf_size];
	char exp_buf[size];

	fill_tst_buf(tmp_buf);

	/* The collapsed block is cut out, so the expected contents are the
	 * first block followed by the last one.  The two bytes zeroed by
	 * test03 straddled the collapsed range, leaving one zero byte on
	 * each side of the join.
	 */
	memcpy(exp_buf, tmp_buf, block_size);
	memcpy(exp_buf + block_size, tmp_buf + size, block_size);
	exp_buf[block_size - 1] = exp_buf[block_size] = '\0';
	check_file_data(exp_buf, size);

	tst_resm(TPASS, "test-case succeeded");
}
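/*
 * Sketch of the remaining helper, check_file_data(): read the file back
 * from offset 0 and compare it byte-for-byte against the expected
 * buffer, breaking the test on mismatch.  Assumes the LTP
 * SAFE_LSEEK()/SAFE_READ() wrappers from the old test API; again an
 * illustrative reconstruction rather than the file's verbatim code.
 */
static void check_file_data(const char exp_buf[], size_t size)
{
	char rbuf[size];

	tst_resm(TINFO, "reading the file, comparing with expected buffer");

	SAFE_LSEEK(cleanup, fd, 0, SEEK_SET);
	SAFE_READ(cleanup, 1, fd, rbuf, size);

	if (memcmp(exp_buf, rbuf, size))
		tst_brkm(TFAIL, cleanup, "file data doesn't match expected");
}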
void free(void *data)
{
	register vm_size_t freesize;
	union header *fl;
	union header *addr;

	/* free(NULL) must be a no-op */
	if (data == NULL)
		return;

	/* step back over the size header that malloc() prepended */
	addr = ((union header *)data) - 1;

	freesize = get_allocsize(addr->size, &fl);
	if (freesize < kalloc_max) {
		/* small block: push it back onto the freelist for its size */
		addr->next = fl->next;
		fl->next = addr;
	} else {
		/* large block: return the whole region to the kernel */
		(void) vm_deallocate(mach_task_self(), (vm_offset_t)addr,
				     freesize);
	}
}
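/*
 * free() and malloc() both go through get_allocsize(size, &fl).  A sketch
 * consistent with those call sites, assuming a kfree_list[] array of
 * per-bucket freelist heads and a MINSIZE smallest bucket (both
 * hypothetical names): small requests round up to a power-of-two bucket
 * whose freelist head is handed back through *flp, while large requests
 * are simply rounded to whole pages.
 */
static vm_size_t get_allocsize(vm_size_t size, union header **flp)
{
	register int i;
	register vm_size_t allocsize;

	if (size >= kalloc_max)
		return round_page(size);

	/* find the smallest power-of-two bucket that fits */
	allocsize = MINSIZE;
	for (i = 0; allocsize < size; i++)
		allocsize <<= 1;

	if (flp)
		*flp = &kfree_list[i];
	return allocsize;
}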
void *malloc(size_t size)
{
	register vm_size_t allocsize;
	union header *addr;
	union header *fl;

	/* size_t is unsigned, so only a zero request can be rejected */
	if (size == 0)
		return NULL;

	if (!kalloc_initialized) {
		kalloc_init();
		kalloc_initialized = TRUE;
	}

	/* compute the size of the block that we will actually allocate */
	size += sizeof(union header);
	allocsize = get_allocsize(size, &fl);

	/*
	 * If our size is still small enough, check the queue for that size
	 * and allocate.
	 */
	if (allocsize < kalloc_max) {
		if ((addr = fl->next) != 0) {
			/* reuse a block from the freelist */
			fl->next = addr->next;
		} else {
			/* freelist empty: carve a fresh block */
			addr = kget_space(allocsize);
		}
	} else {
		/* This will allocate page 0 if it is free, but the header
		   will prevent us from returning a 0 pointer. */
		if (vm_allocate(mach_task_self(), (vm_offset_t *)&addr,
				allocsize, TRUE) != KERN_SUCCESS)
			return NULL;
	}
	addr->size = allocsize;
	return (void *)(addr + 1);
}
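/*
 * malloc() falls back on kget_space() when a bucket's freelist is empty.
 * A sketch inferred from the kalloc_next_space / kalloc_end_of_space
 * bookkeeping in realloc() below: bump-allocate small blocks out of the
 * current page and map a fresh one when it runs out.  The real helper
 * likely also recycles the abandoned tail of the old page onto a
 * freelist; that refinement is omitted here.
 */
static union header *kget_space(vm_size_t allocsize)
{
	vm_address_t addr;

	if (kalloc_next_space + allocsize > kalloc_end_of_space) {
		/* current page exhausted: map another one anywhere */
		addr = 0;
		if (vm_allocate(mach_task_self(), &addr, vm_page_size,
				TRUE) != KERN_SUCCESS)
			return NULL;
		kalloc_next_space = addr;
		kalloc_end_of_space = addr + vm_page_size;
	}

	addr = kalloc_next_space;
	kalloc_next_space += allocsize;
	return (union header *)addr;
}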
/* The most common use of realloc is to manage a buffer of unlimited size
   that is grown as it fills.  So we try to optimise the case where you are
   growing the last object allocated, to avoid copies. */
void *realloc(void *data, size_t size)
{
	void *p;
	union header *addr;
	vm_address_t vmaddr;
	vm_address_t newaddr;
	vm_size_t oldsize, allocsize;
	size_t tocopy;

	if (data == NULL)
		return malloc(size);

	addr = ((union header *)data) - 1;
	vmaddr = (vm_address_t)addr;
	oldsize = addr->size;
	allocsize = get_allocsize(size + sizeof(union header), NULL);
	if (allocsize == oldsize)
		return data;

	/* Deal with every case where we don't want to do a simple
	   malloc+memcpy+free; anything else is a "simple case" in the
	   comments below. */
	if (allocsize < oldsize) {
		/* Shrinking.  We favour space over time here, since if time
		   really matters you can simply skip the realloc. */
		if (oldsize >= kalloc_max) {
			if (allocsize >= kalloc_max) {
				/* Shrinking a lot: give the tail pages back
				   to the kernel and keep the block in place. */
				(void) vm_deallocate(mach_task_self(),
						     vmaddr + allocsize,
						     oldsize - allocsize);
				addr->size = allocsize;
				return data;
			}
			/* Simple case: shrinking from a whole page or pages
			   to less than a page. */
		} else {
			if (vmaddr + oldsize == kalloc_next_space) {
				/* Shrinking the last item in the current
				   page. */
				kalloc_next_space = vmaddr + allocsize;
				addr->size = allocsize;
				return data;
			}
			/* Simple case: shrinking enough to fit in a smaller
			   power of two. */
		}
		tocopy = size;
	} else {
		/* Growing. */
		if (allocsize >= kalloc_max) {
			/* Growing a lot. */
			if (oldsize >= kalloc_max) {
				/* We could try to vm_allocate extra pages
				   after the old data, but vm_allocate plus
				   vm_copy is not much more expensive than
				   that, even if it does fragment the address
				   space a bit more. */
				newaddr = vmaddr;
				if (vm_allocate(mach_task_self(), &newaddr,
						allocsize, TRUE)
				    != KERN_SUCCESS ||
				    vm_copy(mach_task_self(), vmaddr, oldsize,
					    newaddr) != KERN_SUCCESS)
					return NULL;
				(void) vm_deallocate(mach_task_self(), vmaddr,
						     oldsize);
				addr = (union header *)newaddr;
				addr->size = allocsize;
				return (void *)(addr + 1);
			}
			/* Simple case: growing from less than a page to one
			   or more whole pages. */
		} else {
			/* Growing from a within-page size to a larger
			   within-page size.  Frequently the item being grown
			   is the last one allocated, so try to avoid copies
			   in that case. */
			if (vmaddr + oldsize == kalloc_next_space) {
				if (vmaddr + allocsize <= kalloc_end_of_space) {
					/* Room left in the current page:
					   just extend in place. */
					kalloc_next_space = vmaddr + allocsize;
					addr->size = allocsize;
					return data;
				} else {
					/* Try to map the next page so the
					   block can still grow in place. */
					newaddr = round_page(vmaddr);
					if (vm_allocate(mach_task_self(),
							&newaddr, vm_page_size,
							FALSE)
					    == KERN_SUCCESS) {
						kalloc_next_space =
						    vmaddr + allocsize;
						kalloc_end_of_space =
						    newaddr + vm_page_size;
						addr->size = allocsize;
						return (void *)(addr + 1);
					}
					/* Simple case: growing the last
					   object in the page past the end of
					   the page when the next page is
					   unavailable. */
				}
			}
			/* Simple case: growing a within-page object that is
			   not the last object allocated. */
		}
		tocopy = oldsize - sizeof(union header);
	}

	/* So if we get here, we can't do any better than this: */
	p = malloc(size);
	if (p != NULL) {
		memcpy(p, data, tocopy);
		free(data);
	}
	return p;
}
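/*
 * Hypothetical usage example of the grow-in-place optimisation: when the
 * buffer being grown is the last small object allocated, realloc() above
 * should hand back the same pointer instead of copying.  Comparing the
 * old pointer value after a realloc is for illustration only; production
 * code should not rely on it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(64);

	if (buf == NULL)
		return 1;
	strcpy(buf, "growing buffer");

	/* last object allocated, so this should extend in place */
	char *bigger = realloc(buf, 200);

	if (bigger == NULL)
		return 1;
	printf("%s pointer (%p -> %p): %s\n",
	       bigger == buf ? "same" : "moved",
	       (void *)buf, (void *)bigger, bigger);
	free(bigger);
	return 0;
}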