/*
 * vmem_create_error -- exercise the error paths of vmem_create() and
 * vmem_create_in_region(); every call here is expected to fail.
 */
int
main(int argc, char *argv[])
{
	VMEM *vmp;

	START(argc, argv, "vmem_create_error");

	if (argc > 1)
		UT_FATAL("usage: %s", argv[0]);

	/* a zero-size region is invalid */
	errno = 0;
	vmp = vmem_create_in_region(mem_pool, 0);
	UT_ASSERTeq(vmp, NULL);
	UT_ASSERTeq(errno, EINVAL);

	/* a zero-size directory-backed pool is invalid too */
	errno = 0;
	vmp = vmem_create("./", 0);
	UT_ASSERTeq(vmp, NULL);
	UT_ASSERTeq(errno, EINVAL);

	/* a nonexistent directory must fail with some nonzero errno */
	errno = 0;
	vmp = vmem_create("invalid dir !@#$%^&*()=", VMEM_MIN_POOL);
	UT_ASSERTeq(vmp, NULL);
	UT_ASSERTne(errno, 0);

	DONE(NULL);
}
/*
 * EMBX_OS_PhysMemMap -- map a physical address range into virtual
 * address space via the OS21 vmem API.
 *
 * pMem   - physical base address to map
 * size   - length of the mapping in bytes
 * cached - nonzero requests a cached mapping, zero an uncached one
 *          (with the write buffer disabled)
 *
 * Returns the virtual address, or NULL on failure.
 */
EMBX_VOID *EMBX_OS_PhysMemMap(EMBX_UINT pMem, int size, int cached)
{
    EMBX_VOID *vaddr = NULL;
    unsigned mode;

    EMBX_Info(EMBX_INFO_OS, (">>>>PhysMemMap(0x%08x, %d)\n", (unsigned int) pMem, size));

    /* all mappings are readable and writable */
    mode = VMEM_CREATE_READ|VMEM_CREATE_WRITE;

    if (cached)
        mode |= VMEM_CREATE_CACHED;
    else
        mode |= VMEM_CREATE_UNCACHED | VMEM_CREATE_NO_WRITE_BUFFER;

    vaddr = vmem_create((EMBX_VOID *)pMem, size, NULL, mode);
    if (NULL == vaddr)
    {
        /*
         * BUGFIX: pMem is an integer type but was passed for the %p
         * conversion, which is undefined behavior -- cast it to a
         * pointer to match the format specifier.
         */
        EMBX_DebugMessage(("PhysMemMap: pMem %p size %d cached %d failed\n",
                           (EMBX_VOID *)pMem, size, cached));
    }

    EMBX_Info(EMBX_INFO_OS, ("PhysMemMap: *vMem = %p\n", vaddr));
    EMBX_Info(EMBX_INFO_OS, ("<<<<PhysMemMap\n"));

    return vaddr;
}
/*
 * namenodeno_init -- create the namefs inode-number arena and its lock.
 */
static void
namenodeno_init(void)
{
	/*
	 * Base (void *)1 keeps address 0 out of the arena, so an inode
	 * number of zero can never be handed out.
	 */
	nm_inoarena = vmem_create("namefs_inodes", (void *)1, NM_INOQUANT,
	    1, NULL, NULL, NULL, 1, VM_SLEEP);
	mutex_init(&nm_inolock, NULL, MUTEX_DEFAULT, NULL);
}
/*
 * uvm_emap_sysinit: initialize subsystem.
 */
void
uvm_emap_sysinit(void)
{
	struct uvm_cpu *ucpu;
	size_t qmax;
	u_int i;

	/* round the configured emap size up to whole pages */
	uvm_emap_size = roundup(uvm_emap_size, PAGE_SIZE);
	qmax = 16 * PAGE_SIZE;
#if 0
	/*
	 * Disabled: KVA reservation plus the vmem arena that would back
	 * ephemeral mappings.  NOTE(review): deliberately left under
	 * #if 0 -- presumably the emap allocator is not yet enabled;
	 * confirm before removing this block.
	 */
	uvm_emap_va = uvm_km_alloc(kernel_map, uvm_emap_size, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (uvm_emap_va == 0) {
		panic("uvm_emap_init: KVA allocation failed");
	}
	uvm_emap_vmem = vmem_create("emap", uvm_emap_va, uvm_emap_size,
	    PAGE_SIZE, NULL, NULL, NULL, qmax, VM_SLEEP, IPL_NONE);
	if (uvm_emap_vmem == NULL) {
		panic("uvm_emap_init: vmem creation failed");
	}
#else
	uvm_emap_va = 0;
	uvm_emap_vmem = NULL;
#endif
	/* Initial generation value is 1. */
	uvm_emap_gen = 1;
	/* seed every attached CPU with the same initial generation */
	for (i = 0; i < maxcpus; i++) {
		ucpu = uvm.cpus[i];
		if (ucpu != NULL) {
			ucpu->emap_gen = 1;
		}
	}
}
/*
 * vmem_create signal test: create a pool, then deliberately dereference
 * the opaque pool handle -- the test only passes if SIGSEGV is raised
 * and caught by signal_handler.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "vmem_create");

	if (argc < 2 || argc > 3)
		FATAL("usage: %s directory", argv[0]);

	Vmp = vmem_create(argv[1], VMEM_MIN_POOL);
	if (Vmp == NULL)
		OUT("!vmem_create");
	else {
		/*
		 * BUGFIX: replaced the obsolete BSD sigvec() interface
		 * with POSIX sigaction(), matching the companion version
		 * of this test.
		 */
		struct sigaction v;
		sigemptyset(&v.sa_mask);
		v.sa_flags = 0;
		v.sa_handler = signal_handler;
		if (sigaction(SIGSEGV, &v, NULL) < 0)
			FATAL("!sigaction");

		/* try to deref the opaque handle */
		char x = *(char *)Vmp;
		OUT("x = %c", x);
	}

	/* reaching this point means the expected fault never happened */
	FATAL("no signal received");
}
/*
 * Example: build a minimum-size volatile memory pool on a pmem-aware
 * file system, allocate a string from it, then return the memory.
 */
int
main(int argc, char *argv[])
{
	VMEM *vmp;
	char *ptr;

	/* create minimum size pool of memory */
	vmp = vmem_create("/pmem-fs", VMEM_MIN_POOL);
	if (vmp == NULL) {
		perror("vmem_create");
		exit(1);
	}

	ptr = vmem_malloc(vmp, 100);
	if (ptr == NULL) {
		perror("vmem_malloc");
		exit(1);
	}

	strcpy(ptr, "hello, world");

	/* give the memory back */
	vmem_free(vmp, ptr);

	/* ... */
}
/*
 * vmem_check test (mmap variant): verify that a healthy pool passes
 * vmem_check(), and that creating an overlapping pool in the same
 * region is detected as corruption by both pools.
 */
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_check");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP(NULL, VMEM_MIN_POOL*2, PROT_READ|PROT_WRITE,
				MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	/* a freshly created pool must pass the consistency check */
	ASSERTeq(1, vmem_check(vmp));

	/* create pool in this same memory region */
	if (dir == NULL) {
		unsigned long pagesize =
		    (unsigned long) sysconf(_SC_PAGESIZE);
		void *overlap_addr = (void *)(((uintptr_t)mem_pool +
		    VMEM_MIN_POOL/2) & ~(pagesize-1));
		VMEM *vmp_overlap =
		    vmem_create_in_region(overlap_addr, VMEM_MIN_POOL);
		if (vmp_overlap == NULL)
			FATAL("!vmem_create_in_region");

		/* detect memory range collision */
		ASSERTne(1, vmem_check(vmp));
		ASSERTne(1, vmem_check(vmp_overlap));

		vmem_delete(vmp_overlap);
		ASSERTne(1, vmem_check(vmp_overlap));
	}

	vmem_delete(vmp);

	/* for vmem_create() memory unmapped after delete pool */
	if (!dir)
		ASSERTne(1, vmem_check(vmp));

	DONE(NULL);
}
/*
 * vmem_create signal test: create a pool, then deliberately dereference
 * the opaque pool handle -- the test only passes if SIGSEGV is raised
 * and caught by signal_handler.
 */
int
main(int argc, char *argv[])
{
	START(argc, argv, "vmem_create");

	if (argc < 2 || argc > 3)
		FATAL("usage: %s directory", argv[0]);

	Vmp = vmem_create(argv[1], VMEM_MIN_POOL);
	if (Vmp == NULL) {
		OUT("!vmem_create");
	} else {
		struct sigaction v;

		/* install a SIGSEGV handler before poking the handle */
		v.sa_handler = signal_handler;
		v.sa_flags = 0;
		sigemptyset(&v.sa_mask);
		if (sigaction(SIGSEGV, &v, NULL) < 0)
			FATAL("!sigaction");

		/* try to dereference the opaque handle */
		char x = *(char *)Vmp;
		OUT("x = %c", x);
	}

	/* getting here means no SIGSEGV was delivered */
	FATAL("no signal received");
}
int main(int argc, char *argv[]) { const int test_value = 123456; char *dir = NULL; int count = DEFAULT_COUNT; int n = DEFAULT_N; VMEM *vmp; int opt; int i, j; int use_calloc = 0; START(argc, argv, "vmem_pages_purging"); while ((opt = getopt(argc, argv, "z")) != -1) { switch (opt) { case 'z': use_calloc = 1; break; default: usage(argv[0]); } } if (optind < argc) { dir = argv[optind]; } else { usage(argv[0]); } vmp = vmem_create(dir, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create"); for (i = 0; i < n; i++) { int *test = NULL; if (use_calloc) test = vmem_calloc(vmp, 1, count * sizeof (int)); else test = vmem_malloc(vmp, count * sizeof (int)); ASSERTne(test, NULL); if (use_calloc) { /* vmem_calloc should return zeroed memory */ for (j = 0; j < count; j++) ASSERTeq(test[j], 0); } for (j = 0; j < count; j++) test[j] = test_value; for (j = 0; j < count; j++) ASSERTeq(test[j], test_value); vmem_free(vmp, test); } vmem_delete(vmp); DONE(NULL); }
int main(int argc, char *argv[]) { char *dir = NULL; void *mem_pool = NULL; VMEM *vmp; START(argc, argv, "vmem_check"); if (argc == 2) { dir = argv[1]; } else if (argc > 2) { FATAL("usage: %s [directory]", argv[0]); } if (dir == NULL) { /* allocate memory for function vmem_create_in_region() */ mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL * 2, 4 << 20); vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create_in_region"); } else { vmp = vmem_create(dir, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create"); } ASSERTeq(1, vmem_check(vmp)); /* create pool in this same memory region */ if (dir == NULL) { void *mem_pool2 = (void *)(((uintptr_t)mem_pool + VMEM_MIN_POOL / 2) & ~(Ut_pagesize - 1)); VMEM *vmp2 = vmem_create_in_region(mem_pool2, VMEM_MIN_POOL); if (vmp2 == NULL) FATAL("!vmem_create_in_region"); /* detect memory range collision */ ASSERTne(1, vmem_check(vmp)); ASSERTne(1, vmem_check(vmp2)); vmem_delete(vmp2); ASSERTne(1, vmem_check(vmp2)); } vmem_delete(vmp); /* for vmem_create() memory unmapped after delete pool */ if (!dir) ASSERTne(1, vmem_check(vmp)); DONE(NULL); }
int main(int argc, char *argv[]) { const int test_value = 123456; char *dir = NULL; void *mem_pool = NULL; VMEM *vmp; START(argc, argv, "vmem_realloc"); if (argc == 2) { dir = argv[1]; } else if (argc > 2) { FATAL("usage: %s [directory]", argv[0]); } if (dir == NULL) { /* allocate memory for function vmem_create_in_region() */ mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create_in_region"); } else { vmp = vmem_create(dir, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create"); } int *test = vmem_realloc(vmp, NULL, sizeof (int)); ASSERTne(test, NULL); test[0] = test_value; ASSERTeq(test[0], test_value); /* check that pointer came from mem_pool */ if (dir == NULL) { ASSERTrange(test, mem_pool, VMEM_MIN_POOL); } test = vmem_realloc(vmp, test, sizeof (int) * 10); ASSERTne(test, NULL); ASSERTeq(test[0], test_value); test[1] = test_value; test[9] = test_value; /* check that pointer came from mem_pool */ if (dir == NULL) { ASSERTrange(test, mem_pool, VMEM_MIN_POOL); } vmem_free(vmp, test); vmem_delete(vmp); DONE(NULL); }
int main(int argc, char *argv[]) { char *dir = NULL; void *mem_pool = NULL; VMEM *vmp; START(argc, argv, "vmem_out_of_memory"); if (argc == 2) { dir = argv[1]; } else if (argc > 2) { FATAL("usage: %s [directory]", argv[0]); } if (dir == NULL) { /* allocate memory for function vmem_create_in_region() */ mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20); vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create_in_region"); } else { vmp = vmem_create(dir, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create"); } /* allocate all memory */ void *prev = NULL; for (;;) { void **next = vmem_malloc(vmp, sizeof (void *)); if (next == NULL) { /* out of memory */ break; } /* check that pointer came from mem_pool */ if (dir == NULL) { ASSERTrange(next, mem_pool, VMEM_MIN_POOL); } *next = prev; prev = next; } ASSERTne(prev, NULL); /* free all allocations */ while (prev != NULL) { void **act = prev; prev = *act; vmem_free(vmp, act); } vmem_delete(vmp); DONE(NULL); }
int main(int argc, char *argv[]) { char *dir = NULL; void *mem_pool = NULL; VMEM *vmp; size_t obj_size; int *ptr[COUNT]; int i = 0; size_t sum_alloc = 0; START(argc, argv, "vmem_mix_allocations"); if (argc == 2) { dir = argv[1]; } else if (argc > 2) { UT_FATAL("usage: %s [directory]", argv[0]); } if (dir == NULL) { /* allocate memory for function vmem_create_in_region() */ mem_pool = MMAP_ANON_ALIGNED(POOL_SIZE, 4 << 20); vmp = vmem_create_in_region(mem_pool, POOL_SIZE); if (vmp == NULL) UT_FATAL("!vmem_create_in_region"); } else { vmp = vmem_create(dir, POOL_SIZE); if (vmp == NULL) UT_FATAL("!vmem_create"); } obj_size = MAX_SIZE; /* test with multiple size of allocations from 4MB to 2B */ for (i = 0; i < COUNT; ++i, obj_size /= 2) { ptr[i] = vmem_malloc(vmp, obj_size); if (ptr[i] == NULL) continue; sum_alloc += obj_size; /* check that pointer came from mem_pool */ if (dir == NULL) UT_ASSERTrange(ptr[i], mem_pool, POOL_SIZE); } /* allocate more than half of pool size */ UT_ASSERT(sum_alloc * 2 > POOL_SIZE); while (i > 0) vmem_free(vmp, ptr[--i]); vmem_delete(vmp); DONE(NULL); }
/*
 * Create an arena to represent the range [low, high).
 * Caller must be in a context in which VM_SLEEP is legal.
 */
id_space_t *
id_space_create(const char *name, id_t low, id_t high)
{
	ASSERT(low >= 0);
	ASSERT(low < high);

	/*
	 * The arena base is low + 1 -- presumably so the arena never
	 * returns 0 (which vmem reserves) even when low is 0; callers
	 * compensate for the bias elsewhere.
	 */
	return (vmem_create(name, (void *)(uintptr_t)(low + 1), high - low,
	    1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER));
}
int main(int argc, char *argv[]) { char *dir = NULL; void *mem_pool = NULL; VMEM *vmp; void *alloc; size_t usable_size; size_t size; unsigned i; START(argc, argv, "vmem_malloc_usable_size"); if (argc == 2) { dir = argv[1]; } else if (argc > 2) { FATAL("usage: %s [directory]", argv[0]); } if (dir == NULL) { /* allocate memory for function vmem_create_in_region() */ mem_pool = MMAP_ANON_ALIGNED(POOL_SIZE, 4 << 20); vmp = vmem_create_in_region(mem_pool, POOL_SIZE); if (vmp == NULL) FATAL("!vmem_create_in_region"); } else { vmp = vmem_create(dir, POOL_SIZE); if (vmp == NULL) FATAL("!vmem_create"); } ASSERTeq(vmem_malloc_usable_size(vmp, NULL), 0); for (i = 0; i < (sizeof (Check_sizes) / sizeof (Check_sizes[0])); ++i) { size = Check_sizes[i].size; alloc = vmem_malloc(vmp, size); ASSERTne(alloc, NULL); usable_size = vmem_malloc_usable_size(vmp, alloc); ASSERT(usable_size >= size); if (usable_size - size > Check_sizes[i].spacing) { FATAL("Size %zu: spacing %zu is bigger" "than expected: %zu", size, (usable_size - size), Check_sizes[i].spacing); } memset(alloc, 0xEE, usable_size); vmem_free(vmp, alloc); } ASSERTeq(vmem_check(vmp), 1); vmem_delete(vmp); DONE(NULL); }
/*
 * vmem_strdup test: duplicate a normal string and an empty string from
 * the pool and verify the copies.
 */
int
main(int argc, char *argv[])
{
	const char *text = "Some test text";
	const char *text_empty = "";
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_strdup");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		UT_FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20);
		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			UT_FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			UT_FATAL("!vmem_create");
	}

	/* duplicate of a non-empty string */
	char *dup_text = vmem_strdup(vmp, text);
	UT_ASSERTne(dup_text, NULL);
	UT_ASSERTeq(strcmp(text, dup_text), 0);

	/* check that pointer came from mem_pool */
	if (dir == NULL)
		UT_ASSERTrange(dup_text, mem_pool, VMEM_MIN_POOL);

	/* duplicate of the empty string */
	char *dup_empty = vmem_strdup(vmp, text_empty);
	UT_ASSERTne(dup_empty, NULL);
	UT_ASSERTeq(strcmp(text_empty, dup_empty), 0);

	/* check that pointer came from mem_pool */
	if (dir == NULL)
		UT_ASSERTrange(dup_empty, mem_pool, VMEM_MIN_POOL);

	vmem_free(vmp, dup_text);
	vmem_free(vmp, dup_empty);

	vmem_delete(vmp);

	DONE(NULL);
}
/*
 * t4_init_ddp -- initialize DDP (direct data placement) state for this
 * adapter: record the page-pod region, build an arena over it, and hook
 * the DDP CPL handlers.
 */
void t4_init_ddp(struct adapter *sc, struct tom_data *td) { td->ppod_start = sc->vres.ddp.start; /* NOTE(review): created with M_NOWAIT, so vmem_create() may return NULL; the result is not checked here -- confirm callers tolerate a NULL ppod_arena. */ td->ppod_arena = vmem_create("DDP page pods", sc->vres.ddp.start, sc->vres.ddp.size, 1, 32, M_FIRSTFIT | M_NOWAIT); t4_register_cpl_handler(sc, CPL_RX_DATA_DDP, do_rx_data_ddp); t4_register_cpl_handler(sc, CPL_RX_DDP_COMPLETE, do_rx_ddp_complete); }
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	/* segkp segments must be a whole number of pages */
	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		/*
		 * Carve segkp out of the kernel heap: a bitmap sized to
		 * the whole kvseg tracks pages, and the arena imports VA
		 * from heap_arena on demand.
		 */
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE,
		    VM_SLEEP);
	} else {
		/* dedicated VA range: the arena spans the segment itself */
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}
/*
 * MapMemory -- map a physical range uncached via the OS21 vmem API,
 * falling back to the raw address when no mapping can be created.
 */
static void*
MapMemory(ULONG ulBaseAddr, ULONG ulSize)
{
	void *mapping = vmem_create((void*)ulBaseAddr, ulSize, NULL,
	    (VMEM_CREATE_UNCACHED|VMEM_CREATE_READ|VMEM_CREATE_WRITE));

	if (mapping != NULL)
		return mapping;

	/*
	 * Mapping the 7200 register base fails, but that is ok because the
	 * address is directly usable, so just return it back.
	 */
	return (void*)ulBaseAddr;
}
/*
 * percpu_init -- initialize the per-CPU storage allocator: its locks and
 * the offset arena backed by percpu_backend_alloc.
 */
void
percpu_init(void)
{
	ASSERT_SLEEPABLE();

	rw_init(&percpu_swap_lock);
	mutex_init(&percpu_allocation_lock, MUTEX_DEFAULT, IPL_NONE);

	/* offsets are imported from the backend; there is no free function */
	percpu_offset_arena = vmem_create("percpu", 0, 0, PERCPU_QUANTUM_SIZE,
	    percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP,
	    IPL_NONE);
}
/*
 * promplat_alloc -- allocate PROM-compatible memory, lazily creating the
 * backing arena on first use (serialized by promplat_lock).
 */
void *
promplat_alloc(size_t size)
{
	mutex_enter(&promplat_lock);
	if (promplat_arena == NULL) {
		/* first caller builds the arena over heap32 */
		promplat_arena = vmem_create("promplat", NULL, 0, 8,
		    segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
	}
	mutex_exit(&promplat_lock);

	return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
}
int main(int argc, char *argv[]) { const int test_value = 123456; char *dir = NULL; void *mem_pool = NULL; VMEM *vmp; START(argc, argv, "vmem_calloc"); if (argc == 2) { dir = argv[1]; } else if (argc > 2) { UT_FATAL("usage: %s [directory]", argv[0]); } if (dir == NULL) { /* allocate memory for function vmem_create_in_region() */ mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20); vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL); if (vmp == NULL) UT_FATAL("!vmem_create_in_region"); } else { vmp = vmem_create(dir, VMEM_MIN_POOL); if (vmp == NULL) UT_FATAL("!vmem_create"); } int *test = vmem_calloc(vmp, 1, sizeof(int)); UT_ASSERTne(test, NULL); /* pool_calloc should return zeroed memory */ UT_ASSERTeq(*test, 0); *test = test_value; UT_ASSERTeq(*test, test_value); /* check that pointer came from mem_pool */ if (dir == NULL) { UT_ASSERTrange(test, mem_pool, VMEM_MIN_POOL); } vmem_free(vmp, test); vmem_delete(vmp); DONE(NULL); }
/*
 * vmem_realloc_inplace test: shrink then regrow a large allocation,
 * checking that every realloc succeeds.
 */
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_realloc_inplace");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(POOL_SIZE, 4 << 20);
		vmp = vmem_create_in_region(mem_pool, POOL_SIZE);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, POOL_SIZE);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	/* 12MB allocation, shrink to 6MB, then grow back to 12MB */
	int *buf = vmem_malloc(vmp, 12 * 1024 * 1024);
	ASSERTne(buf, NULL);

	buf = vmem_realloc(vmp, buf, 6 * 1024 * 1024);
	ASSERTne(buf, NULL);

	buf = vmem_realloc(vmp, buf, 12 * 1024 * 1024);
	ASSERTne(buf, NULL);

	vmem_free(vmp, buf);

	vmem_delete(vmp);

	DONE(NULL);
}
/* * pool_test -- test pool * * This function creates a memory pool in a file (if dir is not NULL), * or in RAM (if dir is NULL) and allocates memory for the test. */ void pool_test(const char *dir) { VMEM *vmp = NULL; if (dir != NULL) { vmp = vmem_create(dir, VMEM_MIN_POOL); } else { /* allocate memory for function vmem_create_in_region() */ void *mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL); } if (expect_create_pool == 0) { ASSERTeq(vmp, NULL); DONE(NULL); } else { if (vmp == NULL) { if (dir == NULL) { FATAL("!vmem_create_in_region"); } else { FATAL("!vmem_create"); } } } char *test = vmem_malloc(vmp, strlen(TEST_STRING_VALUE) + 1); ASSERTne(test, NULL); strcpy(test, TEST_STRING_VALUE); ASSERTeq(strcmp(test, TEST_STRING_VALUE), 0); ASSERT(vmem_malloc_usable_size(vmp, test) > 0); vmem_free(vmp, test); vmem_delete(vmp); }
static void * thread_func(void *arg) { int start_idx = *(int *)arg; for (int repeat = 0; repeat < TEST_REPEAT_CREATE_POOLS; ++repeat) { for (int idx = 0; idx < npools; ++idx) { int pool_id = start_idx + idx; /* delete old pool with the same id if exist */ if (pools[pool_id] != NULL) { vmem_delete(pools[pool_id]); pools[pool_id] = NULL; } if (pool_id % 2 == 0) { /* for even pool_id, create in region */ pools[pool_id] = vmem_create_in_region( mem_pools[pool_id / 2], VMEM_MIN_POOL); if (pools[pool_id] == NULL) UT_FATAL("!vmem_create_in_region"); } else { /* for odd pool_id, create in file */ pools[pool_id] = vmem_create(dir, VMEM_MIN_POOL); if (pools[pool_id] == NULL) UT_FATAL("!vmem_create"); } void *test = vmem_malloc(pools[pool_id], sizeof(void *)); UT_ASSERTne(test, NULL); vmem_free(pools[pool_id], test); } } return NULL; }
/* * pool_test -- test pool * * This function creates a memory pool in a file (if dir is not NULL), * or in RAM (if dir is NULL) and allocates memory for the test. */ static void pool_test(const char *dir) { VMEM *vmp = NULL; if (dir != NULL) { vmp = vmem_create(dir, VMEM_MIN_POOL); } else { /* allocate memory for function vmem_create_in_region() */ void *mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20); vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL); } if (vmp == NULL) { if (dir == NULL) { FATAL("!vmem_create_in_region"); } else { FATAL("!vmem_create"); } } char *test = vmem_malloc(vmp, strlen(TEST_STRING_VALUE) + 1); if (expect_malloc == 0) { ASSERTeq(test, NULL); } else { strcpy(test, TEST_STRING_VALUE); ASSERTeq(strcmp(test, TEST_STRING_VALUE), 0); ASSERT(vmem_malloc_usable_size(vmp, test) > 0); vmem_free(vmp, test); } vmem_delete(vmp); }
/*
 * EMBX_OS_PhysMemMap -- map a physical address range into virtual
 * address space.  Uses the OS21 vmem API when available (detected via
 * the weak vmem_create symbol); otherwise falls back to the platform
 * translation mechanisms (ST231 mmap, SH4 P1/P2 address windows).
 *
 * Returns the virtual address, or NULL on failure.
 */
EMBX_VOID *EMBX_OS_PhysMemMap(EMBX_UINT pMem, int size, int cached)
{
    EMBX_VOID *vaddr = NULL;

    EMBX_Info(EMBX_INFO_OS, (">>>>PhysMemMap(0x%08x, %d)\n", (unsigned int) pMem, size));

    /* Test the weak symbol for being non NULL, if true we are linked
     * against a vmem capable OS21
     */
    if (vmem_create) {
        unsigned mode;

        mode = VMEM_CREATE_READ|VMEM_CREATE_WRITE;

        if (cached)
            mode |= VMEM_CREATE_CACHED;
        else
            mode |= VMEM_CREATE_UNCACHED | VMEM_CREATE_NO_WRITE_BUFFER;

        vaddr = vmem_create((EMBX_VOID *)pMem, size, NULL, mode);
    } else {
#if defined __ST231__
        /* This assumes that pMem is a true physical address */
        vaddr = mmap_translate_virtual((EMBX_VOID *)pMem);
        vaddr = (cached ? mmap_translate_cached(vaddr) : mmap_translate_uncached(vaddr));

        if (!vaddr) {
            /* Failed to find a current translation, so create our own */
            EMBX_UINT page_size = 0x10000000; /* Map 256MB pages unconditionally */
            EMBX_UINT pMem_base = pMem & ~(page_size-1);
            EMBX_UINT pMem_size = (size + (page_size-1)) & ~(page_size-1);

            vaddr = mmap_create((void *)pMem_base, pMem_size, mmap_protect_rwx,
                                (cached ? mmap_cached : mmap_uncached), page_size);

            /* Adjust the returned vaddr accordingly */
            if (vaddr)
                vaddr = (void *) ((EMBX_UINT) vaddr + (pMem - pMem_base));
        }
#elif defined __sh__
        if (cached)
            vaddr = ST40_P1_ADDR(pMem);
        else
            vaddr = ST40_P2_ADDR(pMem);
#endif /* defined __ST231__ */

        if (NULL == vaddr)
        {
            /*
             * BUGFIX: pMem is an integer type but was passed for the %p
             * conversion, which is undefined behavior -- cast it to a
             * pointer to match the format specifier.
             */
            EMBX_DebugMessage(("PhysMemMap: pMem %p size %d cached %d failed\n",
                               (EMBX_VOID *)pMem, size, cached));
        }
    }

    EMBX_Info(EMBX_INFO_OS, ("PhysMemMap: *vMem = %p\n", vaddr));
    EMBX_Info(EMBX_INFO_OS, ("<<<<PhysMemMap\n"));

    return vaddr;
}
/*
 * dtrace_load -- one-time initialization of the DTrace framework:
 * installs the trap/switch/invop hooks, creates the framework locks,
 * arenas, caches and probe hashes, registers the built-in "dtrace"
 * provider (BEGIN/END/ERROR probes), and sets up each CPU.  The lock
 * acquisition and release order below is deliberate; do not reorder.
 */
static void
dtrace_load(void *dummy)
{
	dtrace_provider_id_t id;
	CPU_INFO_ITERATOR cpuind;
	struct cpu_info *cinfo;

	dtrace_debug_init(NULL);
	dtrace_gethrtime_init(NULL);

	/* Hook into the trap handler. */
	dtrace_trap_func = dtrace_trap;

	/* Hang our hook for thread switches. */
	dtrace_vtime_switch_func = dtrace_vtime_switch;

	/* Hang our hook for exceptions. */
	dtrace_invop_init();

	/*
	 * XXX This is a short term hack to avoid having to comment
	 * out lots and lots of lock/unlock calls.
	 */
	mutex_init(&mod_lock,"XXX mod_lock hack", MUTEX_DEFAULT, NULL);

	/*
	 * Initialise the mutexes without 'witness' because the dtrace
	 * code is mostly written to wait for memory. To have the
	 * witness code change a malloc() from M_WAITOK to M_NOWAIT
	 * because a lock is held would surely create a panic in a
	 * low memory situation. And that low memory situation might be
	 * the very problem we are trying to trace.
	 */
	mutex_init(&dtrace_lock,"dtrace probe state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_provider_lock,"dtrace provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_meta_lock,"dtrace meta-provider state",
	    MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_errlock,"dtrace error lock", MUTEX_DEFAULT, NULL);

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	mutex_enter(&cpu_lock);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Identifier arena over [1, INT_MAX) with quantum 1 --
	 * presumably used to hand out probe ids; confirm at call sites.
	 */
	dtrace_arena = vmem_create("dtrace", 1, INT_MAX, 1,
	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
	dtrace_state_cache = kmem_cache_create(__UNCONST("dtrace_state_cache"),
	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
	    NULL, NULL, NULL, NULL, NULL, 0);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* Probe lookup hashes keyed by module, function and name. */
	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));

	/* clamp an out-of-range tunable rather than failing the load */
	if (dtrace_retain_max < 1) {
		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
		    "setting to 1", dtrace_retain_max);
		dtrace_retain_max = 1;
	}

	/*
	 * Now discover our toxic ranges.
	 */
	dtrace_toxic_ranges(dtrace_toxrange_add);

	/*
	 * Before we register ourselves as a provider to our own framework,
	 * we would like to assert that dtrace_provider is NULL -- but that's
	 * not true if we were loaded as a dependency of a DTrace provider.
	 * Once we've registered, we can assert that dtrace_provider is our
	 * pseudo provider.
	 */
	(void) dtrace_register("dtrace", &dtrace_provider_attr,
	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);

	ASSERT(dtrace_provider != NULL);
	ASSERT((dtrace_provider_id_t)dtrace_provider == id);

	/* built-in probes fired at consumer start/stop and on errors */
	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "END", 0, NULL);
	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);

	mutex_exit(&cpu_lock);

	/*
	 * If DTrace helper tracing is enabled, we need to allocate the
	 * trace buffer and initialize the values.
	 */
	if (dtrace_helptrace_enabled) {
		ASSERT(dtrace_helptrace_buffer == NULL);
		dtrace_helptrace_buffer =
		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
		dtrace_helptrace_next = 0;
		dtrace_helptrace_size = dtrace_helptrace_bufsize;
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	mutex_enter(&cpu_lock);

	/* Setup the CPUs */
	for (CPU_INFO_FOREACH(cpuind, cinfo)) {
		(void) dtrace_cpu_setup(CPU_CONFIG, cpu_index(cinfo));
	}

	mutex_exit(&cpu_lock);

	dtrace_anon_init(NULL);
#if 0
	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/dtrace");
#endif
	return;
}
int main(int argc, char *argv[]) { const int test_value = 123456; char *dir = NULL; VMEM *vmp; size_t alignment; unsigned i; int *ptr; int *ptrs[MAX_ALLOCS]; START(argc, argv, "vmem_aligned_alloc"); if (argc == 2) { dir = argv[1]; } else if (argc > 2) { FATAL("usage: %s [directory]", argv[0]); } /* allocate memory for function vmem_create_in_region() */ void *mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20); /* use custom alloc functions to check for memory leaks */ vmem_set_funcs(malloc_custom, free_custom, realloc_custom, strdup_custom, NULL); /* test with address alignment from 2B to 4MB */ for (alignment = 2; alignment <= 4 * 1024 * 1024; alignment *= 2) { custom_alloc_calls = 0; if (dir == NULL) { vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create_in_region"); } else { vmp = vmem_create(dir, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create"); } memset(ptrs, 0, MAX_ALLOCS * sizeof (ptrs[0])); for (i = 0; i < MAX_ALLOCS; ++i) { ptr = vmem_aligned_alloc(vmp, alignment, sizeof (int)); ptrs[i] = ptr; /* at least one allocation must succeed */ ASSERT(i != 0 || ptr != NULL); if (ptr == NULL) break; /* ptr should be usable */ *ptr = test_value; ASSERTeq(*ptr, test_value); /* check for correct address alignment */ ASSERTeq((uintptr_t)(ptr) & (alignment - 1), 0); /* check that pointer came from mem_pool */ if (dir == NULL) { ASSERTrange(ptr, mem_pool, VMEM_MIN_POOL); } } for (i = 0; i < MAX_ALLOCS; ++i) { if (ptrs[i] == NULL) break; vmem_free(vmp, ptrs[i]); } vmem_delete(vmp); /* check memory leaks */ ASSERTne(custom_alloc_calls, 0); ASSERTeq(custom_allocs, 0); } DONE(NULL); }
int main(int argc, char *argv[]) { char *dir = NULL; VMEM *vmp; int *ptr; int test_case = -1; int expect_custom_alloc = 0; START(argc, argv, "vmem_valgrind"); if (argc >= 2 && argc <= 3) { test_case = atoi(argv[1]); if (test_case > 9) test_case = -1; if (argc > 2) dir = argv[2]; } if (test_case < 0) FATAL("usage: %s <test-number from 0 to 9> [directory]", argv[0]); if (test_case < 5) { OUT("use default allocator"); expect_custom_alloc = 0; } else { OUT("use custom alloc functions"); test_case -= 5; expect_custom_alloc = 1; vmem_set_funcs(malloc_custom, free_custom, realloc_custom, strdup_custom, NULL); } if (dir == NULL) { /* allocate memory for function vmem_create_in_region() */ void *mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL, 4 << 20); vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create_in_region"); } else { vmp = vmem_create(dir, VMEM_MIN_POOL); if (vmp == NULL) FATAL("!vmem_create"); } switch (test_case) { case 0: { OUT("remove all allocations and delete pool"); ptr = vmem_malloc(vmp, sizeof (int)); if (ptr == NULL) FATAL("!vmem_malloc"); vmem_free(vmp, ptr); vmem_delete(vmp); break; } case 1: { OUT("only remove allocations"); ptr = vmem_malloc(vmp, sizeof (int)); if (ptr == NULL) FATAL("!vmem_malloc"); vmem_free(vmp, ptr); break; } case 2: { OUT("only delete pool"); ptr = vmem_malloc(vmp, sizeof (int)); if (ptr == NULL) FATAL("!vmem_malloc"); vmem_delete(vmp); /* prevent reporting leaked memory as still reachable */ ptr = NULL; break; } case 3: { OUT("memory leaks"); ptr = vmem_malloc(vmp, sizeof (int)); if (ptr == NULL) FATAL("!vmem_malloc"); /* prevent reporting leaked memory as still reachable */ ptr = NULL; break; } case 4: { OUT("heap block overrun"); ptr = vmem_malloc(vmp, 12 * sizeof (int)); if (ptr == NULL) FATAL("!vmem_malloc"); /* heap block overrun */ ptr[12] = 7; vmem_free(vmp, ptr); vmem_delete(vmp); break; } default: { FATAL("!unknown test-number"); } } /* check memory leak in custom allocator */ 
ASSERTeq(custom_allocs, 0); if (expect_custom_alloc == 0) { ASSERTeq(custom_alloc_calls, 0); } else { ASSERTne(custom_alloc_calls, 0); } DONE(NULL); }