/**
 * Runs the inx cursor check concurrently across INX_CHECK_MTHREADS threads.
 *
 * Each worker runs check_inx_cursor_mthread_cnv() over the shared opts; the
 * overall result is true only if every thread launches, joins, and returns a
 * heap-allocated true outcome.
 *
 * @param opts shared options handed to every worker thread.
 * @return true on success, false if any launch/join/worker fails.
 */
bool_t check_inx_cursor_mthread(check_inx_opt_t *opts) {

	bool_t result = true;
	void *outcome = NULL;
	pthread_t *threads = NULL;

	if (!INX_CHECK_MTHREADS) {
		return true;
	}
	else if (!(threads = mm_alloc(sizeof(pthread_t) * INX_CHECK_MTHREADS))) {
		return false;
	}

	for (uint64_t counter = 0; counter < INX_CHECK_MTHREADS; counter++) {
		if (thread_launch(threads + counter, &check_inx_cursor_mthread_cnv, opts)) {
			result = false;
		}
	}

	for (uint64_t counter = 0; counter < INX_CHECK_MTHREADS; counter++) {

		// Reset before each join: if thread_result() fails without writing
		// outcome, a stale pointer from the previous iteration would
		// otherwise be freed a second time below.
		outcome = NULL;

		if (thread_result(*(threads + counter), &outcome) || !outcome || !*(bool_t *)outcome) {
			result = false;
		}
		if (outcome) {
			mm_free(outcome);
		}
	}

	mm_free(threads);
	return result;
}
/**
 * Allocates and initializes a new thread executing run(arg).
 *
 * Flag semantics visible here:
 *  - THREAD_FLAG_PRIORITY_LOWER / _HIGHER are mutually exclusive.
 *  - THREAD_FLAG_NOTASK requires THREAD_FLAG_SUSPENDED (a task-less thread
 *    must not start running immediately).
 *  - THREAD_FLAG_SUSPENDED suppresses the immediate thread_launch().
 *  - THREAD_FLAG_DETACHED detaches the thread after creation.
 *
 * @param flags THREAD_FLAG_* bitmask controlling priority/launch/attachment.
 * @param run   thread entry function; must be non-NULL.
 * @param arg   opaque argument forwarded to run().
 * @return the new thread, or err_ptr(EINVAL)/err_ptr(ENOMEM) on failure.
 */
struct thread *thread_create(unsigned int flags, void *(*run)(void *), void *arg) {
	struct thread *t;
	int priority;

	/* check mutually exclusive flags */
	if ((flags & THREAD_FLAG_PRIORITY_LOWER) && (flags & THREAD_FLAG_PRIORITY_HIGHER)) {
		return err_ptr(EINVAL);
	}

	/* a task-less thread must be created suspended */
	if((flags & THREAD_FLAG_NOTASK) && !(flags & THREAD_FLAG_SUSPENDED)) {
		return err_ptr(EINVAL);
	}

	/* check correct executive function */
	if (!run) {
		return err_ptr(EINVAL);
	}

	/* Calculate the initial thread priority from the flags. It can be
	 * changed later with the thread_set_priority() function. */
	priority = thread_priority_by_flags(flags);

	/* Below we work with the thread instances, so the scheduler is locked
	 * (scheduling disabled) to keep our structures from being corrupted
	 * by a concurrent reschedule. */
	sched_lock();
	{
		/* allocate memory */
		if (!(t = thread_alloc())) {
			t = err_ptr(ENOMEM);
			goto out_unlock;
		}

		/* initialize internal thread structure */
		thread_init(t, priority, run, arg);

		/* link with task if needed */
		if (!(flags & THREAD_FLAG_NOTASK)) {
			task_thread_register(task_self(), t);
		}

		thread_cancel_init(t);

		/* start immediately unless the caller asked for a suspended thread */
		if (!(flags & THREAD_FLAG_SUSPENDED)) {
			thread_launch(t);
		}

		if (flags & THREAD_FLAG_DETACHED) {
			thread_detach(t);
		}
	}

out_unlock:
	sched_unlock();
	return t;
}
stringer_t * check_rand_mthread(void) { void *outcome = NULL; stringer_t *result = NULL; pthread_t *threads = NULL; if (!RAND_CHECK_MTHREADS) { return NULL; } else if (!(threads = mm_alloc(sizeof(pthread_t) * RAND_CHECK_MTHREADS))) { return st_dupe(NULLER("Thread allocation error."));; } for (uint64_t counter = 0; counter < RAND_CHECK_MTHREADS; counter++) { if (thread_launch(threads + counter, &check_rand_mthread_wrap, NULL)) { result = false; } } for (uint64_t counter = 0; counter < RAND_CHECK_MTHREADS; counter++) { if (thread_result(*(threads + counter), &outcome)) { st_cleanup(result); result = st_dupe(NULLER("Thread join error.")); } else if (outcome) { st_cleanup(result); result = outcome; } } mm_free(threads); return result; }
static void fs_test_flock(void) { int fd; int l = 200, h = 210; /* Prepare file and threads for test */ test_assert(-1 != (fd = open(FS_FLOCK, O_CREAT, S_IRUSR | S_IWUSR))); fftt = thread_create(THREAD_FLAG_SUSPENDED, first_flock_test_thread, (void *) &fd); test_assert_zero(err(fftt)); sftt = thread_create(THREAD_FLAG_SUSPENDED, second_flock_test_thread, (void *) &fd); test_assert_zero(err(sftt)); test_assert_zero(schedee_priority_set(&fftt->schedee, l)); test_assert_zero(schedee_priority_set(&sftt->schedee, l)); test_assert_zero(thread_launch(fftt)); test_assert_zero(thread_join(fftt, NULL)); test_assert_zero(thread_join(sftt, NULL)); test_assert_emitted("abcdefg"); /* Test cleanup */ test_assert_zero(remove(FS_FLOCK)); }
/**
 * Demo entry point: brings up the thread manager, spawns the first test
 * thread, and hands control to the scheduler (which never returns).
 */
int main() {
	/* Named constants instead of magic numbers in the launch call. */
	enum { STACK_BYTES = 4000, START_COUNT = 5 };

	thread_manager_init();
	thread_startup_report();

	/***********/

	/* Seed the test with one thread; it spawns the rest recursively. */
	thread_launch(STACK_BYTES, test_thread, START_COUNT);

	thread_manager_start();

	/* control never reaches this point */
}
/* Middle-priority worker: takes the semaphore, wakes the high-priority
 * thread while holding it, then releases. The emitted letters record the
 * interleaving the test asserts on. */
static void *mid_run(void *arg) {
	test_emit('c');

	semaphore_enter(&s);
	test_emit('d');

	/* High thread starts here; it should block on the semaphore we hold. */
	test_assert_zero(thread_launch(high));
	test_emit('f');

	semaphore_leave(&s);
	test_emit('i');

	return NULL;
}
/* Low-priority worker: acquires the semaphore first, launches the middle
 * thread while holding it, then releases. Emitted letters trace the
 * scheduling order under test. */
static void *low_run(void *arg) {
	test_emit('a');

	semaphore_enter(&s);
	test_emit('b');

	/* Mid thread starts here and should contend on the held semaphore. */
	test_assert_zero(thread_launch(mid));
	test_emit('j');

	semaphore_leave(&s);
	test_emit('k');

	return NULL;
}
/* Cooperative-scheduling demo: each thread prints A/B/C tagged with its own
 * counter, yielding between prints, and recursively spawns a sibling with a
 * decremented counter until it reaches 1. */
void test_thread( int n ) {
	printf( "A%d\n", n );

	/* Fan out: every thread above the base case creates one more. */
	if (n > 1) {
		thread_launch( 4000, test_thread, n-1 );
	}
	thread_relinquish();

	printf( "B%d\n", n );
	thread_relinquish();

	printf( "C%d\n", n );
	thread_relinquish();
}
/* First flock contender: grabs the exclusive lock, then launches the second
 * thread while still holding it, so the second must block until LOCK_UN.
 * The emitted letters interleave with the second thread's to form the
 * "abcdefg" trace the test asserts. */
static void *first_flock_test_thread(void *arg) {
	int fd = *((int *) arg);

	test_emit('a');
	test_assert_zero(flock(fd, LOCK_EX));
	test_emit('b');

	/* Second contender starts here and should block on LOCK_EX. */
	test_assert_zero(thread_launch(sftt));
	test_emit('d');

	test_assert_zero(flock(fd, LOCK_UN));
	test_emit('g');

	return NULL;
}
/**
 * Multi-threaded append check for an index of the given type.
 *
 * Allocates a manually-locked index, runs INX_CHECK_MTHREADS workers that
 * append to it, then verifies all workers succeeded and the index drained
 * back to zero entries.
 *
 * @param inx_type index implementation to allocate (OR'd with manual locking).
 * @param errmsg   receives a description of the first failure (may be empty).
 * @return true on success, false on any allocation/launch/join/worker failure.
 */
bool_t check_inx_append_mthread(MAGMA_INDEX inx_type, stringer_t *errmsg) {

	// BUG FIX: result was uninitialized; a failed thread_result() that never
	// writes it would have passed an indeterminate pointer to mm_cleanup().
	void *result = NULL;
	inx_t *inx = NULL;
	bool_t outcome = true;
	pthread_t *threads = NULL;

	if (status() && (!(inx = inx_alloc(inx_type | M_INX_LOCK_MANUAL, &ns_free)))) {
		st_sprint(errmsg, "An error occured during initial allocation in the inx check append multi-threaded test.");
		outcome = false;
	}
	else {

		if (!INX_CHECK_MTHREADS || !(threads = mm_alloc(sizeof(pthread_t) * INX_CHECK_MTHREADS))) {
			outcome = false;
		}
		else {

			for (uint64_t counter = 0; counter < INX_CHECK_MTHREADS; counter++) {
				if (thread_launch(threads + counter, &check_inx_append_mthread_test, inx)) {
					st_sprint(errmsg, "An error occured when launching a thread.");
					outcome = false;
				}
			}

			for (uint64_t counter = 0; counter < INX_CHECK_MTHREADS; counter++) {

				// Reset before each join so a stale pointer from the previous
				// iteration can't be double-freed by mm_cleanup() below.
				result = NULL;

				if (thread_result(*(threads + counter), &result) || !result || !*(bool_t *)result) {
					if (st_empty(errmsg)) st_sprint(errmsg, "One of the append check threads returned false.");
					outcome = false;
				}
				mm_cleanup(result);
			}

			mm_free(threads);
		}

		// Workers are expected to remove what they appended.
		if (inx_count(inx) != 0 && st_empty(errmsg)) {
			st_sprint(errmsg, "The index was not properly cleared.");
			outcome = false;
		}
	}

	if (inx) {
		inx_cleanup(inx);
	}

	return outcome;
}
/**
 * Creates and starts the idle thread for the current CPU.
 *
 * The thread is created suspended and task-less, then registered with the
 * kernel task, dropped to the minimum scheduling priority, bound to this
 * CPU via cpu_init(), and finally launched.
 *
 * @return 0 on success, or the negative/err code from thread_create().
 */
int idle_thread_create(void) {
	struct thread *t;

	/* Created suspended so we can finish setup (task link, priority, CPU
	 * binding) before it ever runs; NOTASK because it joins the kernel
	 * task explicitly below. */
	t = thread_create(THREAD_FLAG_NOTASK | THREAD_FLAG_SUSPENDED, idle_run, NULL);
	if (err(t)) {
		log_error(" Couldn't create thread err=%d", err(t));
		return err(t);
	}

	task_thread_register(task_kernel_task(), t);

	/* Idle must never preempt real work. */
	schedee_priority_set(&t->schedee, SCHED_PRIORITY_MIN);
	log_debug("idle_schedee = %#x", &t->schedee);

	/* Bind to this CPU before launch so the scheduler sees it in place. */
	cpu_init(cpu_get_id(), t);
	thread_launch(t);

	return 0;
}
/**
 * Runs the tokyo tank check concurrently across TANK_CHECK_DATA_MTHREADS
 * threads, then (when cleanup is enabled) verifies the stored object count
 * returned to its starting value.
 *
 * @param opts shared options handed to every worker thread.
 * @return true on success, false if any launch/join/worker fails or the
 *         object count doesn't match the initial snapshot.
 */
bool_t check_tokyo_tank_mthread(check_tank_opt_t *opts) {

	bool_t result = true;
	void *outcome = NULL;
	pthread_t *threads = NULL;
	uint64_t local_objects = tank_count();

	if (!TANK_CHECK_DATA_MTHREADS) {
		return true;
	}
	else if (!(threads = mm_alloc(sizeof(pthread_t) * TANK_CHECK_DATA_MTHREADS))) {
		return false;
	}

	for (uint64_t counter = 0; counter < TANK_CHECK_DATA_MTHREADS; counter++) {
		if (thread_launch(threads + counter, &check_tokyo_tank_mthread_cnv, opts)) {
			result = false;
		}
	}

	for (uint64_t counter = 0; counter < TANK_CHECK_DATA_MTHREADS; counter++) {

		// Reset before each join: if thread_result() fails without writing
		// outcome, the stale pointer from the previous iteration would
		// otherwise be freed a second time below.
		outcome = NULL;

		if (thread_result(*(threads + counter), &outcome) || !outcome || !*(bool_t *)outcome) {
			result = false;
		}
		if (outcome) {
			mm_free(outcome);
		}
	}

	mm_free(threads);

	if (TANK_CHECK_DATA_CLEANUP && tank_count() != local_objects) {
		log_info("The number of objects doesn't match what we started with. {start = %lu / finish = %lu}", local_objects, tank_count());
		return false;
	}

	return result;
}