static void test_multitasks_start()
{
    task_t taskA, taskB;

    kprintf("create taskA with pri=200\n");
    taskA = task_create("ttaskA", task_multitasks_taskA, NULL, NULL, 0x1000, 200, 0, 0);
    task_resume_noschedule(taskA);

    kprintf("create taskB with pri=210\n");
    taskB = task_create("ttaskB", task_multitasks_taskB, NULL, NULL, 0x1000, 210, 0, 0);
    task_resume_noschedule(taskB);

    task_set_schedule_hook(sched_hook);

    kprintf("cmd task Delay 500 ticks\n");
    task_delay(500);

    kprintf("destroy taskA\n");
    task_destroy(taskA);
    kprintf("destroy taskB\n");
    task_destroy(taskB);

    task_set_schedule_hook(NULL);
    kprintf("testcase multitasks end\n");
}
static void test_tasklock_start()
{
    task_t taskA, taskB;

    taskA = task_create("ttaskA", task_lock_taskA, NULL, NULL, 0x1000, 210, 0, 0);
    task_resume_noschedule(taskA);

    taskB = task_create("ttaskB", task_lock_taskB, NULL, NULL, 0x1000, 90, 0, 0);
    task_resume_noschedule(taskB);

    task_delay(500);

    task_destroy(taskA);
    task_destroy(taskB);
}
static void handle_disconnect_stage2(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;
    GHashTableIter iter;
    gpointer key, val;

    pthread_mutex_lock(&lock);
    g_hash_table_iter_init(&iter, tasks_ht);
    while (g_hash_table_iter_next(&iter, &key, &val)) {
        struct async_network_task_s *cur = key;
        if (cur == task)    // skip current task
            continue;
        if (cur->resource == task->resource) {
            g_hash_table_iter_remove(&iter);
            event_free(cur->event);
            ppb_message_loop_post_work_with_result(cur->callback_ml, cur->callback, 0,
                                                   PP_ERROR_ABORTED, 0, __func__);
            g_slice_free(struct async_network_task_s, cur);
        }
    }
    pthread_mutex_unlock(&lock);

    close(task->sock);
    task_destroy(task);
}
int main()
{
    Queue *q = queue_new("example", "localhost", 6379, 0, NULL);
    queue_connect(q);
    printf("Current Queue Length: %lld\n", queue_length(q));

    /*
    char *data = calloc(20, sizeof(char));
    strncpy(data, "\"ac\"", 7);
    Task *t = task_new(data, "0");
    Job *j = queue_enqueue(q, t);
    printf("Got job: %s\n", j->urn);
    task_destroy(t);
    job_destroy(j);
    */

    Task *t = queue_wait(q, 0);
    printf("Received: %s URN: %s \n", t->data, t->urn);
    task_destroy(t);
    queue_destroy(q);
    return 0;
}
void acm_stop()
{
    free(acm_send_buffer);
    free(acm_recv_buffer);
    task_destroy(&acm_parse_task);
}
/* Worker loop: pop tasks off the queue and execute them until the pool is
 * finished. The _Y markers appear to be yield-point macros used to provoke
 * context switches when testing for races. */
static void *task_consum(void *pl)
{
    thread_pool_t p = NULL;
    task_t t = NULL;
    routine_t r = NULL;
    arg_t a = NULL;

    assert(pl != NULL);
    p = *(thread_pool_t *) pl;
    assert(p != NULL);
    assert(p->tasks != NULL);
    _Y
    while (!p->pool_finish) {
        p->tasks = task_queue_pop(p->tasks, &t);
        _Y
        assert(t != NULL);
        _Y
        /* take the routine and the argument and execute it */
        r = task_routine(t);
        _Y
        a = task_arg(t);
        _Y
        t = task_destroy(t);
        _Y
        t = NULL;
        assert(r != NULL);
        r(a); /* execute the routine */
    }
    pthread_mutex_lock(&p->mutex);
    p->finalizados++;
    pthread_mutex_unlock(&p->mutex);
    pthread_exit(NULL);
}
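/* A producer-side sketch to pair with task_consum() above. task_new() and
 * task_queue_push() are hypothetical names mirroring the pop/destroy API the
 * consumer uses; since task_consum() pops without taking p->mutex, the queue
 * itself is assumed to handle its own locking and blocking. */
static void pool_submit(thread_pool_t p, routine_t r, arg_t a)
{
    task_t t = task_new(r, a);                  /* hypothetical constructor */
    assert(t != NULL);
    p->tasks = task_queue_push(p->tasks, t);    /* hypothetical enqueue */
}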
static void test_multitake_start()
{
    task_t taskA, taskB;

    mtx_initialize(&mtx_multitake);

    taskA = task_create("ttaskA", task_multitake_taskA, NULL, NULL, 0x1000, 150, 0, 0);
    task_resume_noschedule(taskA);

    taskB = task_create("ttaskB", task_multitake_taskB, NULL, NULL, 0x1000, 150, 0, 0);
    task_resume_noschedule(taskB);

    task_delay(500);

    task_destroy(taskA);
    task_destroy(taskB);
    mtx_destroy(&mtx_multitake);
}
/* Intended to be called from interrupt context only */
void
task_trigger_exception (struct task *task, int exception, busword_t textaddr, busword_t data, int code)
{
  DECLARE_CRITICAL_SECTION (except);

  ASSERT (get_current_context () == KERNEL_CONTEXT_INTERRUPT);
  ASSERT (get_current_task () == task);

  if (exception < 0 || exception >= EX_MAX)
    FAIL ("exception code unrecognized\n");

  if (task->ts_ex_handlers[exception] == NULL)
    {
      /* Process shall be killed */
      TASK_ATOMIC_ENTER (except);
      (void) wake_up (task, TASK_STATE_EXITED, 0);
      task_destroy (task);
      schedule ();
      TASK_ATOMIC_LEAVE (except);
    }
  else
    (task->ts_ex_handlers[exception]) (task, exception, textaddr, data, code);
}
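/* A minimal sketch of installing a handler so task_trigger_exception() takes
 * the non-fatal branch instead of killing the task. The handler signature
 * mirrors the call above; the index EX_ILL_INSTRUCTION is a hypothetical
 * example, and the registration is assumed to be a plain table store done
 * while the task is quiescent. */
static void
my_exception_handler (struct task *task, int exception, busword_t textaddr, busword_t data, int code)
{
  /* Inspect textaddr/data/code here, then resume or exit the task. */
}

static void
install_handler (struct task *task)
{
  task->ts_ex_handlers[EX_ILL_INSTRUCTION] = my_exception_handler; /* hypothetical index */
}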
/* Thread entry point for a remotely spawned task: decode the message, run the
 * method, then notify the parent place and release the message. */
void *_thread_start_local(void *arg)
{
    struct Msg *msg = (struct Msg *) arg;

    _thread_sethere(msg->placeTo);

    task_create();
    /* The transport buffer starts with the parent _task pointer, followed by
     * the method id and its parameters. */
    method_t method = *((method_t *)(msg->tb.addr + sizeof(_task *)));
    void *params = (void *)(msg->tb.addr + sizeof(_task *) + sizeof(method_t));
    _thread_run_local(method, params);
    task_destroy();

    volatile _task *parent = *((_task **) msg->tb.addr);
    volatile place_t parent_place = msg->placeFrom;
    task_deregister_send((void *) parent, parent_place);

    free(msg->tb.addr);
    _deallocate_msg_only(msg);

    if (!USE_THREAD_POOL) {
        _thread_exit(NULL);
    }
    return NULL;
}
static void handle_tcp_connect_stage1(struct async_network_task_s *task)
{
    struct evdns_request *req;
    struct sockaddr_in sai;

    memset(&sai, 0, sizeof(sai));
    if (inet_pton(AF_INET, task->host, &sai.sin_addr) == 1) {
        // already a valid IP address
        handle_tcp_connect_stage2(DNS_ERR_NONE, DNS_IPv4_A, 1, 300, &sai.sin_addr, task);
        return;
    }

    // queue DNS request
    req = evdns_base_resolve_ipv4(evdns_b, task->host, DNS_QUERY_NO_SEARCH,
                                  handle_tcp_connect_stage2, task);
    // TODO: what about ipv6?
    if (!req) {
        trace_warning("%s, early dns resolution failure (%s:%u)\n", __func__, task->host,
                      (unsigned int)task->port);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               PP_ERROR_NAME_NOT_RESOLVED, 0, __func__);
        task_destroy(task);
        return;
    }
}
static void handle_tcp_connect_stage4(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;
    struct pp_tcp_socket_s *ts = pp_resource_acquire(task->resource, PP_RESOURCE_TCP_SOCKET);
    if (!ts) {
        trace_warning("%s, tcp socket resource was closed during request (%s:%u)\n", __func__,
                      task->host, (unsigned int)task->port);
        free(task->addr);
        task_destroy(task);
        return;
    }

    char buf[200];
    socklen_t len = sizeof(buf);
    if (event_flags & EV_TIMEOUT)
        ts->is_connected = 0;
    else
        ts->is_connected = (getpeername(ts->sock, (struct sockaddr *)buf, &len) == 0);

    if (ts->is_connected) {
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, PP_OK, 0,
                                               __func__);
        pp_resource_release(task->resource);
        free(task->addr);
        task_destroy(task);
        return;
    }

    // try other addresses, one by one
    task->addr_ptr++;
    if (task->addr_ptr < task->addr_count) {
        pp_resource_release(task->resource);
        handle_tcp_connect_stage3(task);
        return;
    }

    // no addresses left, fail gracefully
    trace_warning("%s, connection failed to all addresses (%s:%u)\n", __func__, task->host,
                  (unsigned int)task->port);
    ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                           get_pp_errno(), 0, __func__);
    pp_resource_release(task->resource);
    free(task->addr);
    task_destroy(task);
}
static void test_destroyipc_start()
{
    task_t taskA, taskB;

    sem_initialize(&sem_destroyipc, 1);

    taskA = task_create("ttaskA", task_destroyipc_taskA, NULL, NULL, 0x1000, 150, 0, 0);
    task_resume_noschedule(taskA);

    taskB = task_create("ttaskB", task_destroyipc_taskB, NULL, NULL, 0x1000, 150, 0, 0);
    task_resume_noschedule(taskB);

    task_delay(200);
    sem_destroy(&sem_destroyipc);
    task_delay(300);

    task_destroy(taskA);
    task_destroy(taskB);
}
static void test_taskpriority_start()
{
    task_t taskA, taskB, taskC;

    taskA = task_create("ttaskA", task_priority_taskA, NULL, NULL, 0x1000, 150, 0, 0);
    task_resume_noschedule(taskA);

    taskB = task_create("ttaskB", task_priority_taskB, NULL, NULL, 0x1000, 90, 0, 0);
    task_resume_noschedule(taskB);

    taskC = task_create("ttaskC", task_priority_taskC, NULL, NULL, 0x1000, 80, 0, 0);
    task_resume_noschedule(taskC);

    task_delay(500);

    task_destroy(taskA);
    task_destroy(taskB);
    task_destroy(taskC);
}
static void test_suspend_start()
{
    task_t taskB, taskC;

    task_suspendA = task_create("ttaskA", task_suspend_taskA, NULL, NULL, 0x1000, 150, 0, 0);
    task_resume_noschedule(task_suspendA);

    taskB = task_create("ttaskB", task_suspend_taskB, NULL, NULL, 0x1000, 100, 0, 0);
    task_resume_noschedule(taskB);

    taskC = task_create("ttaskC", task_suspend_taskC, NULL, NULL, 0x1000, 120, 0, 0);
    task_resume_noschedule(taskC);

    task_delay(500);

    task_destroy(task_suspendA);
    task_destroy(taskB);
    task_destroy(taskC);
}
void task_c_remove()
{
    asm("cli");
    task_destroy(taskc_tid);
    asm("sti");
    memset(StackC, 0, sizeof(StackC));
    settextcolor(0xf, 0x0);
    screen_pos_putch(' ', 70, 0);
}
static void test_pinherit_start()
{
    task_t taskA, taskB, taskC;

    mtx_initialize(&mtx_pinherit);

    taskA = task_create("ttaskA", task_pinherit_taskA, NULL, NULL, 0x1000, 250, 0, 0);
    task_resume_noschedule(taskA);

    taskB = task_create("ttaskB", task_pinherit_taskB, NULL, NULL, 0x1000, 210, 0, 0);
    task_resume_noschedule(taskB);

    taskC = task_create("ttaskC", task_pinherit_taskC, NULL, NULL, 0x1000, 220, 0, 0);
    task_resume_noschedule(taskC);

    task_delay(500);

    task_destroy(taskA);
    task_destroy(taskB);
    task_destroy(taskC);
    mtx_destroy(&mtx_pinherit);
}
/** Create a program using an existing address space.
 *
 * @param as         Address space containing a binary program image.
 * @param entry_addr Program entry-point address in program address space.
 * @param name       Name to set for the program's task.
 * @param prg        Buffer for storing program information.
 *
 * @return EOK on success or negative error code.
 *
 */
int program_create(as_t *as, uintptr_t entry_addr, char *name, program_t *prg)
{
    prg->loader_status = EE_OK;

    prg->task = task_create(as, name);
    if (!prg->task)
        return ELIMIT;

    /*
     * Create the stack address space area.
     */
    uintptr_t virt = USTACK_ADDRESS;
    as_area_t *area = as_area_create(as,
        AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
        STACK_SIZE, AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, 0);
    if (!area) {
        task_destroy(prg->task);
        return ENOMEM;
    }

    uspace_arg_t *kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    kernel_uarg->uspace_entry = (void *) entry_addr;
    kernel_uarg->uspace_stack = (void *) virt;
    kernel_uarg->uspace_stack_size = STACK_SIZE;
    kernel_uarg->uspace_thread_function = NULL;
    kernel_uarg->uspace_thread_arg = NULL;
    kernel_uarg->uspace_uarg = NULL;

    /*
     * Create the main thread.
     */
    prg->main_thread = thread_create(uinit, kernel_uarg, prg->task,
        THREAD_FLAG_USPACE, "uinit");
    if (!prg->main_thread) {
        free(kernel_uarg);
        as_area_destroy(as, virt);
        task_destroy(prg->task);
        return ELIMIT;
    }

    return EOK;
}
void finalize()
{
    // print statistics
    if (node == 0) {
        srpprintf(node, "-----");
        srpprintf(node, "task:");
        dump_task(stdout, t);
    }

    // synchronize position in the program;
    // cleanup and output overhead is not of interest here
    MPI_Barrier(MPI_COMM_WORLD);
    tend = MPI_Wtime();

    if (node_solver) {
        // print the solution
        srpprintf(node, "states searched: %d (pruned: %d)", cc, co);
        if (!solution) {
            srpprintf(node, "no solution was found!");
        } else {
            srpprintf(node, "solution found, p=%d", solution->p);
            dump_hist(stdout, solution->h);
        }
        srpprintf(node, "time: %fs", tend - tbeg);

        // cleanup
        stack_item_destroy(solution);
        stack_destroy(s);
        task_destroy(t);
        task_destroy(tf);
    }

    // MPI finalization
    srpdebug("mpi", node, "finalization <node=%d/%d>", node, node_count);
    MPI_Finalize();

    // done
    exit(EXIT_SUCCESS);
}
void reaper(void)
{
    task_t *task;

    asm("cli"); /* XXX race condition? */
    for (;;) {
        sem_acquire(reaper_sem);
        task = rsrc_dequeue(reaper_queue);
        // kprintf("reaper reclaiming thread #%d (%s)", task->rsrc.id, task->rsrc.name);
        task_destroy(task);
    }
}
/*
 * This is the heart of the whole test-suite process. It cleans up any
 * existing temporary files and log files left behind, then propagates a
 * list of tests from `curdir` by scanning it for template files and
 * compiling them into tasks, which it schedules (executes), reporting
 * errors and whatnot. It then proceeds to destroy the tasks and return
 * the memory -- it's the engine :)
 *
 * It returns true if tests could be propagated, otherwise it returns
 * false.
 *
 * It expects con_init() was called beforehand.
 */
static GMQCC_WARN bool test_perform(const char *curdir, const char *defs)
{
    size_t failed = false;
    static const char *default_defs = "defs.qh";

    size_t pad[] = {
        /* test ### [succeed/fail]: `description` [tests/template.tmpl] [type] */
        0, 0, 0
    };

    /*
     * If the definition file isn't set to anything, we use default_defs
     * here, which is "defs.qh".
     */
    if (!defs) {
        defs = default_defs;
    }

    task_precleanup(curdir);
    if (!task_propagate(curdir, pad, defs)) {
        con_err("error: failed to propagate tasks\n");
        task_destroy();
        return false;
    }

    /*
     * If we made it here, all tasks were propagated from their resultant
     * template files. So we can start the FILO scheduler. This has been
     * designed in the most thread-safe way possible for future threading:
     * it's designed to prevent lock contention and possible synchronization
     * issues.
     */
    failed = task_schedualize(pad);
    if (failed)
        con_out("%u out of %u tests failed\n", failed, vec_size(task_tasks));

    task_destroy();
    return (failed) ? false : true;
}
static void handle_tcp_connect_stage2(int result, char type, int count, int ttl,
                                      void *addresses, void *arg)
{
    struct async_network_task_s *task = arg;

    if (result != DNS_ERR_NONE || count < 1) {
        trace_warning("%s, evdns returned code %d, count = %d (%s:%u)\n", __func__, result,
                      count, task->host, (unsigned int)task->port);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               PP_ERROR_NAME_NOT_RESOLVED, 0, __func__);
        task_destroy(task);
        return;
    }

    evutil_make_socket_nonblocking(task->sock);

    task->addr_count = count;
    task->addr_ptr = 0;
    task->addr_type = type;
    if (type == DNS_IPv4_A) {
        task->addr = malloc(4 * count);
        memcpy(task->addr, addresses, 4 * count);
    } else if (type == DNS_IPv6_AAAA) {
        task->addr = malloc(16 * count);
        memcpy(task->addr, addresses, 16 * count);
    } else {
        trace_error("%s, bad evdns type %d (%s:%u)\n", __func__, type, task->host,
                    (unsigned int)task->port);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               PP_ERROR_FAILED, 0, __func__);
        task_destroy(task);
        return;
    }

    handle_tcp_connect_stage3(task);
}
static void handle_tcp_write_stage2(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;
    int32_t retval = send(sock, task->buffer, task->bufsize, 0);

    if (retval < 0)
        retval = get_pp_errno();

    ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, retval, 0,
                                           __func__);
    task_destroy(task);
}
static void handle_tcp_write_stage1(struct async_network_task_s *task)
{
    struct pp_tcp_socket_s *ts = pp_resource_acquire(task->resource, PP_RESOURCE_TCP_SOCKET);
    if (!ts) {
        trace_error("%s, bad resource\n", __func__);
        task_destroy(task);
        return;
    }

    struct event *ev = event_new(event_b, ts->sock, EV_WRITE, handle_tcp_write_stage2, task);
    pp_resource_release(task->resource);
    add_event_mapping(task, ev);
    event_add(ev, NULL);
}
int task_sync(task_t **execution_context, int count)
{
    int i;
    int someone_not_done = 1;

    while (someone_not_done) {
        someone_not_done = 0;
        for (i = 0; i < count; i++) {
            // alternative condition: also require a non-NULL, non -1 result
            //if (execution_context[i]->status != COMPLETED || execution_context[i]->result == NULL || *((int*)execution_context[i]->result) == -1) {
            if (execution_context[i]->status != COMPLETED) {
                someone_not_done = 1;
            }
        }
        if (someone_not_done) {
            if (!inside_main()) {
                debug("%d - Yielding...", sched_get()->id);
                sched_yield_current();
            } else {
                debug("Main waiting");
                pthread_mutex_lock(&wake_up_main_lock);
                pthread_cond_wait(&wake_up_main, &wake_up_main_lock);
                pthread_mutex_unlock(&wake_up_main_lock);
                debug("Main woken up");
            }
        }
    }

    // if control is here, child tasks have finished; let's clean up
    for (i = 0; i < count; i++) {
        task_t *task = execution_context[i];
        task_destroy(task);
    }

    if (inside_main()) {
        debug("Going outside of sync in main %d", (unsigned int) pthread_self());
    } else {
        debug("Going outside of sync in task %d", task_current()->id);
    }
    return 0;
}
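/* A minimal usage sketch for task_sync(). task_spawn() and worker() are
 * hypothetical stand-ins for however this runtime actually creates and starts
 * tasks; only the fork/join pattern is the point here. */
static void *worker(void *arg)
{
    return NULL; /* placeholder body */
}

void example_fork_join(void)
{
    task_t *tasks[2];
    tasks[0] = task_spawn(worker, NULL);    /* hypothetical constructor */
    tasks[1] = task_spawn(worker, NULL);    /* hypothetical constructor */
    /* Spins (yielding, or sleeping on the main-wakeup condvar when called
     * from main) until both tasks reach COMPLETED, then destroys them. */
    task_sync(tasks, 2);
}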
static void handle_tcp_connect_with_net_address(struct async_network_task_s *task)
{
    if (task->netaddr.size == sizeof(struct sockaddr_in)) {
        struct sockaddr_in *sai = (void *)task->netaddr.data;
        task->port = ntohs(sai->sin_port);
        handle_tcp_connect_stage2(DNS_ERR_NONE, DNS_IPv4_A, 1, 3600, &sai->sin_addr, task);
    } else if (task->netaddr.size == sizeof(struct sockaddr_in6)) {
        struct sockaddr_in6 *sai = (void *)task->netaddr.data;
        task->port = ntohs(sai->sin6_port);
        handle_tcp_connect_stage2(DNS_ERR_NONE, DNS_IPv6_AAAA, 1, 3600, &sai->sin6_addr, task);
    } else {
        trace_error("%s, bad address type\n", __func__);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               PP_ERROR_NAME_NOT_RESOLVED, 0, __func__);
        task_destroy(task);
    }
}
/**
 * Update the TSS with the new kernel stack.
 * Clean up the last task, destroying it or placing it on a queue as needed.
 */
static void __attribute__((noinline)) post_context_switch(void)
{
    /* Update the kernel stack in the TSS (these are always aligned minus 16) */
    tss.rsp0 = ALIGN_UP(curr_task->kernel_rsp, PAGE_SIZE) - 16;
    syscall_kernel_rsp = tss.rsp0;
    /* todo: ltr or ldtr to load the TSS again? */

    /* Clean up the previous task */
    // debug("Switched from %s --> %s\n", last_task->cmdline, curr_task->cmdline);

    /* Do not destroy or add the idle task to the run queues */
    if (last_task != &kernel_task) {
        if (last_task->state == TASK_DEAD) {
            task_destroy(last_task);
        } else {
            queue_add_by_state(last_task);
        }
    }

    /* Refill the timeslice */
    reset_timeslice(curr_task);
}
/**
 * Until pthread_exit, pull a task_t out of the task queue and run it on our thread.
 */
void *execute_task_thread_internal(void *args)
{
    execution_args_t *yargs = (execution_args_t *) args;
    fifo_t *tasks = yargs->task_list;
    dna_thread_context_t *context = yargs->thread_context;
    free(yargs);

    dna_log(DEBUG, "started execution of thread %lu", context->id);
    task_t *task = NULL;
    while (!dna_thread_context_should_exit(context) &&
           (task = (task_t *) fifo_pop(tasks))) {
        // thread_pool_destroy sends in tasks which have NULL members; don't bother executing those
        if (task->func) {
            task_execute(task);
        }
        task_destroy(task);
        if (dna_thread_context_should_exit(context)) {
            break;
        }
    }
    dna_log(DEBUG, "Execution of thread %lu has finished.", context->id);
    return NULL;
}
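/* A sketch of the spawning side implied by the free(yargs) above: the pool
 * heap-allocates one execution_args_t per worker, and the worker frees it
 * after copying the fields out. The pool->tasks fifo matches
 * thread_pool_destroy() below; context->thread as the pthread_t handle is an
 * assumption. */
static void spawn_worker(thread_pool_t *pool, dna_thread_context_t *context)
{
    execution_args_t *yargs = malloc(sizeof(*yargs));
    yargs->task_list = pool->tasks;     /* fifo_t of pending task_t */
    yargs->thread_context = context;
    pthread_create(&context->thread, NULL, execute_task_thread_internal, yargs);
}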
static void handle_tcp_connect_stage3(struct async_network_task_s *task)
{
    int res = -1;

    if (task->addr_type == DNS_IPv4_A) {
        struct sockaddr_in sai;
        memset(&sai, 0, sizeof(sai));
        sai.sin_family = AF_INET;
        sai.sin_addr.s_addr = *((uint32_t *)task->addr + task->addr_ptr);
        sai.sin_port = htons(task->port);
        res = connect(task->sock, (struct sockaddr *)&sai, sizeof(sai));
    } else if (task->addr_type == DNS_IPv6_AAAA) {
        struct sockaddr_in6 sai;
        memset(&sai, 0, sizeof(sai));
        sai.sin6_family = AF_INET6;
        memcpy(&sai.sin6_addr, (char *)task->addr + task->addr_ptr * sizeof(sai.sin6_addr),
               sizeof(sai.sin6_addr));
        sai.sin6_port = htons(task->port);
        res = connect(task->sock, (struct sockaddr *)&sai, sizeof(sai));
    } else {
        // handled in stage2
        trace_error("%s, never reached\n", __func__);
    }

    if (res != 0 && errno != EINPROGRESS) {
        trace_error("%s, res = %d, errno = %d (%s:%u)\n", __func__, res, errno, task->host,
                    (unsigned int)task->port);
        ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0,
                                               get_pp_errno(), 0, __func__);
        free(task->addr);
        task_destroy(task);
        return;
    }

    struct event *ev = event_new(event_b, task->sock, EV_WRITE, handle_tcp_connect_stage4, task);
    add_event_mapping(task, ev);
    event_add(ev, &connect_timeout);
}
static void handle_tcp_read_stage2(int sock, short event_flags, void *arg)
{
    struct async_network_task_s *task = arg;
    int32_t retval;

    retval = recv(sock, task->buffer, task->bufsize, 0);
    if (retval < 0) {
        retval = get_pp_errno();
    } else if (retval == 0) {
        struct pp_tcp_socket_s *ts = pp_resource_acquire(task->resource, PP_RESOURCE_TCP_SOCKET);
        if (ts) {
            ts->seen_eof = 1;
            pp_resource_release(task->resource);
        }
    }

    ppb_message_loop_post_work_with_result(task->callback_ml, task->callback, 0, retval, 0,
                                           __func__);
    task_destroy(task);
}
void thread_pool_destroy(thread_pool_t *pool)
{
    dna_log(DEBUG, "Inside thread_pool_destroy()");
    if (pool) {
        dna_log(DEBUG, "Telling threads to exit...");
        thread_pool_join_all(pool);

        dna_log(DEBUG, "Destroying execution context fifo...");
        fifo_destroy(pool->thread_queue);
        pool->thread_queue = NULL;

        dna_log(DEBUG, "Destroying tasks in fifo...");
        while (!fifo_is_empty(pool->tasks)) {
            task_t *task = (task_t *) fifo_pop(pool->tasks);
            task_destroy(task);
        }
        fifo_destroy(pool->tasks);
        pool->tasks = NULL;

        dna_log(DEBUG, "Freeing thread context pool \"%s\".", pool->name);
        dna_mutex_destroy(pool->mutex);
        dna_cond_destroy(pool->wait);
        free(pool->mutex);
        free(pool->wait);
        free(pool);
    }
}
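/* A minimal lifecycle sketch for the pool above. Only thread_pool_destroy()
 * appears in this section, so thread_pool_create(), thread_pool_enqueue(),
 * task_new(), and the some_func/some_arg pair are hypothetical names chosen
 * by analogy with the code shown. */
void example_pool_lifecycle(void)
{
    thread_pool_t *pool = thread_pool_create("example", 4);      /* hypothetical */
    thread_pool_enqueue(pool, task_new(some_func, some_arg));    /* hypothetical */
    /* Joins all workers, drains and destroys any still-queued tasks, then
     * frees the pool's mutex, condvar, and the pool itself. */
    thread_pool_destroy(pool);
}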