/* Return a finished task's memory to the per-thread mempool, or free it
 * outright when the local cache is already full. */
static void task_free(TaskPool *pool, Task *task, const int thread_id)
{
  task_data_free(task, thread_id);

  BLI_assert(thread_id >= 0);
  BLI_assert(thread_id <= pool->scheduler->num_threads);
  if (thread_id == 0) {
    /* Slot 0 is reserved for the main thread unless local TLS is in use. */
    BLI_assert(pool->use_local_tls || BLI_thread_is_main());
  }

  TaskThreadLocalStorage *local_storage = get_task_tls(pool, thread_id);
  TaskMemPool *mem_pool = &local_storage->task_mempool;

  if (mem_pool->num_tasks >= MEMPOOL_SIZE - 1) {
    /* Local storage saturated, no other way than just discard
     * the memory.
     *
     * TODO(sergey): We can perhaps store such pointer in a global
     * scheduler pool, maybe it'll be faster than discarding and
     * allocating again. */
    MEM_freeN(task);
#ifdef DEBUG_STATS
    pool->mempool_stats[thread_id].num_discard++;
#endif
    return;
  }

  /* Successfully allowed the task to be re-used later. */
  mem_pool->tasks[mem_pool->num_tasks] = task;
  ++mem_pool->num_tasks;
}
/** * @brief Interrupt handler for echo task * * This function set the variable to implement the functionality of notification * meachanism. * * @param interrupt: Interrupt ID * @param data: TLS data */ void echo_task_handler(struct timer_event* tevent) { void* data = tevent->data; sw_tls *tls; struct echo_global *echo_data; u32 task_id = (u32) data; sw_printf("SW: echo task handler 0x%x\n", task_id); tls = get_task_tls(task_id); if(tls) { echo_data = (struct echo_global *)tls->private_data; echo_data->data_available = 1; } if(tls) { notify_ns(task_id); } else { sw_printf("SW: where is the task???\n"); } tevent->state &= ~TIMER_STATE_EXECUTING; timer_event_destroy(tevent); }
/* Obtain memory for a new task, preferring re-use from the calling
 * thread's local mempool; falls back to a fresh heap allocation.
 * A thread_id of -1 means "no thread-local cache available". */
static Task *task_alloc(TaskPool *pool, const int thread_id)
{
  BLI_assert(thread_id <= pool->scheduler->num_threads);

  if (thread_id != -1) {
    BLI_assert(thread_id >= 0);
    BLI_assert(thread_id <= pool->scheduler->num_threads);

    TaskThreadLocalStorage *local_storage = get_task_tls(pool, thread_id);
    TaskMemPool *mem_pool = &local_storage->task_mempool;

    /* Try to re-use task memory from a thread local storage. */
    if (mem_pool->num_tasks > 0) {
      /* Success! We've just avoided task allocation. */
      mem_pool->num_tasks--;
#ifdef DEBUG_STATS
      pool->mempool_stats[thread_id].num_reuse++;
#endif
      return mem_pool->tasks[mem_pool->num_tasks];
    }

    /* We are doomed to allocate new task data. */
#ifdef DEBUG_STATS
    pool->mempool_stats[thread_id].num_alloc++;
#endif
  }

  return MEM_mallocN(sizeof(Task), "New task");
}
/**
 * @brief Echo the data for the user supplied buffer with async support
 *
 * This function copies the request buffer to response buffer to show the
 * non-zero copy functionality and to show the async support by waiting for
 * the flag which gets set in the interrupt handler (echo_task_handler).
 *
 * @param req_buf: Virtual address of the request buffer
 * @param req_buf_len: Request buffer length
 * @param res_buf: Virtual address of the response buffer
 * @param res_buf_len: Response buffer length
 * @param meta_data: Virtual address of the meta data of the encoded data
 * @param ret_res_buf_len: Return length of the response buffer
 *
 * @return SMC return codes:
 * SMC_SUCCESS: API processed successfully. \n
 * SMC_PENDING: Data not yet available; caller must re-enter. \n
 * SMC_*: An implementation-defined error code for any other error.
 */
int process_otz_echo_async_send_cmd(void *req_buf, u32 req_buf_len,
                                    void *res_buf, u32 res_buf_len,
                                    struct otzc_encode_meta *meta_data,
                                    u32 *ret_res_buf_len)
{
    echo_data_t echo_data;
    char *out_buf;
    int offset = 0, pos = 0, mapped = 0, type, out_len;
    int task_id;
    sw_tls *tls;
    struct echo_global *echo_global;

    task_id = get_current_task_id();
    tls = get_task_tls(task_id);
    /* Fix: guard against a missing TLS entry before dereferencing.
     * echo_task_handler() already treats a NULL result as "task not
     * found"; this path previously crashed on it. */
    if (!tls) {
        sw_printf("SW: where is the task???\n");
        return SMC_EINVAL_ARG;
    }
    echo_global = (struct echo_global *)tls->private_data;

    if (!echo_global->data_available) {
        /* Data not ready yet: arm a one-shot 100ms timer whose handler
         * sets data_available, then park this task on its wait queue. */
        struct timer_event *tevent;
        timeval_t time;

        tevent = timer_event_create(&echo_task_handler, (void *)task_id);
        if (!tevent) {
            sw_printf("SW: Out of Memory : Cannot register Handler\n");
            return SMC_ENOMEM;
        }

        /* Time duration = 100ms */
        time.tval.nsec = 100000000;
        time.tval.sec = 0;

        struct sw_task *task = get_task(task_id);
        /* Reset this task's wait queue before sleeping on it. */
        task->wq_head.elements_count = 0;
        INIT_LIST_HEAD(&task->wq_head.elements_list);
        task->wq_head.spin_lock.lock = 0;

        timer_event_start(tevent, &time);
#ifdef ASYNC_DBG
        sw_printf("SW: Before calling wait event \n");
#endif
        /* Returns SMC_PENDING to the caller; the command is re-entered
         * once the handler has set data_available. */
        sw_wait_event_async(&task->wq_head, echo_global->data_available,
                            SMC_PENDING);
#ifdef ASYNC_DBG
        sw_printf("SW: Coming out from wait event \n");
#endif
    }

    /* Decode the request: one UINT32 length followed by one ARRAY.
     * The while/break shape runs at most once; kept for parity with
     * the other command handlers. */
    if (req_buf_len > 0) {
        while (offset <= req_buf_len) {
            if (decode_data(req_buf, meta_data, &type, &offset, &pos,
                            &mapped, (void **)&out_buf, &out_len)) {
                return SMC_EINVAL_ARG;
            }
            else {
                if (type != OTZ_ENC_UINT32)
                    return SMC_EINVAL_ARG;
                echo_data.len = *((u32 *)out_buf);
            }
            if (decode_data(req_buf, meta_data, &type, &offset, &pos,
                            &mapped, (void **)&out_buf, &out_len)) {
                return SMC_EINVAL_ARG;
            }
            else {
                if (type != OTZ_ENC_ARRAY)
                    return SMC_EINVAL_ARG;
                /* SECURITY NOTE(review): echo_data.len comes straight from
                 * the (non-secure) request and is not bounded against the
                 * capacity of echo_data.data before this copy — potential
                 * buffer overflow. Clamp to the real buffer size once it
                 * is confirmed from echo_data_t's definition. */
                sw_memcpy(echo_data.data, out_buf, echo_data.len);
            }
            break;
        }
    }

    /* Encode the response: rewind the cursor and switch to the
     * response-parameter region of the metadata table. */
    offset = 0, pos = OTZ_MAX_REQ_PARAMS;
    if (res_buf_len > 0) {
        while (offset <= res_buf_len) {
            if (decode_data(res_buf, meta_data, &type, &offset, &pos,
                            &mapped, (void **)&out_buf, &out_len)) {
                return SMC_EINVAL_ARG;
            }
            else {
                if (type != OTZ_ENC_ARRAY)
                    return SMC_EINVAL_ARG;
            }
            sw_memcpy(out_buf, echo_data.data, echo_data.len);
            if (update_response_len(meta_data, pos, echo_data.len))
                return SMC_EINVAL_ARG;
            break;
        }
        *ret_res_buf_len = echo_data.len;
    }

    return 0;
}