/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/* Create the io scheduler attached to the given stackless-coroutine scheduler.
 *
 * Allocates the io-scheduler object, binds it to @scheduler, creates the
 * underlying poller (and, outside micro builds, the high/low resolution
 * timers with cached time enabled), then spawns the io loop coroutine.
 *
 * Returns the io scheduler on success, tb_null on any failure (the partially
 * built object is torn down via tb_lo_scheduler_io_exit).
 */
tb_lo_scheduler_io_ref_t tb_lo_scheduler_io_init(tb_lo_scheduler_t* scheduler)
{
    tb_bool_t                initialized  = tb_false;
    tb_lo_scheduler_io_ref_t scheduler_io = tb_null;
    do
    {
        // allocate and zero the io scheduler object
        scheduler_io = tb_malloc0_type(tb_lo_scheduler_io_t);
        tb_assert_and_check_break(scheduler_io);

        // bind the owning coroutine scheduler
        scheduler_io->scheduler = (tb_lo_scheduler_t*)scheduler;

        // create the event poller
        scheduler_io->poller = tb_poller_init(tb_null);
        tb_assert_and_check_break(scheduler_io->poller);

#ifndef TB_CONFIG_MICRO_ENABLE
        // create the high-resolution timer, using cached time
        scheduler_io->timer = tb_timer_init(TB_SCHEDULER_IO_TIMER_GROW, tb_true);
        tb_assert_and_check_break(scheduler_io->timer);

        // create the low-resolution (tick) timer, using cached time
        scheduler_io->ltimer = tb_ltimer_init(TB_SCHEDULER_IO_LTIMER_GROW, TB_LTIMER_TICK_S, tb_true);
        tb_assert_and_check_break(scheduler_io->ltimer);
#endif

        // launch the io loop coroutine; its result decides overall success
        initialized = tb_lo_coroutine_start((tb_lo_scheduler_ref_t)scheduler, tb_lo_scheduler_io_loop, scheduler_io, tb_null);

    } while (0);

    // tear down the partially constructed object on failure
    if (!initialized && scheduler_io)
    {
        tb_lo_scheduler_io_exit(scheduler_io);
        scheduler_io = tb_null;
    }
    return scheduler_io;
}
/* ioctl entry point for the timer-benchmark device.
 *
 * @inode / @filp : usual ioctl parameters; the per-open benchmark context is
 *                  stashed in filp->private_data.
 * @cmd           : one of the RTTST_* benchmark commands.
 * @arg           : user-space pointer whose meaning depends on @cmd.
 *
 * Returns 0 on success or a negative errno:
 *   -EFAULT  user copy failed
 *   -ENOMEM  histogram allocation failed
 *   -EINVAL  STOP issued while no benchmark is running
 *   -ETIMEDOUT  TMR_WAIT detected overruns
 *   -ENOTTY  unknown command
 *
 * FIXES vs. previous version:
 *   - every copy_from_user()/copy_to_user() result is now checked (the
 *     return values were silently ignored, hiding faulting user pointers);
 *   - the kmalloc() result is NULL-checked *before* deriving histogram_max
 *     and histogram_avg from it (pointer arithmetic on NULL is UB);
 *   - dead commented-out code removed.
 */
int tb_ioctl (struct inode *inode, struct file *filp, uint cmd, unsigned long arg)
{
    struct rt_tmbench_context *ctx;
    int ret = 0;
    volatile unsigned long long tsc = 0;

    ctx = (struct rt_tmbench_context *) filp->private_data;

    switch (cmd) {

    case RTTST_RTIOC_TMBENCH_START:
    {
        struct rttst_tmbench_config config_buf;
        struct rttst_tmbench_config *config;

        /* fetch the benchmark configuration before taking the mutex */
        if (copy_from_user (&config_buf, (void *) arg,
                            sizeof (struct rttst_tmbench_config)))
            return -EFAULT;
        config = &config_buf;

        down (&ctx->nrt_mutex);

        ctx->period = ns2tsc (config->period);
        ctx->warmup_loops = config->warmup_loops;
        ctx->samples_per_sec = 1000000000 / (long) config->period;
        ctx->histogram_size = config->histogram_size;
        ctx->freeze_max = config->freeze_max;

        if (ctx->histogram_size > 0) {
            /* one allocation carved into three equal slices: min/max/avg */
            ctx->histogram_min =
                kmalloc (3 * ctx->histogram_size * sizeof (long), GFP_KERNEL);
            if (!ctx->histogram_min) {
                up (&ctx->nrt_mutex);
                return -ENOMEM;
            }
            ctx->histogram_max = ctx->histogram_min + config->histogram_size;
            ctx->histogram_avg = ctx->histogram_max + config->histogram_size;
            memset (ctx->histogram_min, 0,
                    3 * ctx->histogram_size * sizeof (long));
            ctx->bucketsize = config->histogram_bucketsize;
        }

        /* sentinel extremes so the first sample always updates min/max */
        ctx->result.overall.min = 10000000;
        ctx->result.overall.max = -10000000;
        ctx->result.overall.avg = 0;
        ctx->result.overall.test_loops = 1;
        ctx->result.overall.overruns = 0;

        ctx->warmup = 1;

        ctx->curr.min = 10000000;
        ctx->curr.max = -10000000;
        ctx->curr.avg = 0;
        ctx->curr.overruns = 0;
        ctx->curr.test_loops = 0;

        ctx->mode = RTTST_TMBENCH_HANDLER;

        /* schedule the first shot ~1M tsc ticks from now */
        read_tsc (tsc);
        ctx->start_time = tsc + 1000000;
        ctx->date = ctx->start_time + ctx->period;

        tb_timer_init (timer_proc, &tb_ctx);
        read_tsc (tsc);
        tb_timer_start ((long) (ctx->date - tsc));

        up (&ctx->nrt_mutex);
        break;
    }

    case RTTST_RTIOC_TMBENCH_STOP:
    {
        struct rttst_overall_bench_res *usr_res;

        usr_res = (struct rttst_overall_bench_res *) arg;

        down (&ctx->nrt_mutex);

        if (ctx->mode < 0) {
            /* no benchmark running */
            up (&ctx->nrt_mutex);
            return -EINVAL;
        }

        tb_timer_stop ();
        ctx->mode = -1;

        /* average over completed loops; the max(.,2)-1 guards div-by-zero */
        ctx->result.overall.avg =
            slldiv (ctx->result.overall.avg,
                    (((ctx->result.overall.test_loops) > 1
                      ? ctx->result.overall.test_loops : 2) - 1));

        if (copy_to_user (&usr_res->result, &ctx->result.overall,
                          sizeof (struct rttst_bench_res)))
            ret = -EFAULT;

        if (ctx->histogram_size) {
            int size = ctx->histogram_size * sizeof (long);

            if (copy_to_user (usr_res->histogram_min, ctx->histogram_min, size) ||
                copy_to_user (usr_res->histogram_max, ctx->histogram_max, size) ||
                copy_to_user (usr_res->histogram_avg, ctx->histogram_avg, size))
                ret = -EFAULT;
            /* histogram storage freed even if the copy-out failed */
            kfree (ctx->histogram_min);
        }

        up (&ctx->nrt_mutex);

        free_irq (IRQ_WATCH, &tb_ctx);
        break;
    }

    case RTTST_RTIOC_INTERM_BENCH_RES:
    {
        struct rttst_interm_bench_res *usr_res;

        usr_res = (struct rttst_interm_bench_res *) arg;

        /* block until the sampling handler publishes a result */
        ret = wait_event_interruptible (*(ctx->result_event), ctx->done != 0);
        if (ret < 0)
            return ret;
        ctx->done = 0;

        if (copy_to_user (usr_res, &ctx->result,
                          sizeof (struct rttst_interm_bench_res)))
            return -EFAULT;
        break;
    }

    case RTTST_GETCCLK:
    {
        if (copy_to_user ((void *) arg, &tb_cclk, sizeof (tb_cclk)))
            return -EFAULT;
        break;
    }

    case RTTST_TMR_START:
    {
        struct timer_info t_info;

        if (copy_from_user (&t_info, (void *) arg, sizeof (t_info)))
            return -EFAULT;

        ctx->period = t_info.period_tsc;
        ctx->start_time = t_info.start_tsc;
        ctx->date = ctx->start_time + ctx->period;

        tb_timer_init (user_timer_proc, &tb_ctx);
        read_tsc (tsc);
        tb_timer_start ((long) (ctx->date - tsc));
        break;
    }

    case RTTST_TMR_WAIT:
    {
        ctx->curr.overruns = 0;

        ret = wait_event_interruptible (*(ctx->result_event), ctx->done != 0);
        if (ret < 0)
            return ret;

        ctx->date += ctx->period;
        read_tsc (ctx->start_time);

        if (ctx->date <= ctx->start_time) {
            /* we woke up late: advance the release point past "now",
               counting each skipped period as an overrun */
            while (ctx->date <= ctx->start_time) {
                ctx->curr.overruns++;
                ctx->date += ctx->period;
            }
            ret = -ETIMEDOUT;
        }
        ctx->done = 0;

        /* arm the timer for the next release point */
        tb_timer_start ((long) (ctx->date - ctx->start_time));

        if (copy_to_user ((void *) arg, &(ctx->curr.overruns),
                          sizeof (ctx->curr.overruns)))
            return -EFAULT;
        break;
    }

    case RTTST_TMR_STOP:
    {
        tb_timer_stop ();
        free_irq (IRQ_WATCH, &tb_ctx);
        break;
    }

    default:
        printk ("%s: bad ioctl code (0x%x)\n", __FUNCTION__, cmd);
        ret = -ENOTTY;
    }

    return ret;
}