int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
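/*
 * Every variant in this file validates its results through
 * trace_test_buffer(). A minimal sketch of that helper is shown below;
 * the in-tree version additionally locks out max-buffer flipping and
 * temporarily disables the ring buffer while consuming entries. The
 * per-cpu consuming check trace_test_buffer_cpu() is assumed here.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt;
	int cpu, ret = 0;

	/* total entries currently in the ring buffer */
	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * Consume every entry on every CPU; a single malformed
	 * entry fails the whole selftest.
	 */
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}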
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
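/*
 * A sketch of the tracer_init()/warn_failed_init_tracer() pair that the
 * tracer_init()-based variants rely on. The argument to the reset call
 * varies across versions (the trace_array itself in older kernels,
 * &tr->trace_buffer in later ones); the older form is shown here.
 */
static int tracer_init(struct tracer *t, struct trace_array *tr)
{
	/* wipe stale entries before handing the array to the tracer */
	tracing_reset_online_cpus(tr);
	return t->init(tr);
}

static void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to initialize tracer, init returned %d\n",
	       init_ret);
}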
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	msleep(100);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
/*
 * Pretty much the same as for the function tracer from which the selftest
 * has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */
out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */
out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
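/*
 * The trace_graph_entry_watchdog() callback registered above wraps the
 * normal graph entry probe and aborts the test once graph_hang_thresh
 * crosses GRAPH_MAX_FUNC_TEST. A sketch along the lines of the in-tree
 * helper; the exact dump-on-oops handling of the real version is omitted.
 */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		return 0;
	}

	return trace_graph_entry(trace);
}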
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	tracing_max_latency = 0;

	local_irq_disable();
	udelay(100);
	local_irq_enable();

	trace->stop(tr);

	tracing_stop();

	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	msleep(1);

	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	msleep(100);

	tracing_stop();
	ftrace_enabled = 0;

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	if (ret)
		ftrace_kill();

	return ret;
}
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	return ret;
}
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the RT thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&isrt);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&isrt);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	printk("ret = %d\n", ret);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
int
trace_selftest_startup_dynamic_tracing(struct tracer *trace,
				       struct trace_array *tr,
				       int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	ftrace_enabled = 1;
	tracer_enabled = 1;

	func();

	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_filter(func_name, strlen(func_name), 1);

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	msleep(100);

	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	func();

	msleep(100);

	tracing_stop();
	ftrace_enabled = 0;

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
/* Test dynamic code modification and ftrace filters */
int
trace_selftest_startup_dynamic_tracing(struct tracer *trace,
				       struct trace_array *tr,
				       int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
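/*
 * The func() probe target passed into the dynamic tracing tests above
 * lives in a separate compilation unit (kernel/trace/trace_selftest_dynamic.c
 * in-tree), so the compiler cannot inline it and its mcount/fentry call
 * site survives; a sketch of that stub:
 */
int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}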
static int trace_wakeup_test_thread(void *data)
{
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	complete(x);

	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	while (!kthread_should_stop()) {
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	wait_for_completion(&isrt);

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	tracing_max_latency = 0;

	msleep(100);

	wake_up_process(p);

	msleep(100);

	tracing_stop();

	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}