/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(void *data)
{
	struct oprofile_cpu_buffer *b = data;
	if (b->cpu != smp_processor_id()) {
		printk("WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
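The container_of() above only works because the delayed work item is embedded in the per-CPU buffer and was initialized against this handler. As a hedged illustration of that setup (the struct layout and init_cpu_buffer() helper here are assumptions inferred from the accesses in wq_sync_buffer(), not copied from the real oprofile sources):

#include <linux/workqueue.h>

struct oprofile_cpu_buffer {
	int cpu;                   /* CPU this buffer belongs to */
	struct delayed_work work;  /* periodic sync work item */
	/* ... sampling state elided ... */
};

static void init_cpu_buffer(struct oprofile_cpu_buffer *b, int cpu)
{
	b->cpu = cpu;
	/* the handler's container_of(work, ..., work.work) matches this */
	INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
}

With the 2.6.20 workqueue API the callback receives a struct work_struct * rather than an opaque void *, so recovering the containing buffer via container_of() replaces the earlier cast of the data pointer.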
/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
		cancel_delayed_work(&b->work);
		return;
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);
		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
#ifdef CONFIG_DEBUG_PRINTK
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);
#endif
		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
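The header comment relies on a counterpart that first pins each work item to its CPU with schedule_delayed_work_on(); only then do the self-rearming schedule_delayed_work() calls above keep it there. A minimal sketch of that setup and teardown follows. The names start_cpu_work/stop_cpu_work, the per-CPU cpu_buffer variable, and the per-CPU jiffy stagger are assumptions for illustration, not taken from the snippets above; only work_enabled, DEFAULT_TIMER_EXPIRE, and the work member appear there.

static void start_cpu_work(void)
{
	int i;

	/* allow wq_sync_buffer() to re-arm itself */
	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Stagger the work by one jiffy per cpu so the sync
		 * passes don't all fire at once.
		 */
		schedule_delayed_work_on(i, &b->work,
					 DEFAULT_TIMER_EXPIRE + i);
	}
}

static void stop_cpu_work(void)
{
	int i;

	/* stop wq_sync_buffer() from re-arming itself ... */
	work_enabled = 0;

	/* ... cancel anything still pending ... */
	for_each_online_cpu(i)
		cancel_delayed_work(&per_cpu(cpu_buffer, i).work);

	/* ... and wait for in-flight instances to finish */
	flush_scheduled_work();
}

Clearing work_enabled before cancelling matters: a handler that is already running re-checks the flag before calling schedule_delayed_work(), so the work cannot re-arm itself behind the teardown's back.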