static void watchdog_enable_all_cpus(void)
{
	unsigned int cpu;

	if (watchdog_disabled) {
		watchdog_disabled = 0;
		/* Resume the per-CPU watchdog threads parked on disable */
		for_each_online_cpu(cpu)
			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
	}
}
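/*
 * For context: a minimal sketch of the counterpart disable path, assuming the
 * same watchdog_disabled flag and per-CPU softlockup_watchdog threads as
 * above. The kthread_park() loop here is what makes the kthread_unpark() loop
 * in watchdog_enable_all_cpus() meaningful; the exact body in any given
 * kernel tree may differ.
 */
static void watchdog_disable_all_cpus(void)
{
	unsigned int cpu;

	if (!watchdog_disabled) {
		watchdog_disabled = 1;
		/* Park the per-CPU watchdog threads until re-enabled */
		for_each_online_cpu(cpu)
			kthread_park(per_cpu(softlockup_watchdog, cpu));
	}
}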
static void
v3d_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	struct v3d_dev *v3d = exec->v3d;
	enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
	enum v3d_queue q;
	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));

	/* If the current address or return address have changed, then
	 * the GPU has probably made progress and we should delay the
	 * reset.  This could fail if the GPU got in an infinite loop
	 * in the CL, but that is pretty unlikely outside of an i-g-t
	 * testcase.
	 */
	if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
		job->timedout_ctca = ctca;
		job->timedout_ctra = ctra;

		schedule_delayed_work(&job->base.work_tdr,
				      job->base.sched->timeout);
		return;
	}

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;

		kthread_park(sched->thread);
		drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
					       sched_job : NULL));
	}

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	/* Unblock schedulers and restart their jobs. */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		drm_sched_job_recovery(&v3d->queue[q].sched);
		kthread_unpark(v3d->queue[q].sched.thread);
	}

	mutex_unlock(&v3d->reset_lock);
}
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;

	/* block scheduler */
	kthread_park(gpu->sched.thread);
	drm_sched_hw_job_reset(&gpu->sched, sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(gpu);
	etnaviv_gpu_recover_hang(gpu);

	/* restart scheduler after GPU is usable again */
	drm_sched_job_recovery(&gpu->sched);
	kthread_unpark(gpu->sched.thread);
}
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
 * cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	unsigned int cpu = smp_processor_id();

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;

	/* Unpark the stopper thread and the hotplug thread of this cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE)
		__cpuhp_kick_ap_work(st);
	else
		complete(&st->done);
}
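/*
 * For reference, the stopper-thread half of the unpark above: a sketch
 * following the shape of stop_machine_unpark() in kernel/stop_machine.c,
 * assuming the per-CPU cpu_stopper layout used there. The point is that the
 * stopper is both marked enabled and unparked before the hotplug thread can
 * rely on it.
 */
void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}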
void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	/* Unpark the boot CPU's hotplug thread; it was created parked */
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
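/*
 * Sketch of the descriptor that cpuhp_threads_init() registers, based on the
 * smp_hotplug_thread layout in kernel/cpu.c; treat the field values as
 * illustrative rather than quoted. Because .selfparking is true, the smpboot
 * core never unparks these threads itself, which is why cpuhp_threads_init()
 * above and cpuhp_online_idle() earlier call kthread_unpark() explicitly.
 */
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};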
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
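/*
 * All of the snippets above drive the same thread-side protocol: a parkable
 * kthread must poll kthread_should_park() and call kthread_parkme(), or the
 * kthread_park() calls above would block forever. A minimal, self-contained
 * sketch (worker_fn, its do-work placeholder, and the HZ/10 sleep are
 * illustrative, not taken from any of the callers above; needs
 * <linux/kthread.h>):
 */
static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps until kthread_unpark() */

		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ / 10);
	}
	return 0;
}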