/**
 * store_hibernate - Initiate partition hibernation
 * @classdev: sysdev class struct
 * @buf: buffer holding the stream ID received from the HMC (hex string)
 * @count: buffer size
 *
 * Write the stream ID received from the HMC to this file
 * to trigger hibernating the partition.
 *
 * NOTE(review): the kernel-doc previously documented an @attr parameter
 * that this prototype does not take - confirm against the sysdev
 * attribute registration that this signature matches the expected
 * store-callback type.
 *
 * Return value:
 *	@count on success / negative errno on failure
 **/
static ssize_t store_hibernate(struct sysdev_class *classdev,
			       const char *buf, size_t count)
{
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Parse the HMC-provided stream ID as hex.  NOTE(review): no
	 * error checking here - malformed input silently yields
	 * stream_id == 0. */
	stream_id = simple_strtoul(buf, NULL, 16);

	/* Firmware may not be ready yet; keep retrying while it
	 * reports -EAGAIN, sleeping a second between attempts. */
	do {
		rc = pseries_suspend_begin(PM_SUSPEND_MEM);
		if (rc == -EAGAIN)
			ssleep(1);
	} while (rc == -EAGAIN);

	if (!rc) {
		/* Quiesce topology updates for the duration of the
		 * suspend, then restore them afterwards. */
		stop_topology_update();
		rc = pm_suspend(PM_SUSPEND_MEM);
		start_topology_update();
	}

	/* Stream ID is single-use; clear it whether we succeeded or not. */
	stream_id = 0;

	if (!rc)
		rc = count;
	return rc;
}
/*
 * rtas_ibm_suspend_me - Suspend the partition via the "ibm,suspend-me"
 * RTAS call, coordinating all online CPUs.
 * @args: RTAS argument block; args->args[0]/args->args[1] carry the
 *        high/low 32 bits of the VASI handle.  A "not suspendable"
 *        status is reported back through args->args[args->nargs].
 *
 * The VASI state is checked first: the suspend only proceeds when the
 * platform reports H_VASI_SUSPENDING.  Every online CPU is then sent
 * into rtas_percpu_suspend_me(); one of them performs the actual
 * suspend while the rest wait in H_JOIN.  Topology updates are
 * quiesced for the duration.
 *
 * Returns 0 on success (or on a VASI state reported via @args),
 * -ENOSYS when the RTAS service is absent, the H_VASI_STATE hcall
 * status on hcall failure, or the error recorded in data.error by the
 * per-CPU workers.
 */
int rtas_ibm_suspend_me(struct rtas_args *args)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf,
			 ((u64)args->args[0] << 32) | args->args[1]);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		/* Suspend not currently permitted; tell the caller. */
		args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
		return 0;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		args->args[args->nargs] = -1;
		return 0;
	}

	/* Shared state for the per-CPU workers; 'done' is completed by
	 * the last worker to finish (see data->working accounting). */
	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	/* Topology updates must stay quiesced while CPUs sit in H_JOIN. */
	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	return atomic_read(&data.error);
}
static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done) { long rc = H_SUCCESS; unsigned long msr_save; int cpu; stop_topology_update(); atomic_inc(&data->working); /* really need to ensure MSR.EE is off for H_JOIN */ msr_save = mfmsr(); mtmsr(msr_save & ~(MSR_EE)); while (rc == H_SUCCESS && !data->done) rc = plpar_hcall_norets(H_JOIN); mtmsr(msr_save); if (rc == H_SUCCESS) { /* This cpu was prodded and the suspend is complete. */ goto out; } else if (rc == H_CONTINUE) { /* All other cpus are in H_JOIN, this cpu does * the suspend. */ return __rtas_suspend_last_cpu(data, wake_when_done); } printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n", smp_processor_id(), rc); data->error = rc; if (wake_when_done) { smp_wmb(); data->done = 1; /* Ensure data->done is seen on all CPUs that are about to wake up as a result of the H_PROD below */ mb(); start_topology_update(); /* This cpu did the suspend or got an error; in either case, * we need to prod all other other cpus out of join state. * Extra prods are harmless. */ for_each_online_cpu(cpu) plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); } out: if (atomic_dec_return(&data->working) == 0) complete(data->complete); return rc; }
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done) { u16 slb_size = mmu_slb_size; int rc = H_MULTI_THREADS_ACTIVE; int cpu; slb_set_size(SLB_MIN_SIZE); stop_topology_update(); printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && !atomic_read(&data->error)) rc = rtas_call(data->token, 0, 1, NULL); if (rc || atomic_read(&data->error)) { printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc); slb_set_size(slb_size); } if (atomic_read(&data->error)) rc = atomic_read(&data->error); atomic_set(&data->error, rc); start_topology_update(); if (wake_when_done) { atomic_set(&data->done, 1); for_each_online_cpu(cpu) plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu)); } if (atomic_dec_return(&data->working) == 0) complete(data->complete); return rc; }
/*
 * rtas_ibm_suspend_me - Suspend the partition via the "ibm,suspend-me"
 * RTAS call, coordinating all present CPUs.
 * @handle: 64-bit VASI handle identifying the suspend session.
 * @vasi_return: out parameter; receives RTAS_NOT_SUSPENDABLE or -1 when
 *               the VASI state does not permit a suspend.
 *
 * The VASI state is checked first: the suspend only proceeds when the
 * platform reports H_VASI_SUSPENDING.  All present-but-offline CPUs
 * are brought online before the join (the join must include every
 * present CPU), every CPU is sent into rtas_percpu_suspend_me(), and
 * the previously-offline CPUs are taken back down afterwards.
 *
 * Returns 0 on success (or on a VASI state reported via @vasi_return),
 * -ENOSYS when the RTAS service is absent, -ENOMEM if the cpumask
 * cannot be allocated, the H_VASI_STATE hcall status on hcall failure,
 * or the error recorded in data.error by the per-CPU workers.
 */
int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
	cpumask_var_t offline_mask;
	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		/* Suspend not currently permitted; tell the caller. */
		*vasi_return = RTAS_NOT_SUSPENDABLE;
		return 0;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		*vasi_return = -1;
		return 0;
	}

	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
		return -ENOMEM;

	/* Shared state for the per-CPU workers; 'done' is completed by
	 * the last worker to finish (see data->working accounting). */
	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		pr_err("%s: Could not bring present CPUs online.\n", __func__);
		atomic_set(&data.error, cpuret);
		goto out;
	}

	/* Topology updates must stay quiesced while CPUs sit in H_JOIN. */
	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	/* Take down CPUs not online prior to suspend */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("%s: Could not restore CPUs to offline state.\n",
			__func__);

out:
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
}