static int drc_pmem_bind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; uint64_t rc, token; /* * When the hypervisor cannot map all the requested memory in a single * hcall it returns H_BUSY and we call again with the token until * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS * leave the system in an undefined state, so we wait. */ token = 0; do { rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, p->blocks, BIND_ANY_ADDR, token); token = be64_to_cpu(ret[0]); cond_resched(); } while (rc == H_BUSY); if (rc) { dev_err(&p->pdev->dev, "bind err: %lld\n", rc); return -ENXIO; } p->bound_addr = be64_to_cpu(ret[1]); dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); return 0; }
/*
 * hvc_get_chars - fetch console characters from the hypervisor.
 * @index: vterm number of the console to read from.
 * @buf:   destination buffer; the hcall writes two unsigned longs here, so
 *         it must hold at least 16 bytes on 64-bit — TODO confirm callers
 *         guarantee this (@count is never consulted).
 * @count: unused.
 *
 * Returns the number of characters received, or 0 on hcall failure.
 */
int hvc_get_chars(int index, char *buf, int count)
{
	unsigned long got;

	/* The two data output words land directly in buf (cast to longs). */
	if (plpar_hcall(H_GET_TERM_CHAR, index, 0, 0, 0, &got,
			(unsigned long *)buf, (unsigned long *)buf+1) == H_Success) {
		/*
		 * Work around a HV bug where it gives us a null
		 * after every \r.  -- paulus
		 */
		if (got > 0) {
			int i;
			for (i = 1; i < got; ++i) {
				/* Drop the spurious NUL that follows a CR. */
				if (buf[i] == 0 && buf[i-1] == '\r') {
					--got;
					/* Close the gap left by the dropped byte. */
					if (i < got)
						memmove(&buf[i], &buf[i+1],
							got - i);
				}
			}
		}
		return got;
	}

	return 0;
}
/*
 * plpar_tce_get - read back the TCE entry at @ioba in table @liobn.
 *
 * Only the first hcall output word (the TCE value) is of interest;
 * the remaining outputs are discarded.  Returns the hcall status.
 */
long plpar_tce_get(unsigned long liobn, unsigned long ioba, unsigned long *tce_ret)
{
	unsigned long discard;

	return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0,
			   tce_ret, &discard, &discard);
}
/*
 * plpar_get_term_char - fetch up to two words of terminal data.
 *
 * The hcall writes the received length into *len_ret and the character
 * data into buf_ret as two unsigned longs.  ToDo: alignment of buf_ret?
 * Returns the hcall status.
 */
long plpar_get_term_char(unsigned long termno, unsigned long *len_ret, char *buf_ret)
{
	unsigned long *out = (unsigned long *)buf_ret;

	return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0,
			   len_ret, &out[0], &out[1]);
}
/*
 * plpar_put_term_char - push up to 16 bytes to the hypervisor console.
 *
 * The data is passed by value as two unsigned longs read out of @buffer.
 * ToDo: alignment of buffer?  Returns the hcall status; the output
 * words are not used.
 */
long plpar_put_term_char(unsigned long termno, unsigned long len, const char *buffer)
{
	const unsigned long *words = (const unsigned long *)buffer;
	unsigned long sink;

	return plpar_hcall(H_PUT_TERM_CHAR, termno, len, words[0], words[1],
			   &sink, &sink, &sink);
}
/* can't use hvc_get_chars because that strips CRs */
static int hvsi_read(struct hvsi_struct *hp, char *buf, int count)
{
	unsigned long nread;
	long rc;

	/* Data words are written straight into buf (cast to longs). */
	rc = plpar_hcall(H_GET_TERM_CHAR, hp->vtermno, 0, 0, 0, &nread,
			 (unsigned long *)buf, (unsigned long *)buf + 1);
	if (rc != H_Success)
		return 0;

	return nread;
}
/*
 * icp_hv_get_xirr - read the XIRR via the H_XIRR hcall.
 *
 * Panics on hcall failure; on success the interrupt word is in the
 * first output register.
 */
static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
{
	unsigned long regs[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_XIRR, regs, cppr);
	if (rc != H_SUCCESS)
		panic(" bad return code xirr - rc = %lx\n", rc);

	return (unsigned int)regs[0];
}
/*
 * pseries_get_random_long - fetch one word of hypervisor entropy.
 *
 * Returns 1 and stores the value in *v on success, 0 on hcall failure.
 */
static int pseries_get_random_long(unsigned long *v)
{
	unsigned long regs[PLPAR_HCALL_BUFSIZE];

	if (plpar_hcall(H_RANDOM, regs) != H_SUCCESS)
		return 0;

	*v = regs[0];
	return 1;
}
/*
 * rtas_ibm_suspend_me - coordinate a partition suspend via RTAS.
 * @args: RTAS argument block; args[0]/args[1] hold the 64-bit VASI stream
 *        id split into two 32-bit words, and args[args->nargs] receives
 *        a status for the non-suspendable cases.
 *
 * Validates the VASI state first, then rendezvouses all CPUs so one of
 * them can make the actual "ibm,suspend-me" RTAS call.
 *
 * Return: 0 on handled (including not-suspendable) states, the hcall rc
 * on a VASI query failure, -ENOSYS if the RTAS service is absent, or
 * the error recorded during the global join.
 */
int rtas_ibm_suspend_me(struct rtas_args *args)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf,
			 ((u64)args->args[0] << 32) | args->args[1]);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		/* Stream exists but suspend has not been initiated yet. */
		args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
		return 0;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		args->args[args->nargs] = -1;
		return 0;
	}

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;
	/* Quiesce topology updates for the duration of the suspend. */
	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	return atomic_read(&data.error);
}
/*
 * pseries_rng_data_read - hwrng data_read callback backed by H_RANDOM.
 * @rng:  unused.
 * @data: destination for the entropy; the hwrng core sizes this for the
 *        8 bytes we report, so we must not write more than that.
 *
 * plpar_hcall() fills PLPAR_HCALL_BUFSIZE unsigned longs in its return
 * buffer; passing @data directly (as the old code did) overflowed the
 * caller's buffer.  Stage through a full-size local buffer and copy out
 * only the 64 bits of entropy, matching pseries_rng_read().
 *
 * Return: 8 (bytes of entropy) on success, -EIO on hcall failure.
 */
static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
{
	unsigned long buffer[PLPAR_HCALL_BUFSIZE];
	int rc;

	rc = plpar_hcall(H_RANDOM, buffer);
	if (rc != H_SUCCESS) {
		pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
		return -EIO;
	}
	memcpy(data, buffer, 8);

	/* The hypervisor interface returns 64 bits */
	return 8;
}
/*
 * hvc_put_chars - send console characters to the hypervisor.
 * @index: vterm number to write to.
 * @buf:   source data, read as two unsigned longs (16 bytes on 64-bit)
 *         regardless of @count.
 * @count: number of characters the hypervisor should emit.
 *
 * Returns @count on success, 0 if the hypervisor is busy (retry later),
 * or -1 on any other failure.
 */
int hvc_put_chars(int index, const char *buf, int count)
{
	unsigned long discard;
	unsigned long *words = (unsigned long *)buf;
	long rc;

	rc = plpar_hcall(H_PUT_TERM_CHAR, index, count, words[0], words[1],
			 &discard, &discard, &discard);
	if (rc == H_Success)
		return count;
	if (rc == H_Busy)
		return 0;

	return -1;
}
/*
 * pseries_rng_read - hwrng read callback backed by H_RANDOM.
 *
 * The hcall fills a full PLPAR_HCALL_BUFSIZE return buffer; only the
 * first 64 bits are entropy, so stage locally and copy 8 bytes out.
 *
 * Returns 8 on success, -EIO on hcall failure.
 */
static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u64 regs[PLPAR_HCALL_BUFSIZE];
	int rc;

	rc = plpar_hcall(H_RANDOM, (unsigned long *)regs);
	if (rc != H_SUCCESS) {
		pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
		return -EIO;
	}

	/* The hypervisor interface returns 64 bits */
	memcpy(data, regs, 8);
	return 8;
}
/**
 * hvc_get_chars - retrieve characters from firmware for denoted vterm adapter
 * @vtermno: The vtermno or unit_address of the adapter from which to fetch the
 *	data.
 * @buf: The character buffer into which to put the character data fetched from
 *	firmware.
 * @count: Size of @buf in bytes; at most this many characters are stored.
 *
 * The previous version wrote two full unsigned longs (16 bytes) into @buf
 * unconditionally — even when the hcall failed and even when @count was
 * smaller — overwriting past small caller buffers.  Stage the data in a
 * local buffer and copy out only min(len, count) bytes on success.
 *
 * Return: number of characters stored in @buf, or 0 on hcall failure.
 */
int hvc_get_chars(uint32_t vtermno, char *buf, int count)
{
	long ret;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	unsigned long lbuf[2];

	ret = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
	if (ret == H_SUCCESS) {
		int len = retbuf[0];

		lbuf[0] = be64_to_cpu(retbuf[1]);
		lbuf[1] = be64_to_cpu(retbuf[2]);

		/* Defend against a misbehaving hypervisor length. */
		if (len > (int)sizeof(lbuf))
			len = sizeof(lbuf);
		/* Never write past the caller's buffer. */
		if (len > count)
			len = count;
		memcpy(buf, lbuf, len);
		return len;
	}

	return 0;
}
/*
 * icp_hv_get_xirr - read the XIRR via the H_XIRR hcall.
 *
 * On failure, logs the bad return code, warns once, and reports a
 * spurious interrupt instead of panicking.
 */
static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
{
	unsigned long regs[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_XIRR, regs, cppr);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
		       __func__, cppr, rc);
		WARN_ON_ONCE(1);
		return XICS_IRQ_SPURIOUS;
	}

	return (unsigned int)regs[0];
}
static int drc_pmem_unbind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; uint64_t rc, token; token = 0; /* NB: unbind has the same retry requirements mentioned above */ do { rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index, p->bound_addr, p->blocks, token); token = be64_to_cpu(ret); cond_resched(); } while (rc == H_BUSY); if (rc) dev_err(&p->pdev->dev, "unbind error: %lld\n", rc); return !!rc; }
static int papr_scm_meta_get(struct papr_scm_priv *p, struct nd_cmd_get_config_data_hdr *hdr) { unsigned long data[PLPAR_HCALL_BUFSIZE]; int64_t ret; if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1) return -EINVAL; ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index, hdr->in_offset, 1); if (ret == H_PARAMETER) /* bad DRC index */ return -ENODEV; if (ret) return -EINVAL; /* other invalid parameter */ hdr->out_buf[0] = data[0] & 0xff; return 0; }
/**
 * pseries_suspend_begin - First phase of hibernation
 *
 * Check to ensure we are in a valid state to hibernate
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int pseries_suspend_begin(suspend_state_t state)
{
	unsigned long regs[PLPAR_HCALL_BUFSIZE];
	long rc, vasi_state;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, regs, stream_id);
	if (rc) {
		pr_err("pseries_suspend_begin: vasi_state returned %ld\n",rc);
		return rc;
	}

	vasi_state = regs[0];
	if (vasi_state == H_VASI_ENABLED)
		/* Suspend not yet initiated by the management console. */
		return -EAGAIN;
	if (vasi_state != H_VASI_SUSPENDING) {
		pr_err("pseries_suspend_begin: vasi_state returned state %ld\n",
		       vasi_state);
		return -EIO;
	}

	return 0;
}
/*
 * rtas_ibm_suspend_me - coordinate a partition suspend via RTAS.
 * @handle:      64-bit VASI stream id to validate before suspending.
 * @vasi_return: receives a status for the non-suspendable VASI states.
 *
 * Validates the VASI state, brings every present CPU online (the
 * suspend requires it), rendezvouses all CPUs so one makes the
 * "ibm,suspend-me" RTAS call, then restores the prior offline CPUs.
 *
 * Return: 0 on handled (including not-suspendable) states, the hcall rc
 * on a VASI query failure, -ENOSYS if the RTAS service is absent,
 * -ENOMEM on cpumask allocation failure, or the error recorded during
 * the global join.
 */
int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);
	cpumask_var_t offline_mask;
	int cpuret;

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		/* Stream exists but suspend has not been initiated yet. */
		*vasi_return = RTAS_NOT_SUSPENDABLE;
		return 0;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		*vasi_return = -1;
		return 0;
	}

	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
		return -ENOMEM;

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
	if (cpuret) {
		pr_err("%s: Could not bring present CPUs online.\n", __func__);
		atomic_set(&data.error, cpuret);
		goto out;
	}

	/* Quiesce topology updates for the duration of the suspend. */
	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	/* Take down CPUs not online prior to suspend */
	cpuret = rtas_offline_cpus_mask(offline_mask);
	if (cpuret)
		pr_warn("%s: Could not restore CPUs to offline state.\n",
			__func__);

out:
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
}
/*
 * plpar_xirr - read the XIRR word via H_XIRR.
 *
 * Only the first hcall output (the interrupt word) is kept; the other
 * outputs are discarded.  Returns the hcall status.
 */
long plpar_xirr(unsigned long *xirr_ret)
{
	unsigned long discard;

	return plpar_hcall(H_XIRR, 0, 0, 0, 0,
			   xirr_ret, &discard, &discard);
}
/*
 * plpar_ipoll - poll the interrupt state for @servernum via H_IPOLL.
 *
 * Stores the XIRR and MFRR output words; the third output is discarded.
 * Returns the hcall status.
 */
long plpar_ipoll(unsigned long servernum, unsigned long *xirr_ret, unsigned long *mfrr_ret)
{
	unsigned long discard;

	return plpar_hcall(H_IPOLL, servernum, 0, 0, 0,
			   xirr_ret, mfrr_ret, &discard);
}