/*
 * glink_smem_rx_avail() - number of bytes readable from the shared RX fifo
 * @np: native pipe handle, embedded in a glink_smem_pipe
 *
 * Lazily acquires the RX fifo from SMEM on first use (the remote side
 * allocates it, so it may not exist yet when we first poll).  The head and
 * tail indices live in shared memory and are written by the remote
 * processor; out-of-range values are treated as "nothing available" so a
 * corrupt or crashed remote cannot make us report a length larger than the
 * fifo itself.
 *
 * Return: number of readable bytes, or 0 if the fifo is absent or the
 *         shared indices are invalid.
 */
static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	void *fifo;
	u32 head;
	u32 tail;

	if (!pipe->fifo) {
		fifo = qcom_smem_get(pipe->remote_pid,
				     SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
		if (IS_ERR(fifo)) {
			pr_err("failed to acquire RX fifo handle: %ld\n",
			       PTR_ERR(fifo));
			return 0;
		}

		pipe->fifo = fifo;
		pipe->native.length = len;
	}

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	/*
	 * The indices are under control of the (possibly misbehaving)
	 * remote; clamp obviously corrupt values instead of letting the
	 * wrap-around arithmetic below produce a huge size_t.
	 */
	if (head > pipe->native.length || tail > pipe->native.length)
		return 0;

	if (head < tail)
		return pipe->native.length - tail + head;
	else
		return head - tail;
}
static irqreturn_t q6v5_fatal_interrupt(int irq, void *data) { struct qcom_q6v5 *q6v5 = data; size_t len; char *msg; msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len); if (!IS_ERR(msg) && len > 0 && msg[0]) dev_err(q6v5->dev, "fatal error received: %s\n", msg); else dev_err(q6v5->dev, "fatal error without message\n"); rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR); return IRQ_HANDLED; }
static irqreturn_t q6v5_wdog_interrupt(int irq, void *data) { struct qcom_q6v5 *q6v5 = data; size_t len; char *msg; /* Sometimes the stop triggers a watchdog rather than a stop-ack */ if (!q6v5->running) { complete(&q6v5->stop_done); return IRQ_HANDLED; } msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, q6v5->crash_reason, &len); if (!IS_ERR(msg) && len > 0 && msg[0]) dev_err(q6v5->dev, "watchdog received: %s\n", msg); else dev_err(q6v5->dev, "watchdog without message\n"); rproc_report_crash(q6v5->rproc, RPROC_WATCHDOG); return IRQ_HANDLED; }
static void smem_debug_read_mem(struct seq_file *s) { u32 *info; size_t size; int ret, i; long flags; info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HEAP_INFO, &size); if (IS_ERR(info)) seq_printf(s, "Can't get global heap information pool\n"); else { seq_printf(s, "global heap\n"); seq_printf(s, " initialized: %d offset: %08x avail: %08x\n", info[0], info[1], info[2]); for (i = 0; i < 512; i++) { info = qcom_smem_get(QCOM_SMEM_HOST_ANY, i, &size); if (IS_ERR(info)) continue; seq_printf(s, " [%d]: p: %p s: %li\n", i, info, size); } } seq_printf(s, "\nSecure partitions accessible from APPS:\n"); ret = hwspin_lock_timeout_irqsave(__smem->hwlock, HWSPINLOCK_TIMEOUT, &flags); for (i = 0; i < SMEM_HOST_COUNT; i++) { struct smem_partition_header *part_hdr = __smem->partitions[i]; void *p; if (!part_hdr) continue; if (part_hdr->magic != SMEM_PART_MAGIC) { seq_printf(s, " part[%d]: incorrect magic\n", i); continue; } seq_printf(s, " part[%d]: (%d <-> %d) size: %d off: %08x\n", i, part_hdr->host0, part_hdr->host1, part_hdr->size, part_hdr->offset_free_uncached); p = (void *)part_hdr + sizeof(*part_hdr); while (p < (void *)part_hdr + part_hdr->offset_free_uncached) { struct smem_private_entry *entry = p; seq_printf(s, " [%d]: %s size: %d pd: %d\n", entry->item, (entry->canary == SMEM_PRIVATE_CANARY) ? "valid" : "invalid", entry->size, entry->padding_data); p += sizeof(*entry) + entry->padding_hdr + entry->size; } } hwspin_unlock_irqrestore(__smem->hwlock, &flags); }
/*
 * qcom_glink_smem_register() - register a glink edge backed by SMEM fifos
 * @parent: parent device of the new edge device
 * @node: device_node describing the glink edge
 *
 * Creates and registers a child device for the edge, reads the remote
 * processor id from "qcom,remote-pid", allocates/acquires the shared
 * four-word index descriptor and the TX fifo in SMEM, and hands the
 * resulting pipe pair to the native glink core.  The RX fifo is not
 * acquired here; glink_smem_rx_avail() fetches it lazily once the remote
 * side has allocated it.
 *
 * Return: qcom_glink handle on success, ERR_PTR() on failure.
 */
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
					    struct device_node *node)
{
	struct glink_smem_pipe *rx_pipe;
	struct glink_smem_pipe *tx_pipe;
	struct qcom_glink *glink;
	struct device *dev;
	u32 remote_pid;
	__le32 *descs;
	size_t size;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->parent = parent;
	dev->of_node = node;
	dev->release = qcom_glink_smem_release;
	dev_set_name(dev, "%s:%s", node->parent->name, node->name);
	ret = device_register(dev);
	if (ret) {
		pr_err("failed to register glink edge\n");
		/*
		 * Registration failed: drop the reference; the release
		 * callback set above is responsible for freeing dev.
		 */
		put_device(dev);
		return ERR_PTR(ret);
	}

	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
				   &remote_pid);
	if (ret) {
		dev_err(dev, "failed to parse qcom,remote-pid\n");
		goto err_put_dev;
	}

	/* devm allocations are released along with the edge device */
	rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
	tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
	if (!rx_pipe || !tx_pipe) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	/* -EEXIST is fine: the descriptor may already have been allocated */
	ret = qcom_smem_alloc(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate glink descriptors\n");
		goto err_put_dev;
	}

	descs = qcom_smem_get(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
	if (IS_ERR(descs)) {
		dev_err(dev, "failed to acquire xprt descriptor\n");
		ret = PTR_ERR(descs);
		goto err_put_dev;
	}

	if (size != 32) {
		dev_err(dev, "glink descriptor of invalid size\n");
		ret = -EINVAL;
		goto err_put_dev;
	}

	/* descriptor layout: TX tail, TX head, RX tail, RX head (__le32) */
	tx_pipe->tail = &descs[0];
	tx_pipe->head = &descs[1];
	rx_pipe->tail = &descs[2];
	rx_pipe->head = &descs[3];

	/* -EEXIST is fine here too: fifo may survive a remote restart */
	ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
			      SZ_16K);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate TX fifo\n");
		goto err_put_dev;
	}

	tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
				      &tx_pipe->native.length);
	if (IS_ERR(tx_pipe->fifo)) {
		dev_err(dev, "failed to acquire TX fifo\n");
		ret = PTR_ERR(tx_pipe->fifo);
		goto err_put_dev;
	}

	rx_pipe->native.avail = glink_smem_rx_avail;
	rx_pipe->native.peak = glink_smem_rx_peak;
	rx_pipe->native.advance = glink_smem_rx_advance;
	rx_pipe->remote_pid = remote_pid;

	tx_pipe->native.avail = glink_smem_tx_avail;
	tx_pipe->native.write = glink_smem_tx_write;
	tx_pipe->remote_pid = remote_pid;

	/* reset our ends of the shared indices before the core starts */
	*rx_pipe->tail = 0;
	*tx_pipe->head = 0;

	glink = qcom_glink_native_probe(dev,
					GLINK_FEATURE_INTENT_REUSE,
					&rx_pipe->native, &tx_pipe->native,
					false);
	if (IS_ERR(glink)) {
		ret = PTR_ERR(glink);
		goto err_put_dev;
	}

	return glink;

err_put_dev:
	/* device_register() succeeded by this point, so unregister */
	device_unregister(dev);

	return ERR_PTR(ret);
}