static void ipaq_micro_trigger_tx(struct ipaq_micro *micro) { struct ipaq_micro_txdev *tx = µ->tx; struct ipaq_micro_msg *msg = micro->msg; int i, bp; u8 checksum; u32 val; bp = 0; tx->buf[bp++] = CHAR_SOF; checksum = ((msg->id & 0x0f) << 4) | (msg->tx_len & 0x0f); tx->buf[bp++] = checksum; for (i = 0; i < msg->tx_len; i++) { tx->buf[bp++] = msg->tx_data[i]; checksum += msg->tx_data[i]; } tx->buf[bp++] = checksum; tx->len = bp; tx->index = 0; print_hex_dump_debug("data: ", DUMP_PREFIX_OFFSET, 16, 1, tx->buf, tx->len, true); /* Enable interrupt */ val = readl(micro->base + UTCR3); val |= UTCR3_TIE; writel(val, micro->base + UTCR3); }
/*
 * Frame @data for the wire and hand it to the serdev layer.
 *
 * Wire format: STX, byte-stuffed payload, byte-stuffed checksum, ETX.
 * The checksum algorithm and width come from the variant description.
 * Returns what serdev_device_write() returns, or -ENOMEM if either
 * scratch buffer is too small for the request.
 */
static int rave_sp_write(struct rave_sp *sp, const u8 *data, u8 data_size)
{
	const size_t checksum_length = sp->variant->checksum->length;
	unsigned char crc[RAVE_SP_CHECKSUM_SIZE];
	unsigned char frame[RAVE_SP_TX_BUFFER_SIZE];
	unsigned char *p;
	size_t length;

	/* Variant-declared checksum must fit our scratch buffer */
	if (WARN_ON(checksum_length > sizeof(crc)))
		return -ENOMEM;

	if (WARN_ON(data_size > sizeof(frame)))
		return -ENOMEM;

	sp->variant->checksum->subroutine(data, data_size, crc);

	/* Assemble: STX, stuffed payload, stuffed checksum, ETX */
	p = frame;
	*p++ = RAVE_SP_STX;
	p = stuff(p, data, data_size);
	p = stuff(p, crc, checksum_length);
	*p++ = RAVE_SP_ETX;

	length = p - frame;

	print_hex_dump_debug("rave-sp tx: ", DUMP_PREFIX_NONE, 16, 1,
			     frame, length, false);

	return serdev_device_write(sp->serdev, frame, length, HZ);
}
/*
 * Allocate and fill the DSP configuration blob for @module_config.
 *
 * On success *param_data points to a kzalloc'ed buffer (caller owns it)
 * and *module_config_size holds its size in bytes. Returns 0 on success,
 * -ENOMEM if the allocation fails.
 */
static int skl_set_module_format(struct skl_sst *ctx,
				 struct skl_module_cfg *module_config,
				 u16 *module_config_size,
				 void **param_data)
{
	u16 param_size = skl_get_module_param_size(ctx, module_config);

	*param_data = kzalloc(param_size, GFP_KERNEL);
	if (!*param_data)
		return -ENOMEM;

	*module_config_size = param_size;

	/* Each module type serialises its own format structure */
	switch (module_config->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		skl_set_copier_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_SRCINT:
		skl_set_src_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_UPDWMIX:
		skl_set_updown_mixer_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_ALGO:
		skl_set_algo_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_MIC_SELECT:
	case SKL_MODULE_TYPE_KPB:
		skl_set_base_outfmt_format(ctx, module_config, *param_data);
		break;

	default:
		skl_set_base_module_format(ctx, module_config, *param_data);
		break;
	}

	dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
		module_config->id.module_id, param_size);
	print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
			     *param_data, param_size, false);
	return 0;
}
/*
 * Copy the compression header into the front of @buf.
 *
 * The compress path is expected to have reserved group[0].padding bytes
 * at the start of the output for this header; if the header is larger
 * than that reservation, something went wrong internally and we bail
 * with -EINVAL rather than corrupt the output.
 */
static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
{
	int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);

	/* compress should have added space for header */
	if (s > be16_to_cpu(hdr->group[0].padding)) {
		pr_err("Internal error: no space for header\n");
		return -EINVAL;
	}

	memcpy(buf, hdr, s);

	/* Last argument is the bool "ascii" flag — use false, not 0 */
	print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1,
			     buf, s, false);

	return 0;
}
static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, struct hdmi_avi_infoframe *frame) { void __iomem *av_base = hdmi_av_base(core); u8 data[HDMI_INFOFRAME_SIZE(AVI)]; int i; hdmi_avi_infoframe_pack(frame, data, sizeof(data)); print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data, HDMI_INFOFRAME_SIZE(AVI), false); for (i = 0; i < sizeof(data); ++i) { hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_BASE + i * 4, data[i]); } }
static int wil_ethtoolops_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *cp) { struct wil6210_priv *wil = ndev_to_wil(ndev); int ret; wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n", cp->rx_coalesce_usecs, cp->tx_coalesce_usecs); if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { wil_dbg_misc(wil, "No IRQ coalescing in monitor mode\n"); return -EINVAL; } /* only @rx_coalesce_usecs and @tx_coalesce_usecs supported, * ignore other parameters */ if (cp->rx_coalesce_usecs > WIL6210_ITR_TRSH_MAX || cp->tx_coalesce_usecs > WIL6210_ITR_TRSH_MAX) goto out_bad; wil->tx_max_burst_duration = cp->tx_coalesce_usecs; wil->rx_max_burst_duration = cp->rx_coalesce_usecs; ret = wil_pm_runtime_get(wil); if (ret < 0) return ret; wil_configure_interrupt_moderation(wil); wil_pm_runtime_put(wil); return 0; out_bad: wil_dbg_misc(wil, "Unsupported coalescing params. Raw command:\n"); print_hex_dump_debug("DBG[MISC] coal ", DUMP_PREFIX_OFFSET, 16, 4, cp, sizeof(*cp), false); return -EINVAL; }
static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb, unsigned int len) { unsigned int timeout; int ret; print_hex_dump_debug("trf7970a tx data: ", DUMP_PREFIX_NONE, 16, 1, skb->data, len, false); ret = spi_write(trf->spi, skb->data, len); if (ret) { dev_err(trf->dev, "%s - Can't send tx data: %d\n", __func__, ret); return ret; } skb_pull(skb, len); if (skb->len > 0) { trf->state = TRF7970A_ST_WAIT_FOR_TX_FIFO; timeout = TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT; } else { if (trf->issue_eof) { trf->state = TRF7970A_ST_WAIT_TO_ISSUE_EOF; timeout = TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF; } else { trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA; timeout = trf->timeout; } } dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n", timeout, trf->state); schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout)); return 0; }
static int pn533_usb_send_frame(struct pn533 *dev, struct sk_buff *out) { struct pn533_usb_phy *phy = dev->phy; int rc; if (phy->priv == NULL) phy->priv = dev; phy->out_urb->transfer_buffer = out->data; phy->out_urb->transfer_buffer_length = out->len; print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); rc = usb_submit_urb(phy->out_urb, GFP_KERNEL); if (rc) return rc; if (dev->protocol_type == PN533_PROTO_REQ_RESP) { /* request for response for sent packet directly */ rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC); if (rc) goto error; } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) { /* request for ACK if that's the case */ rc = pn533_submit_urb_for_ack(phy, GFP_KERNEL); if (rc) goto error; } return 0; error: usb_unlink_urb(phy->out_urb); return rc; }
int skl_ipc_init_instance(struct sst_generic_ipc *ipc, struct skl_ipc_init_instance_msg *msg, void *param_data) { struct skl_ipc_header header = {0}; u64 *ipc_header = (u64 *)(&header); int ret; u32 *buffer = (u32 *)param_data; /* param_block_size must be in dwords */ u16 param_block_size = msg->param_data_size / sizeof(u32); print_hex_dump_debug("Param data:", DUMP_PREFIX_NONE, 16, 4, buffer, param_block_size, false); header.primary = IPC_MSG_TARGET(IPC_MOD_MSG); header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST); header.primary |= IPC_GLB_TYPE(IPC_MOD_INIT_INSTANCE); header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id); header.primary |= IPC_MOD_ID(msg->module_id); header.extension = IPC_CORE_ID(msg->core_id); header.extension |= IPC_PPL_INSTANCE_ID(msg->ppl_instance_id); header.extension |= IPC_PARAM_BLOCK_SIZE(param_block_size); header.extension |= IPC_DOMAIN(msg->domain); dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__, header.primary, header.extension); ret = sst_ipc_tx_message_wait(ipc, *ipc_header, param_data, msg->param_data_size, NULL, 0); if (ret < 0) { dev_err(ipc->dev, "ipc: init instance failed\n"); return ret; } return ret; }
static void trf7970a_send_upstream(struct trf7970a *trf) { u8 rssi; dev_kfree_skb_any(trf->tx_skb); trf->tx_skb = NULL; if (trf->rx_skb && !IS_ERR(trf->rx_skb) && !trf->aborting) print_hex_dump_debug("trf7970a rx data: ", DUMP_PREFIX_NONE, 16, 1, trf->rx_skb->data, trf->rx_skb->len, false); /* According to the manual it is "good form" to reset the fifo and * read the RSSI levels & oscillator status register here. It doesn't * explain why. */ trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET); trf7970a_read(trf, TRF7970A_RSSI_OSC_STATUS, &rssi); trf->state = TRF7970A_ST_IDLE; if (trf->aborting) { dev_dbg(trf->dev, "Abort process complete\n"); if (!IS_ERR(trf->rx_skb)) { kfree_skb(trf->rx_skb); trf->rx_skb = ERR_PTR(-ECANCELED); } trf->aborting = false; } trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb); trf->rx_skb = NULL; }
/*
 * Dump @len bytes of @buf to the debug log: offset-prefixed rows of
 * 16 single bytes, with the ASCII column enabled.
 */
static void hexdump(const unsigned char *buf, unsigned short len)
{
	print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 1,
			     buf, len, true);
}
static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev, struct rpmsg_hdr *msg, unsigned int len) { struct rpmsg_endpoint *ept; struct scatterlist sg; int err; dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n", msg->src, msg->dst, msg->len, msg->flags, msg->reserved); #if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) print_hex_dump_debug("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1, msg, sizeof(*msg) + msg->len, true); #endif /* * We currently use fixed-sized buffers, so trivially sanitize * the reported payload length. */ if (len > RPMSG_BUF_SIZE || msg->len > (len - sizeof(struct rpmsg_hdr))) { dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len); return -EINVAL; } /* use the dst addr to fetch the callback of the appropriate user */ mutex_lock(&vrp->endpoints_lock); ept = idr_find(&vrp->endpoints, msg->dst); /* let's make sure no one deallocates ept while we use it */ if (ept) kref_get(&ept->refcount); mutex_unlock(&vrp->endpoints_lock); if (ept) { /* make sure ept->cb doesn't go away while we use it */ mutex_lock(&ept->cb_lock); if (ept->cb) ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src); mutex_unlock(&ept->cb_lock); /* farewell, ept, we don't need you anymore */ kref_put(&ept->refcount, __ept_release); } else dev_warn(dev, "msg received with no recipient\n"); /* publish the real size of the buffer */ sg_init_one(&sg, msg, RPMSG_BUF_SIZE); /* add the buffer back to the remote processor's virtqueue */ err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL); if (err < 0) { dev_err(dev, "failed to add a virtqueue buffer: %d\n", err); return err; } return 0; }
/**
 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
 * @rpdev: the rpmsg channel
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 * @wait: indicates whether caller should block in case no TX buffers available
 *
 * This function is the base implementation for all of the rpmsg sending API.
 *
 * It will send @data of length @len to @dst, and say it's from @src. The
 * message will be sent to the remote processor which the @rpdev channel
 * belongs to.
 *
 * The message is sent using one of the TX buffers that are available for
 * communication with this remote processor.
 *
 * If @wait is true, the caller will be blocked until either a TX buffer is
 * available, or 15 seconds elapses (we don't want callers to
 * sleep indefinitely due to misbehaving remote processors), and in that
 * case -ERESTARTSYS is returned. The number '15' itself was picked
 * arbitrarily; there's little point in asking drivers to provide a timeout
 * value themselves.
 *
 * Otherwise, if @wait is false, and there are no TX buffers available,
 * the function will immediately fail, and -ENOMEM will be returned.
 *
 * Normally drivers shouldn't use this function directly; instead, drivers
 * should use the appropriate rpmsg_{try}send{to, _offchannel} API
 * (see include/linux/rpmsg.h).
 *
 * Returns 0 on success and an appropriate error value on failure.
 */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
			      void *data, int len, bool wait)
{
	struct virtproc_info *vrp = rpdev->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;

	/* bcasting isn't allowed */
	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/*
	 * We currently use fixed-sized buffers, and therefore the payload
	 * length is limited.
	 *
	 * One of the possible improvements here is either to support
	 * user-provided buffers (and then we can also support zero-copy
	 * messaging), or to improve the buffer allocator, to support
	 * variable-length buffer sizes.
	 */
	if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/* grab a buffer */
	msg = get_a_tx_buf(vrp);
	if (!msg && !wait)
		return -ENOMEM;

	/* no free buffer ? wait for one (but bail after 15 seconds) */
	while (!msg) {
		/* enable "tx-complete" interrupts, if not already enabled */
		rpmsg_upref_sleepers(vrp);

		/*
		 * sleep until a free buffer is available or 15 secs elapse.
		 * the timeout period is not configurable because there's
		 * little point in asking drivers to specify that.
		 * if later this happens to be required, it'd be easy to add.
		 *
		 * NOTE(review): if a signal arrives, this returns a negative
		 * value with msg still NULL and the loop re-sleeps — confirm
		 * that immediate retry on signal is the intended behavior.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_tx_buf(vrp)),
					msecs_to_jiffies(15000));

		/* disable "tx-complete" interrupts if we're the last sleeper */
		rpmsg_downref_sleepers(vrp);

		/* timeout ? */
		if (!err) {
			dev_err(dev, "timeout waiting for a tx buffer\n");
			return -ERESTARTSYS;
		}
	}

	/* fill in the header, then append the payload */
	msg->len = len;
	msg->flags = 0;
	msg->src = src;
	msg->dst = dst;
	msg->reserved = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
		msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
	print_hex_dump_debug("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
			     msg, sizeof(*msg) + msg->len, true);
#endif

	/* header + payload go out as one contiguous buffer */
	sg_init_one(&sg, msg, sizeof(*msg) + len);

	mutex_lock(&vrp->tx_lock);

	/* add message to the remote processor's virtqueue */
	err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
	if (err) {
		/*
		 * need to reclaim the buffer here, otherwise it's lost
		 * (memory won't leak, but rpmsg won't use it again for TX).
		 * this will wait for a buffer management overhaul.
		 */
		dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
		goto out;
	}

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);
out:
	mutex_unlock(&vrp->tx_lock);
	return err;
}