static void socket_data_ready(struct sock *sk_ptr, int bytes)
{
	unsigned long flags;
	struct diag_socket_info *info = NULL;

	if (!sk_ptr) {
		pr_err_ratelimited("diag: In %s, invalid sk_ptr\n", __func__);
		return;
	}

	info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
	if (!info) {
		pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
		return;
	}

	spin_lock_irqsave(&info->lock, flags);
	info->data_ready++;
	spin_unlock_irqrestore(&info->lock, flags);
	diag_ws_on_notify();

	/*
	 * Initialize read buffers for the servers. The servers must read
	 * data first to get the addresses of their clients.
	 */
	if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
		diagfwd_buffers_init(info->fwd_ctxt);

	queue_work(info->wq, &(info->read_work));
	wake_up_interruptible(&info->read_wait_q);
}
static int rmnet_usb_ctrl_dmux(struct ctrl_pkt_list_elem *clist)
{
	struct mux_hdr *hdr;
	size_t pad_len;
	size_t total_len;
	unsigned int mux_id;

	hdr = (struct mux_hdr *)clist->cpkt.data;
	pad_len = hdr->padding_info & MUX_CTRL_PADLEN_MASK;
	if (pad_len > MAX_PAD_BYTES(4)) {
		pr_err_ratelimited("%s: Invalid pad len %zu\n", __func__,
				   pad_len);
		return -EINVAL;
	}

	mux_id = hdr->mux_id;
	if (!mux_id || mux_id > insts_per_dev) {
		pr_err_ratelimited("%s: Invalid mux id %u\n", __func__,
				   mux_id);
		return -EINVAL;
	}

	total_len = ntohs(hdr->pkt_len_w_padding);
	if (!total_len || !(total_len - pad_len)) {
		pr_err_ratelimited("%s: Invalid pkt length %zu\n", __func__,
				   total_len);
		return -EINVAL;
	}

	clist->cpkt.data_size = total_len - pad_len;

	return mux_id - 1;
}
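/*
 * For reference: a plausible layout of the MUX header that
 * rmnet_usb_ctrl_dmux() above and rmnet_usb_data_dmux() below both parse.
 * This is a hedged sketch inferred purely from the field accesses
 * (pkt_len_w_padding, mux_id, padding_info); the real struct mux_hdr in the
 * rmnet_usb driver headers is authoritative for field order and width.
 */
struct mux_hdr_sketch {
	u16 pkt_len_w_padding;	/* total length incl. pad bytes; note the
				 * ctrl path reads it with ntohs() while the
				 * data path uses le16_to_cpu(), so the
				 * on-wire endianness cannot be inferred
				 * from these two call sites alone
				 */
	u8 mux_id;		/* 1-based channel id; 0 is invalid */
	u8 padding_info;	/* encodes the pad length: masked with
				 * MUX_CTRL_PADLEN_MASK on the ctrl path,
				 * shifted by MUX_PAD_SHIFT on the data path
				 */
};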
void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
{
	int i = 0;
	unsigned long flags;
	struct diag_mempool_t *mempool = NULL;

	if (!driver || !buf)
		return;

	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
		mempool = &diag_mempools[i];
		if (pool_type != mempool->id)
			continue;
		if (!mempool->pool) {
			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
					   mempool->name);
			break;
		}
		spin_lock_irqsave(&mempool->lock, flags);
		if (mempool->count > 0) {
			mempool_free(buf, mempool->pool);
			atomic_add(-1, (atomic_t *)&mempool->count);
		} else {
			pr_err_ratelimited("diag: Attempting to free items from %s mempool which is already empty\n",
					   mempool->name);
		}
		spin_unlock_irqrestore(&mempool->lock, flags);
		break;
	}
}
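/*
 * Minimal usage sketch for the diag mempool API above, assuming the
 * diagmem_alloc() signature seen in hsic_read_work_fn() below. The buffer
 * size and pool id are illustrative only; the invariant shown is that every
 * successful diagmem_alloc() must be balanced by diagmem_free() on the same
 * pool once the buffer is no longer in flight.
 */
static void diag_pool_roundtrip_example(void)
{
	unsigned char *buf;

	buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE, POOL_TYPE_HSIC);
	if (!buf)
		return;		/* pool exhausted or not yet initialized */

	/* ... fill buf and hand it off; on any error path, return it: */
	diagmem_free(driver, buf, POOL_TYPE_HSIC);
}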
static int hsic_write(int id, unsigned char *buf, int len, int ctxt)
{
	int err = 0;
	struct diag_hsic_info *ch = NULL;

	if (id < 0 || id >= NUM_HSIC_DEV) {
		pr_err_ratelimited("diag: In %s, invalid index %d\n",
				   __func__, id);
		return -EINVAL;
	}

	if (!buf || len <= 0) {
		pr_err_ratelimited("diag: In %s, ch %d, invalid buf %pK len %d\n",
				   __func__, id, buf, len);
		return -EINVAL;
	}

	ch = &diag_hsic[id];
	if (!ch->opened || !ch->enabled) {
		pr_debug_ratelimited("diag: In %s, ch %d is disabled. opened %d enabled: %d\n",
				     __func__, id, ch->opened, ch->enabled);
		return -EIO;
	}

	err = diag_bridge_write(ch->id, buf, len);
	if (err)
		pr_err_ratelimited("diag: cannot write to HSIC ch %d, err: %d\n",
				   ch->id, err);

	return err;
}
static int rmnet_usb_data_dmux(struct sk_buff *skb, struct urb *rx_urb)
{
	struct mux_hdr *hdr;
	size_t pad_len;
	size_t total_len;
	unsigned int mux_id;

	hdr = (struct mux_hdr *)skb->data;
	mux_id = hdr->mux_id;
	if (!mux_id || mux_id > no_rmnet_insts_per_dev) {
		pr_err_ratelimited("%s: Invalid data channel id %u.\n",
				   __func__, mux_id);
		return -EINVAL;
	}

	pad_len = hdr->padding_info >> MUX_PAD_SHIFT;
	if (pad_len > MAX_PAD_BYTES(4)) {
		pr_err_ratelimited("%s: Invalid pad len %zu\n", __func__,
				   pad_len);
		return -EINVAL;
	}

	total_len = le16_to_cpu(hdr->pkt_len_w_padding);
	if (!total_len || !(total_len - pad_len)) {
		pr_err_ratelimited("%s: Invalid pkt length %zu\n", __func__,
				   total_len);
		return -EINVAL;
	}

	skb->data = (unsigned char *)(hdr + 1);
	skb_reset_tail_pointer(skb);
	rx_urb->actual_length = total_len - pad_len;

	return mux_id - 1;
}
static void ksb_tomdm_work(struct work_struct *w)
{
	struct ks_bridge *ksb = container_of(w, struct ks_bridge, to_mdm_work);
	struct data_pkt *pkt;
	unsigned long flags;
	struct urb *urb;
	int ret;

	spin_lock_irqsave(&ksb->lock, flags);
	while (!list_empty(&ksb->to_mdm_list) &&
	       test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
		pkt = list_first_entry(&ksb->to_mdm_list, struct data_pkt,
				       list);
		list_del_init(&pkt->list);
		spin_unlock_irqrestore(&ksb->lock, flags);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			pr_err_ratelimited("%s: unable to allocate urb",
					   ksb->fs_dev.name);
			ksb_free_data_pkt(pkt);
			return;
		}

		ret = usb_autopm_get_interface(ksb->ifc);
		if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("%s: autopm_get failed:%d",
					   ksb->fs_dev.name, ret);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			return;
		}

		usb_fill_bulk_urb(urb, ksb->udev, ksb->out_pipe, pkt->buf,
				  pkt->len, ksb_tx_cb, pkt);
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S TX_URB", pkt->len, 0);
		atomic_inc(&ksb->tx_pending_cnt);

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&ksb->udev->dev, "out urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			usb_autopm_put_interface(ksb->ifc);
			atomic_dec(&ksb->tx_pending_cnt);
			wake_up(&ksb->pending_urb_wait);
			return;
		}

		usb_mark_last_busy(ksb->udev);
		usb_free_urb(urb);
		spin_lock_irqsave(&ksb->lock, flags);
	}
	spin_unlock_irqrestore(&ksb->lock, flags);
}
static void hsic_read_work_fn(struct work_struct *work)
{
	int err = 0;
	unsigned char *buf = NULL;
	struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
						 read_work);

	if (!ch || !ch->enabled || !ch->opened)
		return;

	do {
		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE, ch->mempool);
		if (!buf) {
			err = -ENOMEM;
			break;
		}

		err = diag_bridge_read(ch->id, buf, DIAG_MDM_BUF_SIZE);
		if (err) {
			diagmem_free(driver, buf, ch->mempool);
			pr_err_ratelimited("diag: Unable to read from HSIC channel %d, err: %d\n",
					   ch->id, err);
			break;
		}
	} while (buf);

	/* Read from the HSIC channel continuously if the channel is present */
	if (!err)
		queue_work(ch->hsic_wq, &ch->read_work);
}
static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
				   unsigned int cmd, void *arg)
{
	struct ispif_device *ispif =
		(struct ispif_device *)v4l2_get_subdevdata(sd);

	switch (cmd) {
	case VIDIOC_MSM_ISPIF_CFG:
		return msm_ispif_cmd(sd, arg);
	case MSM_SD_NOTIFY_FREEZE:
		ispif->ispif_sof_debug = 0;
		return 0;
	case MSM_SD_SHUTDOWN:
		/* ispif was already fetched from the subdev above */
		if (ispif && ispif->base)
			msm_ispif_release(ispif);
		return 0;
	default:
		pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
				   __func__, cmd);
		return -ENOIOCTLCMD;
	}
}
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	offload_ctx->aead_send =
		crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}
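/*
 * Hedged caller sketch for tls_sw_fallback_init(). In the kernel this is
 * invoked from the TLS device-offload setup path; the names my_ctx/my_info
 * below are illustrative stand-ins, not the kernel's. Note the init routine
 * above only handles AES-GCM-128 and leaves aead_send NULL on failure.
 */
static int tls_fallback_setup_example(struct sock *sk,
				      struct tls_offload_context_tx *my_ctx,
				      struct tls12_crypto_info_aes_gcm_128 *my_info)
{
	int rc;

	/* the generic tls_crypto_info header is the first member */
	rc = tls_sw_fallback_init(sk, my_ctx, &my_info->info);
	if (rc)
		return rc;	/* my_ctx->aead_send is NULL here */

	/* ... my_ctx->aead_send is now usable for the SW fallback path */
	return 0;
}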
static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
				   unsigned int cmd, void *arg)
{
#ifdef CONFIG_COMPAT
	void __user *up;

	if (is_compat_task()) {
		up = (void __user *)compat_ptr((unsigned long)arg);
		arg = up;
	}
#endif
	switch (cmd) {
	case VIDIOC_MSM_ISPIF_CFG:
		return msm_ispif_cmd(sd, arg);
	case MSM_SD_SHUTDOWN: {
		struct ispif_device *ispif =
			(struct ispif_device *)v4l2_get_subdevdata(sd);

		if (ispif && ispif->base)
			msm_ispif_release(ispif);
		return 0;
	}
	default:
		pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
				   __func__, cmd);
		return -ENOIOCTLCMD;
	}
}
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);

	pr_debug("status:%d actual:%d", urb->status, urb->actual_length);

	if (urb->status < 0) {
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT)
			pr_err_ratelimited("urb failed with err:%d",
					   urb->status);
		ksb_free_data_pkt(pkt);
		return;
	}

	if (urb->actual_length == 0) {
		submit_one_urb(ksb, GFP_ATOMIC, pkt);
		return;
	}

	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);

	/* wake up read thread */
	wake_up(&ksb->ks_wait_q);
}
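/*
 * submit_one_urb() is called above but its body is not shown here. A
 * minimal sketch of what it plausibly does: resubmit one bulk-in URB,
 * reusing the already-allocated data packet. This is an assumption based
 * on how the rx path above and ksb_start_rx_work() below handle URBs, not
 * the driver's actual definition.
 */
static void submit_one_urb(struct ks_bridge *ksb, gfp_t flags,
			   struct data_pkt *pkt)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, flags);
	if (!urb) {
		ksb_free_data_pkt(pkt);
		return;
	}

	/* reuse the packet's buffer for the next bulk-in transfer */
	usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe, pkt->buf,
			  MAX_DATA_PKT_SIZE, ksb_rx_cb, pkt);
	usb_anchor_urb(urb, &ksb->submitted);

	atomic_inc(&ksb->rx_pending_cnt);
	ret = usb_submit_urb(urb, flags);
	if (ret) {
		usb_unanchor_urb(urb);
		ksb_free_data_pkt(pkt);
		atomic_dec(&ksb->rx_pending_cnt);
		wake_up(&ksb->pending_urb_wait);
	}
	usb_free_urb(urb);
}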
static void diag_hsic_read_complete_callback(void *ctxt, char *buf,
					     int buf_size, int actual_size)
{
	int err = -2;

	if (!driver->hsic_ch) {
		/*
		 * The hsic channel is closed. Return the buffer to
		 * the pool. Do not send it on.
		 */
		diagmem_free(driver, buf, POOL_TYPE_HSIC);
		pr_debug("diag: In %s: driver->hsic_ch == 0, actual_size: %d\n",
			 __func__, actual_size);
		return;
	}

	/*
	 * Note that zero length is valid and still needs to be sent to
	 * the USB only when we are logging data to the USB
	 */
	if ((actual_size > 0) ||
	    ((actual_size == 0) && (driver->logging_mode == USB_MODE))) {
		if (!buf) {
			pr_err("diag: Out of diagmem for HSIC\n");
		} else {
			/*
			 * Send data in buf to be written on the
			 * appropriate device, e.g. USB MDM channel
			 */
			driver->write_len_mdm = actual_size;
			err = diag_device_write((void *)buf, HSIC_DATA, NULL);
			/* If an error, return buffer to the pool */
			if (err) {
				diagmem_free(driver, buf, POOL_TYPE_HSIC);
				pr_err_ratelimited("diag: In %s, error calling diag_device_write, err: %d\n",
						   __func__, err);
			}
		}
	} else {
		/*
		 * The buffer has an error status associated with it. Do not
		 * pass it on. Note that -ENOENT is sent when the diag bridge
		 * is closed.
		 */
		diagmem_free(driver, buf, POOL_TYPE_HSIC);
		pr_debug("diag: In %s: error status: %d\n", __func__,
			 actual_size);
	}

	/*
	 * If for some reason there was no hsic data to write to the
	 * mdm channel, set up another read
	 */
	if (err &&
	    ((driver->logging_mode == MEMORY_DEVICE_MODE) ||
	     (driver->usb_mdm_connected && !driver->hsic_suspend)))
		queue_work(driver->diag_hsic_wq,
			   &driver->diag_read_hsic_work);
}
int diag_remote_dev_read_done(int id, unsigned char *buf, int len)
{
	int err = 0;
	struct diagfwd_bridge_info *ch = NULL;

	if (id < 0 || id >= NUM_REMOTE_DEV)
		return -EINVAL;

	ch = &bridge_info[id];
	if (ch->type == DIAG_DATA_TYPE) {
		err = diag_mux_write(BRIDGE_TO_MUX(id), buf, len, id);
		if (ch->dev_ops && ch->dev_ops->queue_read)
			ch->dev_ops->queue_read(ch->ctxt);
		return err;
	}

	/*
	 * For DCI channels copy to the internal buffer. Don't queue any
	 * further reads. A read should be queued once we are done processing
	 * the current packet.
	 */
	if (len <= 0 || len > DIAG_MDM_BUF_SIZE) {
		pr_err_ratelimited("diag: Invalid len %d in %s, ch: %s\n",
				   len, __func__, ch->name);
		return -EINVAL;
	}

	ch->dci_read_ptr = buf;
	memcpy(ch->dci_read_buf, buf, len);
	ch->dci_read_len = len;
	queue_work(ch->dci_wq, &ch->dci_read_work);

	return 0;
}
static long msm_sensor_init_subdev_ioctl(struct v4l2_subdev *sd,
					 unsigned int cmd, void *arg)
{
	int32_t rc = 0;
	struct msm_sensor_init_t *s_init = v4l2_get_subdevdata(sd);

	CDBG("Enter");

	/* Validate input parameters */
	if (!s_init) {
		pr_err("failed: s_init %p", s_init);
		return -EINVAL;
	}

	switch (cmd) {
	case VIDIOC_MSM_SENSOR_INIT_CFG:
		rc = msm_sensor_driver_cmd(s_init, arg);
		break;
	default:
		pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
				   __func__, cmd);
		break;
	}

	return rc;
}
static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
					 struct xenvif *vif)
{
	int err = 0;
	char *node;
	unsigned int maxlen = strlen(dev->otherend) +
		sizeof("/request-multicast-control");

	if (vif->mcast_ctrl_watch.node) {
		pr_err_ratelimited("Watch is already registered\n");
		return -EADDRINUSE;
	}

	node = kmalloc(maxlen, GFP_KERNEL);
	if (!node) {
		pr_err("Failed to allocate memory for watch\n");
		return -ENOMEM;
	}

	snprintf(node, maxlen, "%s/request-multicast-control",
		 dev->otherend);
	vif->mcast_ctrl_watch.node = node;
	vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
	err = register_xenbus_watch(&vif->mcast_ctrl_watch);
	if (err) {
		pr_err("Failed to set watcher %s\n",
		       vif->mcast_ctrl_watch.node);
		kfree(node);
		vif->mcast_ctrl_watch.node = NULL;
		vif->mcast_ctrl_watch.callback = NULL;
	}

	return err;
}
static void usb_read_work_fn(struct work_struct *work)
{
	unsigned long flags;
	struct diag_request *req = NULL;
	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
						read_work);

	if (!ch)
		return;

	if (!ch->connected || !ch->enabled || ch->read_pending) {
		pr_debug_ratelimited("diag: Discarding USB read, ch: %s connected: %d, enabled: %d, pending: %d\n",
				     ch->name, ch->connected, ch->enabled,
				     ch->read_pending);
		return;
	}

	spin_lock_irqsave(&ch->lock, flags);
	req = ch->read_ptr;
	if (req) {
		ch->read_pending = 1;
		req->buf = ch->read_buf;
		req->length = USB_MAX_OUT_BUF;
		usb_diag_read(ch->hdl, req);
	} else {
		pr_err_ratelimited("diag: In %s invalid read req\n", __func__);
	}
	spin_unlock_irqrestore(&ch->lock, flags);
}
static long msm_led_flash_subdev_ioctl(struct v4l2_subdev *sd,
				       unsigned int cmd, void *arg)
{
	struct msm_led_flash_ctrl_t *fctrl = NULL;
	void __user *argp = (void __user *)arg;

	if (!sd) {
		pr_err("sd NULL\n");
		return -EINVAL;
	}

	fctrl = v4l2_get_subdevdata(sd);
	if (!fctrl) {
		pr_err("fctrl NULL\n");
		return -EINVAL;
	}

	switch (cmd) {
	case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
		return fctrl->func_tbl->flash_get_subdev_id(fctrl, argp);
	case VIDIOC_MSM_FLASH_LED_DATA_CFG:
		return fctrl->func_tbl->flash_led_config(fctrl, argp);
	case MSM_SD_SHUTDOWN:
		*(int *)argp = MSM_CAMERA_LED_RELEASE;
		return fctrl->func_tbl->flash_led_config(fctrl, argp);
	default:
		pr_err_ratelimited("invalid cmd %d\n", cmd);
		return -ENOIOCTLCMD;
	}
}
static long msm_csid_subdev_ioctl32(struct v4l2_subdev *sd,
				    unsigned int cmd, void *arg)
{
	int rc = -ENOIOCTLCMD;
	struct csid_device *csid_dev = v4l2_get_subdevdata(sd);

	mutex_lock(&csid_dev->mutex);
	CDBG("%s:%d id %d\n", __func__, __LINE__, csid_dev->pdev->id);
	switch (cmd) {
	case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
		rc = msm_csid_get_subdev_id(csid_dev, arg);
		break;
	case VIDIOC_MSM_CSID_IO_CFG32:
		rc = msm_csid_cmd32(csid_dev, arg);
		break;
	case VIDIOC_MSM_CSID_RELEASE:
	case MSM_SD_SHUTDOWN:
		rc = msm_csid_release(csid_dev);
		break;
	default:
		pr_err_ratelimited("%s: command not found\n", __func__);
	}
	CDBG("%s:%d\n", __func__, __LINE__);
	mutex_unlock(&csid_dev->mutex);
	return rc;
}
static void diag_hsic_read_complete(void *ctxt, char *buf, int len,
				    int actual_size)
{
	int err = 0;
	int index = (int)(uintptr_t)ctxt;
	struct diag_hsic_info *ch = NULL;

	if (index < 0 || index >= NUM_HSIC_DEV) {
		pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
				   __func__, index);
		return;
	}
	ch = &diag_hsic[index];

	/*
	 * Don't pass on the buffer if the channel is closed when a pending
	 * read completes. Also, actual_size can be a negative error code;
	 * do not pass on the buffer in that case either.
	 */
	if (!ch->opened || actual_size <= 0)
		goto fail;

	err = diag_remote_dev_read_done(ch->dev_id, buf, actual_size);
	if (err)
		goto fail;
	return;

fail:
	diagmem_free(driver, buf, ch->mempool);
	queue_work(ch->hsic_wq, &ch->read_work);
}
static void qpnpint_irq_mask(struct irq_data *d)
{
	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
	struct q_chip_data *chip_d = irq_d->chip_d;
	struct q_perip_data *per_d = irq_d->per_d;
	int rc;

	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

	if (!chip_d->cb) {
		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
				    chip_d->bus_nr, irq_d->spmi_slave,
				    irq_d->spmi_offset);
		return;
	}

	qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);

	per_d->int_en &= ~irq_d->mask_shift;

	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
				(u8 *)&irq_d->mask_shift, 1);
	if (rc) {
		pr_err_ratelimited("spmi failure on irq %d\n", d->irq);
		return;
	}

	pr_debug("done hwirq %lu irq: %d\n", d->hwirq, d->irq);
}
static inline void dbg_save_state(int cpu)
{
	int i, j;

	i = cpu * MAX_DBG_REGS;

	switch (dbg.arch) {
	case ARM_DEBUG_ARCH_V8:
		dbg_write(OSLOCK_MAGIC, DBGOSLAR);
		isb();

		dbg.state[i++] = dbg_read(DBGDSCRext);
		for (j = 0; j < dbg.nr_bp; j++)
			i = dbg_read_arch32_bxr(dbg.state, i, j);
		for (j = 0; j < dbg.nr_wp; j++)
			i = dbg_read_arch32_wxr(dbg.state, i, j);
		dbg.state[i++] = dbg_read(DBGDCCINT);
		dbg.state[i++] = dbg_read(DBGCLAIMCLR);
		dbg.state[i++] = dbg_read(DBGOSECCR);
		dbg.state[i++] = dbg_read(DBGDTRRXext);
		dbg.state[i++] = dbg_read(DBGDTRTXext);
		isb();

		dbg_write(0x1, DBGOSDLR);
		isb();
		break;
	default:
		pr_err_ratelimited("unsupported dbg arch %d in %s\n",
				   dbg.arch, __func__);
	}
}
static inline void dbg_save_state(int cpu)
{
	int i, j;

	i = cpu * MAX_DBG_REGS;

	switch (dbg.arch) {
	case ARM_DEBUG_ARCH_V8:
		dbg_write(0x1, OSLAR_EL1);
		isb();

		dbg.state[i++] = (uint32_t)dbg_readl(MDSCR_EL1);
		for (j = 0; j < dbg.nr_bp; j++)
			i = dbg_read_arch64_bxr((uint64_t *)dbg.state, i, j);
		for (j = 0; j < dbg.nr_wp; j++)
			i = dbg_read_arch64_wxr((uint64_t *)dbg.state, i, j);
		dbg.state[i++] = (uint32_t)dbg_readl(MDCCINT_EL1);
		dbg.state[i++] = (uint32_t)dbg_readl(DBGCLAIMCLR_EL1);
		dbg.state[i++] = (uint32_t)dbg_readl(OSECCR_EL1);
		dbg.state[i++] = (uint32_t)dbg_readl(OSDTRRX_EL1);
		dbg.state[i++] = (uint32_t)dbg_readl(OSDTRTX_EL1);
		isb();

		dbg_write(0x1, OSDLR_EL1);
		isb();
		break;
	default:
		pr_err_ratelimited("unsupported dbg arch %d in %s\n",
				   dbg.arch, __func__);
	}
}
static void ksb_tx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;

	dbg_log_event(ksb, "C TX_URB", urb->status, 0);
	dev_dbg(&ksb->udev->dev, "status:%d", urb->status);

	if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
		usb_autopm_put_interface_async(ksb->ifc);

	if (urb->status < 0)
		pr_err_ratelimited("%s: urb failed with err:%d",
				   ksb->fs_dev.name, urb->status);

	if (ksb->ifc->cur_altsetting->desc.bInterfaceNumber == 2)
		dev_info(ksb->fs_dev.this_device, "write: %d bytes",
			 urb->actual_length);

	ksb_free_data_pkt(pkt);

	atomic_dec(&ksb->tx_pending_cnt);
	wake_up(&ksb->pending_urb_wait);
}
static void ksb_start_rx_work(struct work_struct *w)
{
	struct ks_bridge *ksb = container_of(w, struct ks_bridge,
					     start_rx_work);
	struct data_pkt *pkt;
	struct urb *urb;
	int i = 0;
	int ret;
	bool put = true;

	ret = usb_autopm_get_interface(ksb->ifc);
	if (ret < 0) {
		if (ret != -EAGAIN && ret != -EACCES) {
			pr_err_ratelimited("autopm_get failed:%d", ret);
			return;
		}
		put = false;
	}

	for (i = 0; i < NO_RX_REQS; i++) {
		if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
			break;

		pkt = ksb_alloc_data_pkt(MAX_DATA_PKT_SIZE, GFP_KERNEL, ksb);
		if (IS_ERR(pkt)) {
			pr_err("unable to allocate data pkt");
			break;
		}

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			pr_err("unable to allocate urb");
			ksb_free_data_pkt(pkt);
			break;
		}

		usb_fill_bulk_urb(urb, ksb->udev, ksb->in_pipe, pkt->buf,
				  pkt->len, ksb_rx_cb, pkt);
		usb_anchor_urb(urb, &ksb->submitted);

		dbg_log_event(ksb, "S RX_URB", pkt->len, 0);

		atomic_inc(&ksb->rx_pending_cnt);
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			pr_err("in urb submission failed");
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			ksb_free_data_pkt(pkt);
			atomic_dec(&ksb->rx_pending_cnt);
			wake_up(&ksb->pending_urb_wait);
			break;
		}

		usb_free_urb(urb);
	}

	if (put)
		usb_autopm_put_interface_async(ksb->ifc);
}
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst = NULL;
	struct flowi6 fl6 = { { 0 } };

	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
						recv_sockets.sk6->sk, &ndst,
						&fl6))) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		return NULL;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}
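/*
 * Minimal caller sketch for rxe_find_route6(), assuming a transmit path
 * that already has the device and both endpoint addresses in hand; the
 * function name here is illustrative. The key point it shows: the returned
 * dst_entry holds a reference, which the caller must drop with
 * dst_release() once routing of the packet is done.
 */
static int rxe_route6_example(struct net_device *ndev,
			      struct in6_addr *saddr,
			      struct in6_addr *daddr)
{
	struct dst_entry *dst;

	dst = rxe_find_route6(ndev, saddr, daddr);
	if (!dst)
		return -EHOSTUNREACH;

	/* ... attach dst to the skb / use it for the UDP tunnel xmit ... */

	dst_release(dst);
	return 0;
}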
int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
{
	int err = 0;
	struct diag_request *req = NULL;
	struct diag_usb_info *usb_info = NULL;

	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
		pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
				   __func__, id);
		return -EINVAL;
	}

	usb_info = &diag_usb[id];

	req = diagmem_alloc(driver, sizeof(struct diag_request),
			    usb_info->mempool);
	if (!req) {
		/*
		 * This should never happen. It either means that we are
		 * trying to write more buffers than the max supported by
		 * this particular diag USB channel at any given instance,
		 * or the previous write ptrs are stuck in the USB layer.
		 */
		pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
				   __func__, usb_info->name);
		return -ENOMEM;
	}

	req->buf = buf;
	req->length = len;
	req->context = (void *)(uintptr_t)ctxt;

	if (!usb_info->hdl || !usb_info->connected) {
		pr_debug_ratelimited("diag: USB ch %s is not connected\n",
				     usb_info->name);
		diagmem_free(driver, req, usb_info->mempool);
		return -ENODEV;
	}

	err = usb_diag_write(usb_info->hdl, req);
	if (err) {
		pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
				   __func__, usb_info->name, err);
		diagmem_free(driver, req, usb_info->mempool);
	}

	return err;
}
static void ksb_rx_cb(struct urb *urb)
{
	struct data_pkt *pkt = urb->context;
	struct ks_bridge *ksb = pkt->ctxt;
	bool wakeup = true;

	usb_mark_last_busy(ksb->udev);
	dbg_log_event(ksb, "C RX_URB", urb->status, urb->actual_length);
	dev_dbg(&ksb->udev->dev, "status:%d actual:%d", urb->status,
		urb->actual_length);

	/* non-zero length data received while unlinking the urb */
	if (urb->status == -ENOENT && urb->actual_length > 0) {
		/*
		 * If we wake up the reader process now, it may queue the
		 * URB before its reject flag gets cleared.
		 */
		wakeup = false;
		goto add_to_list;
	}

	if (urb->status < 0) {
		if (urb->status != -ESHUTDOWN && urb->status != -ENOENT &&
		    urb->status != -EPROTO)
			pr_err_ratelimited("%s: urb failed with err:%d",
					   ksb->fs_dev.name, urb->status);
		ksb_free_data_pkt(pkt);
		goto done;
	}

	if (urb->actual_length == 0) {
		submit_one_urb(ksb, GFP_ATOMIC, pkt);
		goto done;
	}

	if (urb->actual_length == 48) {
		pr_info("%s: usage=%d, child=%d\n", __func__,
			atomic_read(&ksb->udev->dev.power.usage_count),
			atomic_read(&ksb->udev->dev.power.child_count));
	}

add_to_list:
	spin_lock(&ksb->lock);
	pkt->len = urb->actual_length;
	list_add_tail(&pkt->list, &ksb->to_ks_list);
	spin_unlock(&ksb->lock);

	/* wake up read thread */
	if (wakeup)
		wake_up(&ksb->ks_wait_q);
done:
	atomic_dec(&ksb->rx_pending_cnt);
	wake_up(&ksb->pending_urb_wait);
}
/* Called after the asynchronous usb_diag_read() on the mdm channel completes */
static int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr)
{
	/* The read of the usb driver on the mdm (not hsic) has completed */
	driver->in_busy_hsic_read_on_device = 0;
	driver->read_len_mdm = diag_read_ptr->actual;

	if (driver->diag_smux_enabled) {
		diagfwd_read_complete_smux();
		return 0;
	}

	/* If SMUX is not enabled, check for HSIC */
	if (!driver->hsic_ch) {
		pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__);
		return 0;
	}

	/*
	 * The read of the usb driver on the mdm channel has completed.
	 * If there is no write on the hsic in progress, check if the
	 * read has data to pass on to the hsic. If so, pass the usb
	 * mdm data on to the hsic.
	 */
	if (!driver->in_busy_hsic_write && driver->usb_buf_mdm_out &&
	    (driver->read_len_mdm > 0)) {
		/*
		 * Initiate the hsic write. The hsic write is asynchronous;
		 * when it completes, the write complete callback function
		 * will be called.
		 */
		int err;

		driver->in_busy_hsic_write = 1;
		err = diag_bridge_write(driver->usb_buf_mdm_out,
					driver->read_len_mdm);
		if (err) {
			pr_err_ratelimited("DIAG: mdm data on hsic write err: %d\n",
					   err);
			/*
			 * If the error is recoverable, clear the write flag
			 * so we will resubmit a write on the next frame.
			 * Otherwise, don't resubmit a write on the next
			 * frame.
			 */
			if (err != -ENODEV)
				driver->in_busy_hsic_write = 0;
		}
	}

	/*
	 * If there is no write of the usb mdm data on the hsic channel,
	 * queue another read from the mdm channel.
	 */
	if (!driver->in_busy_hsic_write)
		queue_work(driver->diag_bridge_wq,
			   &driver->diag_read_mdm_work);

	return 0;
}
static int hsic_queue_read(int id)
{
	if (id < 0 || id >= NUM_HSIC_DEV) {
		pr_err_ratelimited("diag: In %s, invalid index %d\n",
				   __func__, id);
		return -EINVAL;
	}
	queue_work(diag_hsic[id].hsic_wq, &(diag_hsic[id].read_work));
	return 0;
}
int diag_usb_queue_read(int id)
{
	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
		pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
				   __func__, id);
		return -EINVAL;
	}
	queue_work(diag_usb[id].usb_wq, &(diag_usb[id].read_work));
	return 0;
}