static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff		*skb = urb->context;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = info->dev;
	struct bridge		*brdg = dev->brdg;
	int			pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /*success*/
		dev->to_modem++;
		dev->tx_num_of_bytes += skb->len;
		dbg_timestamp("UL", skb);
		break;
	case -EPROTO:
		dev->err = -EPROTO;
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /*babble error*/
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
					__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/*flow ctrl*/
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	/* if we are here after device disconnect
	 * usb_unbind_interface() takes care of
	 * residual pm_autopm_get_interface_* calls
	 */
	if (urb->dev->state != USB_STATE_NOTATTACHED)
		usb_autopm_put_interface_async(dev->intf);
}
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff		*skb = urb->context;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = info->dev;
	struct bridge		*brdg = dev->brdg;
	int			pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /* success */
		dbg_timestamp("UL", skb);
		break;
	case -EPROTO:
		dev->err = -EPROTO;
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /* babble error */
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
					__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	
	/* flow ctrl */
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	if (urb->dev->state != USB_STATE_NOTATTACHED)
		usb_autopm_put_interface_async(dev->intf);
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
	caam_congested = congested;

	if (congested) {
#ifdef CONFIG_DEBUG_FS
		times_congested++;
#endif
		pr_debug_ratelimited("CAAM entered congestion\n");

	} else {
		pr_debug_ratelimited("CAAM exited congestion\n");
	}
}
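
/*
 * For context: a minimal sketch of how a callback like cgr_cb() is
 * registered with QMan, loosely following the CAAM/QI driver. The
 * cgrid allocation, the write-enable mask, and the caam_cgr name are
 * illustrative assumptions, not the driver's exact code.
 */
#include <soc/fsl/qman.h>

static struct qman_cgr caam_cgr;

static int register_congestion_cb_sketch(void)
{
	struct qm_mcc_initcgr opts;
	u32 cgrid;
	int ret;

	ret = qman_alloc_cgrid(&cgrid);
	if (ret)
		return ret;

	caam_cgr.cgrid = cgrid;
	caam_cgr.cb = cgr_cb;	/* called on every congestion state change */

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN);	/* enable CSCN messages */
	opts.cgr.cscn_en = QM_CGR_EN;

	return qman_create_cgr(&caam_cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
}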
static void usb_read_work_fn(struct work_struct *work)
{
	unsigned long flags;
	struct diag_request *req = NULL;
	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
						read_work);
	if (!ch)
		return;

	if (!ch->connected || !ch->enabled || ch->read_pending) {
		pr_debug_ratelimited("diag: Discarding USB read, ch: %s connected: %d, enabled: %d, pending: %d\n",
				     ch->name, ch->connected, ch->enabled,
				     ch->read_pending);
		return;
	}

	spin_lock_irqsave(&ch->lock, flags);
	req = ch->read_ptr;
	if (req) {
		ch->read_pending = 1;
		req->buf = ch->read_buf;
		req->length = USB_MAX_OUT_BUF;
		usb_diag_read(ch->hdl, req);
	} else {
		pr_err_ratelimited("diag: In %s invalid read req\n", __func__);
	}
	spin_unlock_irqrestore(&ch->lock, flags);
}
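
/*
 * A hedged sketch of the completion path that pairs with
 * usb_read_work_fn(): clear read_pending under the same lock, then
 * re-queue the read. Field names follow the function above; the
 * callback shape and the use of schedule_work() are assumptions
 * (the real driver may use a dedicated workqueue).
 */
static void usb_read_done_sketch(struct diag_usb_info *ch,
				 struct diag_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	ch->read_pending = 0;
	spin_unlock_irqrestore(&ch->lock, flags);

	/* hand req->buf up to the consumer here, then poll for more */
	schedule_work(&ch->read_work);
}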
static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge		*brdg;
	struct sk_buff		*skb = urb->context;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = info->dev;
	bool			queue = false;

	brdg = dev->brdg;
	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case -ENOENT: /* suspended */
	case 0: /* success */
		queue = true;
		info->rx_done = get_timestamp();
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
#ifdef CONFIG_MDM_HSIC_PM
		/* wakelock for fast dormancy */
		if (urb->actual_length)
			fast_dormancy_wakelock(rmnet_pm_dev);
#endif
		break;

	/*do not resubmit*/
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/*resubmit */
	case -EOVERFLOW: /*babble error*/
	default:
		queue = true;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	spin_lock(&dev->rx_done.lock);
	urb->context = NULL;
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	/* during suspend handle rx packet, but do not queue rx work */
	if (urb->status == -ENOENT)
		return;

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}
static int hsic_write(int id, unsigned char *buf, int len, int ctxt)
{
	int err = 0;
	struct diag_hsic_info *ch = NULL;

	if (id < 0 || id >= NUM_HSIC_DEV) {
		pr_err_ratelimited("diag: In %s, invalid index %d\n",
				   __func__, id);
		return -EINVAL;
	}
	if (!buf || len <= 0) {
		pr_err_ratelimited("diag: In %s, ch %d, invalid buf %pK len %d\n",
				   __func__, id, buf, len);
		return -EINVAL;
	}

	ch = &diag_hsic[id];
	if (!ch->opened || !ch->enabled) {
		pr_debug_ratelimited("diag: In %s, ch %d is disabled. opened %d enabled: %d\n",
				     __func__, id, ch->opened, ch->enabled);
		return -EIO;
	}

	err = diag_bridge_write(ch->id, buf, len);
	if (err) {
		pr_err_ratelimited("diag: cannot write to HSIC ch %d, err: %d\n",
				   ch->id, err);
	}
	return err;
}
static void ctrl_write_callback(struct urb *urb)
{
#ifdef HTC_DEBUG_QMI_STUCK
	struct ctrl_write_context *context = urb->context;
	struct rmnet_ctrl_dev	*dev = context->dev;
#else 
	struct rmnet_ctrl_dev	*dev = urb->context;
#endif 

#ifdef HTC_DEBUG_QMI_STUCK
	del_timer(&context->timer);

	if (unlikely(time_is_before_jiffies(context->start_jiffies + HZ)))
		pr_err("[%s] urb %p takes %d msec to complete.\n", __func__,
			urb, jiffies_to_msecs(jiffies - context->start_jiffies));
#endif	
	if (urb->status) {
		dev->tx_ctrl_err_cnt++;
		pr_debug_ratelimited("Write status/size %d/%d\n",
				urb->status, urb->actual_length);
	}

#ifdef HTC_LOG_RMNET_USB_CTRL
	log_rmnet_usb_ctrl_event(dev->intf, "Tx cb", urb->actual_length);
#endif	

	kfree(urb->setup_packet);
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);
	usb_autopm_put_interface_async(dev->intf);
#ifdef HTC_DEBUG_QMI_STUCK
	kfree(context);
#endif	
}
static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
{
	bool fail;
	u8 tmp = 0, val = 0;

	fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));

	if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
		pr_debug("UDP header uncompression\n");
		switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
		case LOWPAN_NHC_UDP_CS_P_00:
			fail |= lowpan_fetch_skb(skb, &uh->source,
						 sizeof(uh->source));
			fail |= lowpan_fetch_skb(skb, &uh->dest,
						 sizeof(uh->dest));
			break;
		case LOWPAN_NHC_UDP_CS_P_01:
			fail |= lowpan_fetch_skb(skb, &uh->source,
						 sizeof(uh->source));
			fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
			uh->dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
			break;
		case LOWPAN_NHC_UDP_CS_P_10:
			fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
			uh->source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
			fail |= lowpan_fetch_skb(skb, &uh->dest,
						 sizeof(uh->dest));
			break;
		case LOWPAN_NHC_UDP_CS_P_11:
			fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
			uh->source = htons(LOWPAN_NHC_UDP_4BIT_PORT +
					   (val >> 4));
			uh->dest = htons(LOWPAN_NHC_UDP_4BIT_PORT +
					 (val & 0x0f));
			break;
		default:
			pr_debug("ERROR: unknown UDP format\n");
			goto err;
		}

		pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
			 ntohs(uh->source), ntohs(uh->dest));

		/* checksum */
		if (tmp & LOWPAN_NHC_UDP_CS_C) {
			pr_debug_ratelimited("checksum elided currently not supported\n");
			goto err;
		} else {
			fail |= lowpan_fetch_skb(skb, &uh->check,
						 sizeof(uh->check));
		}

		/* UDP length needs to be infered from the lower layers
		 * here, we obtain the hint from the remaining size of the
		 * frame
		 */
		uh->len = htons(skb->len + sizeof(struct udphdr));
		pr_debug("uncompressed UDP length: src = %d", ntohs(uh->len));
	} else {
		pr_debug("ERROR: unsupported NH format\n");
		goto err;
	}

	if (fail)
		goto err;

	return 0;
err:
	return -EINVAL;
}
static void data_bridge_write_cb(struct urb *urb)
{
	struct sk_buff		*skb = urb->context;
	struct data_bridge	*dev = *(struct data_bridge **)skb->cb;
	struct bridge		*brdg = dev->brdg;
	int			pending;

	pr_debug("%s: dev:%p\n", __func__, dev);

	switch (urb->status) {
	case 0: /*success*/
		break;
	case -EPIPE:
		set_bit(TX_HALT, &dev->flags);
		dev_err(&dev->udev->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EOVERFLOW: /*babble error*/
		/* FALLTHROUGH */
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
					__func__, urb->status);
	}

	usb_free_urb(urb);
	dev_kfree_skb_any(skb);

	pending = atomic_dec_return(&dev->pending_txurbs);

	/*flow ctrl*/
	if (brdg && fctrl_support && pending <= fctrl_dis_thld &&
		test_and_clear_bit(TX_THROTTLED, &brdg->flags)) {
		pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n",
			__func__, pending);
		dev->tx_unthrottled_cnt++;
		if (brdg->ops.unthrottle_tx)
			brdg->ops.unthrottle_tx(brdg->ctx);
	}

	usb_autopm_put_interface_async(dev->intf);
}
static void data_bridge_read_cb(struct urb *urb)
{
	struct bridge		*brdg;
	struct sk_buff		*skb = urb->context;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = info->dev;
	bool			queue = false;

	/*usb device disconnect*/
	if (urb->dev->state == USB_STATE_NOTATTACHED)
		urb->status = -ECONNRESET;

	brdg = dev->brdg;
	skb_put(skb, urb->actual_length);

	switch (urb->status) {
	case 0: /* success */
		queue = true;
		info->rx_done = get_timestamp();
		spin_lock(&dev->rx_done.lock);
		__skb_queue_tail(&dev->rx_done, skb);
		spin_unlock(&dev->rx_done.lock);
		break;

	/*do not resubmit*/
	case -EPIPE:
		set_bit(RX_HALT, &dev->flags);
		dev_err(&dev->intf->dev, "%s: epout halted\n", __func__);
		schedule_work(&dev->kevent);
		/* FALLTHROUGH */
	case -ESHUTDOWN:
	case -ENOENT: /* suspended */
	case -ECONNRESET: /* unplug */
	case -EPROTO:
		dev_kfree_skb_any(skb);
		break;

	/*resubmit */
	case -EOVERFLOW: /*babble error*/
	default:
		queue = true;
		dev_kfree_skb_any(skb);
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
			__func__, urb->status);
		break;
	}

	spin_lock(&dev->rx_done.lock);
	list_add_tail(&urb->urb_list, &dev->rx_idle);
	spin_unlock(&dev->rx_done.lock);

	if (queue)
		queue_work(dev->wq, &dev->process_rx_w);
}
static void ctrl_write_callback(struct urb *urb)
{
	struct rmnet_ctrl_dev	*dev = urb->context;

	if (urb->status) {
		dev->tx_ctrl_err_cnt++;
		pr_debug_ratelimited("Write status/size %d/%d\n",
				urb->status, urb->actual_length);
	}

	kfree(urb->setup_packet);
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);
	usb_autopm_put_interface_async(dev->intf);
}
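
/*
 * A hedged sketch of the submit side that pairs with
 * ctrl_write_callback(): the callback above kfrees urb->setup_packet
 * and urb->transfer_buffer, so both must be heap allocations whose
 * ownership passes to the URB (buf included). Loosely modeled on
 * rmnet_usb_ctrl_write(); the wIndex choice is an assumption.
 */
static int ctrl_write_sketch(struct rmnet_ctrl_dev *dev, char *buf,
			     size_t size)
{
	struct usb_device	*udev = interface_to_usbdev(dev->intf);
	struct usb_ctrlrequest	*out_ctlreq;
	struct urb		*sndurb;
	int			ret;

	sndurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!sndurb)
		return -ENOMEM;

	out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_KERNEL);
	if (!out_ctlreq) {
		usb_free_urb(sndurb);
		return -ENOMEM;
	}

	out_ctlreq->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS |
				   USB_RECIP_INTERFACE;
	out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	out_ctlreq->wValue = 0;
	out_ctlreq->wIndex = cpu_to_le16(
			dev->intf->cur_altsetting->desc.bInterfaceNumber);
	out_ctlreq->wLength = cpu_to_le16(size);

	usb_fill_control_urb(sndurb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)out_ctlreq, buf, size,
			     ctrl_write_callback, dev);

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0) {
		kfree(out_ctlreq);
		usb_free_urb(sndurb);
		return ret;
	}

	/* on success, ctrl_write_callback() frees the request and urb */
	ret = usb_submit_urb(sndurb, GFP_KERNEL);
	if (ret) {
		usb_autopm_put_interface(dev->intf);
		kfree(out_ctlreq);
		usb_free_urb(sndurb);
	}
	return ret;
}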
static int ghsuart_ctrl_receive(void *dev, void *buf, size_t actual)
{
	struct ghsuart_ctrl_port	*port = dev;
	int retval = 0;

	pr_debug_ratelimited("%s: read complete bytes read: %d\n",
			__func__, actual);

	/* send it to USB here */
	if (port && port->send_cpkt_response) {
		retval = port->send_cpkt_response(port->port_usb, buf, actual);
		port->to_host++;
	}
	kfree(buf);
	return retval;
}
int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
{
	int err = 0;
	struct diag_request *req = NULL;
	struct diag_usb_info *usb_info = NULL;

	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
		pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
				   __func__, id);
		return -EINVAL;
	}

	usb_info = &diag_usb[id];

	req = diagmem_alloc(driver, sizeof(struct diag_request),
			    usb_info->mempool);
	if (!req) {
		/*
		 * This should never happen. It either means that we are
		 * trying to write more buffers than the max supported by
		 * this particular diag USB channel at any given instance,
		 * or the previous write ptrs are stuck in the USB layer.
		 */
		pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
				   __func__, usb_info->name);
		return -ENOMEM;
	}

	req->buf = buf;
	req->length = len;
	req->context = (void *)(uintptr_t)ctxt;

	if (!usb_info->hdl || !usb_info->connected) {
		pr_debug_ratelimited("diag: USB ch %s is not connected\n",
				     usb_info->name);
		diagmem_free(driver, req, usb_info->mempool);
		return -ENODEV;
	}
	err = usb_diag_write(usb_info->hdl, req);
	if (err) {
		pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
				   __func__, usb_info->name, err);
		diagmem_free(driver, req, usb_info->mempool);
	}

	return err;
}
static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
					struct se_device **found_dev)
{
	struct xcopy_dev_search_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.dev_wwn = dev_wwn;

	ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
	if (ret == 1) {
		*found_dev = info.found_dev;
		return 0;
	} else {
		pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
		return -EINVAL;
	}
}
/*
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
	unsigned int i;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_irq_desc(i, desc) {
		bool affinity_broken;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_debug_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    i, smp_processor_id());
	}

	local_irq_restore(flags);
}
void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
{
	void *buf = NULL;
	int i = 0;
	unsigned long flags;
	struct diag_mempool_t *mempool = NULL;

	if (!driver)
		return NULL;

	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
		mempool = &diag_mempools[i];
		if (pool_type != mempool->id)
			continue;
		if (!mempool->pool) {
			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
					   mempool->name);
			break;
		}
		if (size == 0 || size > mempool->itemsize) {
			pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
					   mempool->name, size);
			break;
		}
		spin_lock_irqsave(&mempool->lock, flags);
		if (mempool->count < mempool->poolsize) {
			atomic_add(1, (atomic_t *)&mempool->count);
			buf = mempool_alloc(mempool->pool, GFP_ATOMIC);
			kmemleak_not_leak(buf);
		}
		spin_unlock_irqrestore(&mempool->lock, flags);
		if (!buf) {
			pr_debug_ratelimited("diag: Unable to allocate buffer from memory pool %s, size: %d/%d count: %d/%d\n",
					     mempool->name,
					     size, mempool->itemsize,
					     mempool->count,
					     mempool->poolsize);
		}
		break;
	}

	return buf;
}
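
/*
 * A hedged sketch of the matching free path, assuming the same
 * diag_mempools[] layout and locking used by diagmem_alloc() above;
 * not the driver's exact diagmem_free().
 */
static void diagmem_free_sketch(struct diagchar_dev *driver, void *buf,
				int pool_type)
{
	int i;
	unsigned long flags;
	struct diag_mempool_t *mempool;

	if (!driver || !buf)
		return;

	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
		mempool = &diag_mempools[i];
		if (pool_type != mempool->id)
			continue;
		spin_lock_irqsave(&mempool->lock, flags);
		mempool_free(buf, mempool->pool);
		/* keep the in-use counter in step with diagmem_alloc() */
		atomic_sub(1, (atomic_t *)&mempool->count);
		spin_unlock_irqrestore(&mempool->lock, flags);
		break;
	}
}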
int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
{
	int err = 0;
	struct diag_request *req = NULL;
	struct diag_usb_info *usb_info = NULL;

	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
		pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
				   __func__, id);
		return -EINVAL;
	}

	usb_info = &diag_usb[id];

	req = diagmem_alloc(driver, sizeof(struct diag_request),
			    usb_info->mempool);
	if (!req) {
		pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
				   __func__, usb_info->name);
		return -ENOMEM;
	}

	req->buf = buf;
	req->length = len;
	req->context = (void *)(uintptr_t)ctxt;

	if (!usb_info->hdl || !usb_info->connected) {
		pr_debug_ratelimited("diag: USB ch %s is not connected\n",
				     usb_info->name);
		diagmem_free(driver, req, usb_info->mempool);
		return -ENODEV;
	}
	err = usb_diag_write(usb_info->hdl, req);
	if (err) {
		pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
				   __func__, usb_info->name, err);
		diagmem_free(driver, req, usb_info->mempool);
	}

	return err;
}
static void ctrl_write_callback(struct urb *urb)
{
	struct ctrl_pkt *cpkt = urb->context;
	struct rmnet_ctrl_dev *dev = cpkt->ctxt;

#ifdef CONFIG_QCT_9K_MODEM
	if (get_radio_flag() & RADIO_FLAG_MORE_LOG)
		pr_info("[RMNET] wcb: %d/%d\n", urb->status, urb->actual_length);
#endif

	if (urb->status) {
		dev->cudev->tx_ctrl_err_cnt++;
		/* error case */
		pr_info("[RMNET] %s: Write status/size %d/%d\n",
			__func__, urb->status, urb->actual_length);
		pr_debug_ratelimited("Write status/size %d/%d\n",
				urb->status, urb->actual_length);
	}

	kfree(urb->setup_packet);
	kfree(urb->transfer_buffer);
	usb_free_urb(urb);
	kfree(cpkt);
	usb_autopm_put_interface_async(dev->cudev->intf);
}
static __be16 ether_ip_type_trans(struct sk_buff *skb,
	struct net_device *dev)
{
	__be16	protocol = 0;

	skb->dev = dev;

	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		if ((skb->data[0] & 0x40) == 0x00)
			protocol = htons(ETH_P_MAP);
		else
			pr_debug_ratelimited("[%s] L3 protocol decode error: 0x%02x",
					dev->name, skb->data[0] & 0xf0);
	}

	return protocol;
}
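
/*
 * Usage sketch: classify a raw rmnet frame before handing it to the
 * network stack. Assumes an skb whose first payload byte carries the
 * IP version nibble, as ether_ip_type_trans() above expects; the
 * function name is hypothetical.
 */
static void rmnet_rx_deliver_sketch(struct sk_buff *skb,
				    struct net_device *ndev)
{
	skb->protocol = ether_ip_type_trans(skb, ndev);
	netif_rx(skb);
}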
static int udp_uncompress(struct sk_buff *skb, size_t needed)
{
	u8 tmp = 0, val = 0;
	struct udphdr uh;
	bool fail;
	int err;

	fail = lowpan_fetch_skb(skb, &tmp, sizeof(tmp));

	pr_debug("UDP header uncompression\n");
	switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
	case LOWPAN_NHC_UDP_CS_P_00:
		fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
		fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
		break;
	case LOWPAN_NHC_UDP_CS_P_01:
		fail |= lowpan_fetch_skb(skb, &uh.source, sizeof(uh.source));
		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
		uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
		break;
	case LOWPAN_NHC_UDP_CS_P_10:
		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
		uh.source = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);
		fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));
		break;
	case LOWPAN_NHC_UDP_CS_P_11:
		fail |= lowpan_fetch_skb(skb, &val, sizeof(val));
		uh.source = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val >> 4));
		uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f));
		break;
	default:
		BUG();
	}

	pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
		 ntohs(uh.source), ntohs(uh.dest));

	/* checksum */
	if (tmp & LOWPAN_NHC_UDP_CS_C) {
		pr_debug_ratelimited("checksum elided currently not supported\n");
		fail = true;
	} else {
		fail |= lowpan_fetch_skb(skb, &uh.check, sizeof(uh.check));
	}

	if (fail)
		return -EINVAL;

	/* UDP length needs to be infered from the lower layers
	 * here, we obtain the hint from the remaining size of the
	 * frame
	 */
	switch (lowpan_priv(skb->dev)->lltype) {
	case LOWPAN_LLTYPE_IEEE802154:
		if (lowpan_802154_cb(skb)->d_size)
			uh.len = htons(lowpan_802154_cb(skb)->d_size -
				       sizeof(struct ipv6hdr));
		else
			uh.len = htons(skb->len + sizeof(struct udphdr));
		break;
	default:
		uh.len = htons(skb->len + sizeof(struct udphdr));
		break;
	}
	pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));

	/* replace the compressed UDP head by the uncompressed UDP
	 * header
	 */
	err = skb_cow(skb, needed);
	if (unlikely(err))
		return err;

	skb_push(skb, sizeof(struct udphdr));
	skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));

	return 0;
}
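
/*
 * Worked example of the LOWPAN_NHC_UDP_CS_P_11 arithmetic above,
 * assuming the RFC 6282 4-bit port base 0xF0B0: a single byte carries
 * both port nibbles. Standalone userspace C, for illustration only.
 */
#include <stdio.h>

int main(void)
{
	unsigned char val = 0x35;	/* compressed nibbles from the frame */
	unsigned int src = 0xF0B0 + (val >> 4);   /* 0xF0B3 = 61619 */
	unsigned int dst = 0xF0B0 + (val & 0x0f); /* 0xF0B5 = 61621 */

	printf("src=%u dst=%u\n", src, dst);
	return 0;
}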
int data_bridge_write(unsigned int id, struct sk_buff *skb)
{
	int			result;
	int			size = skb->len;
	int			pending;
	struct urb		*txurb;
	struct timestamp_info	*info = (struct timestamp_info *)skb->cb;
	struct data_bridge	*dev = __dev[id];
	struct bridge		*brdg;

	if (!dev || !dev->brdg || dev->err || !usb_get_intfdata(dev->intf))
		return -ENODEV;

	brdg = dev->brdg;
	if (!brdg)
		return -ENODEV;

	dev_dbg(&dev->udev->dev, "%s: write (%d bytes)\n", __func__, skb->len);

	result = usb_autopm_get_interface(dev->intf);
	if (result < 0) {
		dev_err(&dev->udev->dev, "%s: resume failure\n", __func__);
		goto error;
	}

	txurb = usb_alloc_urb(0, GFP_KERNEL);
	if (!txurb) {
		dev_err(&dev->udev->dev, "%s: error allocating read urb\n",
			__func__);
		result = -ENOMEM;
		goto error;
	}

	/* store dev pointer in skb */
	info->dev = dev;
	info->tx_queued = get_timestamp();

	usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out,
			skb->data, skb->len, data_bridge_write_cb, skb);

	if (test_bit(SUSPENDED, &dev->flags)) {
		usb_anchor_urb(txurb, &dev->delayed);
		goto free_urb;
	}

	pending = atomic_inc_return(&dev->pending_txurbs);
	usb_anchor_urb(txurb, &dev->tx_active);

	if (atomic_read(&dev->pending_txurbs) % tx_urb_mult)
		txurb->transfer_flags |= URB_NO_INTERRUPT;

	result = usb_submit_urb(txurb, GFP_KERNEL);
	if (result < 0) {
		usb_unanchor_urb(txurb);
		atomic_dec(&dev->pending_txurbs);
		dev_err(&dev->udev->dev, "%s: submit URB error %d\n",
			__func__, result);
		goto free_urb;
	}

	dev->to_modem++;
	dev_dbg(&dev->udev->dev, "%s: pending_txurbs: %u\n", __func__, pending);

	/* flow control: the urb was submitted, but ask the caller to
	 * throttle further writes by returning -EBUSY
	 */
	if (fctrl_support && pending > fctrl_en_thld) {
		set_bit(TX_THROTTLED, &brdg->flags);
		dev->tx_throttled_cnt++;
		pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n",
					__func__, pending);
		return -EBUSY;
	}

	return size;

free_urb:
	usb_free_urb(txurb);
error:
	dev->txurb_drp_cnt++;
	usb_autopm_put_interface(dev->intf);

	return result;
}
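
/*
 * A hedged sketch of how a bridge client honors the flow-control
 * contract of data_bridge_write() above: -EBUSY means the skb *was*
 * submitted, but the caller must stop sending until
 * brdg->ops.unthrottle_tx() fires. The function name is hypothetical.
 */
static int bridge_client_send_sketch(unsigned int id, struct sk_buff *skb)
{
	int ret = data_bridge_write(id, skb);

	if (ret == -EBUSY)
		return 0;	/* accepted; pause tx until unthrottle_tx() */

	return ret < 0 ? ret : 0;
}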
static void notification_available_cb(struct urb *urb)
{
	int				status;
	struct usb_cdc_notification	*ctrl;
	struct usb_device		*udev;
	struct rmnet_ctrl_dev		*dev = urb->context;

	udev = interface_to_usbdev(dev->intf);

	switch (urb->status) {
	case 0:
	/* if non-zero length of data received while unlink */
	case -ENOENT:
		/* success */
		break;

	/* do not resubmit */
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -EPROTO:
		return;
	case -EPIPE:
		pr_err_ratelimited("%s: Stall on int endpoint\n", __func__);
		/* TBD: halt to be cleared in work */
		return;

	/* resubmit */
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		 pr_debug_ratelimited("%s: Non zero urb status = %d\n",
			__func__, urb->status);
		goto resubmit_int_urb;
	}

	if (!urb->actual_length)
		return;

	ctrl = urb->transfer_buffer;

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail_cnt++;
		
#ifdef HTC_PM_DBG
		if (usb_pm_debug_enabled)
			usb_mark_intf_last_busy(dev->intf, false);
#endif
		
		usb_mark_last_busy(udev);
		queue_work(dev->wq, &dev->get_encap_work);

		if (!dev->resp_available) {
			dev->resp_available = true;
			
			if (dev->intf)
				dev_info(&dev->intf->dev,
					"%s[%d]: dev->resp_available: %d\n",
					__func__, __LINE__,
					dev->resp_available);
			wake_up(&dev->open_wait_queue);
		}

		return;
	default:
		 dev_err(dev->devicep,
			"%s:Command not implemented\n", __func__);
	}

resubmit_int_urb:
	
#ifdef HTC_PM_DBG
	if (usb_pm_debug_enabled)
		usb_mark_intf_last_busy(dev->intf, false);
#endif
	
	usb_mark_last_busy(udev);
	usb_anchor_urb(urb, &dev->rx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(urb);
		dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
		__func__, status);
	}

	return;
}
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device		*udev;
	struct ctrl_pkt_list_elem	*list_elem = NULL;
	struct rmnet_ctrl_dev		*dev = urb->context;
	void				*cpkt;
	int				status = 0;
	size_t				cpkt_size = 0;

	udev = interface_to_usbdev(dev->intf);

	usb_autopm_put_interface_async(dev->intf);
#ifdef HTC_DEBUG_QMI_STUCK
	del_timer(&dev->rcv_timer);
	if (expired_rcvurb == urb) {
		expired_rcvurb = NULL;
		dev_err(&(dev->intf->dev), "[RMNET] %s(%d) urb->status:%d urb->actual_length:%u !!!\n", __func__, __LINE__, urb->status, urb->actual_length);
		if (urb->status != 0) {
			if (urb->actual_length > 0) {
				dev_err(&(dev->intf->dev), "[RMNET] %s(%d) set urb->status 0 !!!\n", __func__, __LINE__);
				urb->status = 0;
			}
			else {
				dev_err(&(dev->intf->dev), "[RMNET] %s(%d) dev->inturb->anchor(%x) !!!\n", __func__, __LINE__, (dev->inturb) ? (unsigned int)dev->inturb->anchor : (unsigned int)(0xffffffff));
				dev_err(&(dev->intf->dev), "[RMNET] %s(%d) goto resubmit_int_urb !!!\n", __func__, __LINE__);
				goto resubmit_int_urb;
			}
		}
	}
#endif	

	switch (urb->status) {
	case 0:
		/* success */
		dev->get_encap_resp_cnt++;
		break;

	/* do not resubmit */
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/* resubmit */
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				__func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n",
		urb->actual_length, dev->name);

#ifdef HTC_LOG_RMNET_USB_CTRL
	log_rmnet_usb_ctrl_event(dev->intf, "Rx cb", urb->actual_length);
#endif	

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;
	if (!cpkt_size) {
		dev->zlp_cnt++;
		dev_dbg(dev->devicep, "%s: zero length pkt received\n",
				__func__);
		goto resubmit_int_urb;
	}

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		dev_err(dev->devicep, "%s: list_elem alloc failed\n", __func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
			__func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;
	spin_lock(&dev->rx_lock);
	list_add_tail(&list_elem->list, &dev->rx_list);
	spin_unlock(&dev->rx_lock);

	wake_up(&dev->read_wait_queue);

resubmit_int_urb:
	
#ifdef HTC_PM_DBG
	if (usb_pm_debug_enabled)
		usb_mark_intf_last_busy(dev->intf, false);
#endif
	
	
	/* check if it is already submitted in resume */
	if (!dev->inturb->anchor) {
		usb_mark_last_busy(udev);
		usb_anchor_urb(dev->inturb, &dev->rx_submitted);
		status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
		if (status) {
			usb_unanchor_urb(dev->inturb);
			dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
				__func__, status);
		}
	}
}
static void notification_available_cb(struct urb *urb)
{
	int				status;
	struct usb_cdc_notification	*ctrl;
	struct ctrl_bridge		*dev = urb->context;
	struct bridge			*brdg = dev->brdg;
	unsigned int			ctrl_bits;
	unsigned char			*data;

	switch (urb->status) {
	case 0:
		/*success*/
		break;
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		 /* unplug */
		 return;
	case -EPIPE:
		dev_err(&dev->intf->dev,
			"%s: stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
	case -EOVERFLOW:
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
					__func__, urb->status);
		goto resubmit_int_urb;
	}

	ctrl = (struct usb_cdc_notification *)urb->transfer_buffer;
	data = (unsigned char *)(ctrl + 1);

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail++;
		usb_fill_control_urb(dev->readurb, dev->udev,
					usb_rcvctrlpipe(dev->udev, 0),
					(unsigned char *)dev->in_ctlreq,
					dev->readbuf,
					DEFAULT_READ_URB_LENGTH,
					resp_avail_cb, dev);

		usb_anchor_urb(dev->readurb, &dev->tx_submitted);
		status = usb_submit_urb(dev->readurb, GFP_ATOMIC);
		if (status) {
			dev_err(&dev->intf->dev,
				"%s: Error submitting Read URB %d\n",
				__func__, status);
			usb_unanchor_urb(dev->readurb);
			goto resubmit_int_urb;
		}
		return;
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&dev->intf->dev, "%s network\n", ctrl->wValue ?
					"connected to" : "disconnected from");
		break;
	case USB_CDC_NOTIFY_SERIAL_STATE:
		dev->notify_ser_state++;
		ctrl_bits = get_unaligned_le16(data);
		dev_dbg(&dev->intf->dev, "serial state: %d\n", ctrl_bits);
		dev->cbits_tohost = ctrl_bits;
		if (brdg && brdg->ops.send_cbits)
			brdg->ops.send_cbits(brdg->ctx, ctrl_bits);
		break;
	default:
		dev_err(&dev->intf->dev, "%s: unknown notification %d received:"
			"index %d len %d data0 %d data1 %d",
			__func__, ctrl->bNotificationType, ctrl->wIndex,
			ctrl->wLength, data[0], data[1]);
	}

resubmit_int_urb:
	usb_anchor_urb(urb, &dev->tx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		dev_err(&dev->intf->dev, "%s: Error re-submitting Int URB %d\n",
		__func__, status);
		usb_unanchor_urb(urb);
	}
}
static void notification_available_cb(struct urb *urb)
{
	int				status;
	struct usb_cdc_notification	*ctrl;
	struct usb_device		*udev;
	struct rmnet_ctrl_dev		*dev = urb->context;

	udev = interface_to_usbdev(dev->intf);

	switch (urb->status) {
	case 0:
		/*success*/
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;
	case -EPIPE:
		pr_err_ratelimited("%s: Stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		 pr_debug_ratelimited("%s: Non zero urb status = %d\n",
			__func__, urb->status);
		goto resubmit_int_urb;
	}

	ctrl = urb->transfer_buffer;

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail_cnt++;
		usb_fill_control_urb(dev->rcvurb, udev,
					usb_rcvctrlpipe(udev, 0),
					(unsigned char *)dev->in_ctlreq,
					dev->rcvbuf,
					DEFAULT_READ_URB_LENGTH,
					resp_avail_cb, dev);

		status = usb_submit_urb(dev->rcvurb, GFP_ATOMIC);
		if (status) {
			dev_err(dev->devicep,
			"%s: Error submitting Read URB %d\n", __func__, status);
			goto resubmit_int_urb;
		}

		usb_mark_last_busy(udev);

		if (!dev->resp_available) {
			dev->resp_available = true;
			wake_up(&dev->open_wait_queue);
		}

		return;
	default:
		 dev_err(dev->devicep,
			"%s:Command not implemented\n", __func__);
	}

resubmit_int_urb:
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status)
		dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
		__func__, status);

	return;
}
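
/*
 * A hedged sketch of the interrupt-URB setup that feeds
 * notification_available_cb(). Buffer size, interval, and the
 * int_in_epaddr parameter are illustrative assumptions; field names
 * follow the functions above.
 */
static int setup_notification_urb_sketch(struct rmnet_ctrl_dev *dev,
					 struct usb_device *udev,
					 unsigned int int_in_epaddr)
{
	unsigned int pipe = usb_rcvintpipe(udev, int_in_epaddr);

	dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->inturb)
		return -ENOMEM;

	dev->intbuf = kmalloc(64, GFP_KERNEL);	/* size is an assumption */
	if (!dev->intbuf) {
		usb_free_urb(dev->inturb);
		return -ENOMEM;
	}

	usb_fill_int_urb(dev->inturb, udev, pipe, dev->intbuf, 64,
			 notification_available_cb, dev, 4 /* interval */);

	return usb_submit_urb(dev->inturb, GFP_KERNEL);
}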
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device		*udev;
	struct ctrl_pkt_list_elem	*list_elem = NULL;
	struct rmnet_ctrl_dev		*dev = urb->context;
	void				*cpkt;
	int				status = 0;
	size_t				cpkt_size = 0;
	unsigned int 		iface_num;

	udev = interface_to_usbdev(dev->intf);
	iface_num = dev->intf->cur_altsetting->desc.bInterfaceNumber;

	usb_autopm_put_interface_async(dev->intf);

	switch (urb->status) {
	case 0:
		/*success*/
		dev->get_encap_resp_cnt++;
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				__func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n",
		urb->actual_length, dev->name);

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;
	if (!cpkt_size) {
		dev->zlp_cnt++;
		dev_dbg(dev->devicep, "%s: zero length pkt received\n",
				__func__);
		goto resubmit_int_urb;
	}

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		dev_err(dev->devicep, "%s: list_elem alloc failed\n", __func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
			__func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;
	spin_lock(&dev->rx_lock);
	list_add_tail(&list_elem->list, &dev->rx_list);
	spin_unlock(&dev->rx_lock);

	rd_cb_time = cpu_clock(smp_processor_id());

	wake_up(&dev->read_wait_queue);

resubmit_int_urb:
	/*check if it is already submitted in resume*/
	if (!dev->inturb->anchor) {
		usb_mark_last_busy(udev);
		usb_anchor_urb(dev->inturb, &dev->rx_submitted);
		status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
		if (status) {
			usb_unanchor_urb(dev->inturb);
			dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
					__func__, status);
		}
		DBG_NOTI("[CHKRA:%d]>", iface_num);
	}
}
static void notification_available_cb(struct urb *urb)
{
	int				status;
	struct usb_cdc_notification	*ctrl;
	struct usb_device		*udev;
	struct rmnet_ctrl_dev		*dev = urb->context;
	unsigned int 		iface_num;

	udev = interface_to_usbdev(dev->intf);
	iface_num = dev->intf->cur_altsetting->desc.bInterfaceNumber;

	switch (urb->status) {
	case 0:
	/* if non-zero length of data received while unlink */
	case -ENOENT:
		DBG_NOTI("[NACB:%d]<", iface_num);
		/*success*/
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -EPROTO:
		return;
	case -EPIPE:
		pr_err_ratelimited("%s: Stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		 pr_debug_ratelimited("%s: Non zero urb status = %d\n",
			__func__, urb->status);
		goto resubmit_int_urb;
	}

	if (!urb->actual_length) {
		pr_err("Received Zero actual length: %d", urb->actual_length);
		return;
	}
	ctrl = urb->transfer_buffer;

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail_cnt++;

		usb_mark_last_busy(udev);

		if (urb->status == -ENOENT)
			pr_info("URB status is ENOENT");

		queue_work(dev->wq, &dev->get_encap_work);

		if (!dev->resp_available) {
			dev->resp_available = true;
			wake_up(&dev->open_wait_queue);
		}

		return;
	default:
		 dev_err(dev->devicep,
			"%s:Command not implemented\n", __func__);
	}

resubmit_int_urb:
	usb_anchor_urb(urb, &dev->rx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(urb);
		dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
		__func__, status);
	}

	return;
}
static void resp_avail_cb(struct urb *urb)
{
	struct usb_device		*udev;
	struct ctrl_pkt_list_elem	*list_elem = NULL;
	struct rmnet_ctrl_dev		*dev = urb->context;
	void				*cpkt;
	int				status = 0;
	size_t				cpkt_size = 0;

	udev = interface_to_usbdev(dev->intf);

	switch (urb->status) {
	case 0:
		/*success*/
		dev->get_encap_resp_cnt++;
		break;

	/*do not resubmit*/
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		return;

	/*resubmit*/
	case -EOVERFLOW:
		pr_err_ratelimited("%s: Babble error happened\n", __func__);
	default:
		pr_debug_ratelimited("%s: Non zero urb status = %d\n",
				__func__, urb->status);
		goto resubmit_int_urb;
	}

	dev_dbg(dev->devicep, "Read %d bytes for %s\n",
		urb->actual_length, dev->name);

	cpkt = urb->transfer_buffer;
	cpkt_size = urb->actual_length;
	if (!cpkt_size) {
		dev->zlp_cnt++;
		dev_dbg(dev->devicep, "%s: zero length pkt received\n",
				__func__);
		goto resubmit_int_urb;
	}

	list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
	if (!list_elem) {
		dev_err(dev->devicep, "%s: list_elem alloc failed\n", __func__);
		return;
	}
	list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
	if (!list_elem->cpkt.data) {
		dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
			__func__);
		kfree(list_elem);
		return;
	}
	memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
	list_elem->cpkt.data_size = cpkt_size;
	spin_lock(&dev->rx_lock);
	list_add_tail(&list_elem->list, &dev->rx_list);
	spin_unlock(&dev->rx_lock);

	wake_up(&dev->read_wait_queue);

resubmit_int_urb:
	/*re-submit int urb to check response available*/
	usb_mark_last_busy(udev);
	status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
	if (status)
		dev_err(dev->devicep, "%s: Error re-submitting Int URB %d\n",
			__func__, status);
}
static void notification_available_cb(struct urb *urb)
{
	int				status;
	struct usb_cdc_notification	*ctrl;
	struct ctrl_bridge		*dev = urb->context;
	struct bridge			*brdg = dev->brdg;
	unsigned int			ctrl_bits;
	unsigned char			*data;

	switch (urb->status) {
	case 0:
	/* if non-zero length of data received while unlink */
	case -ENOENT:
		/*success*/
		break;
	case -ESHUTDOWN:
	case -ECONNRESET:
	case -EPROTO:
		 /* unplug */
		 return;
	case -EPIPE:
		dev_err(&dev->intf->dev,
			"%s: stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
	case -EOVERFLOW:
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
					__func__, urb->status);
		goto resubmit_int_urb;
	}

	if (!urb->actual_length)
		return;

	ctrl = (struct usb_cdc_notification *)urb->transfer_buffer;
	data = (unsigned char *)(ctrl + 1);

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail++;
		//pr_info("GOT notification \n");
		usb_mark_last_busy(dev->udev);
		if (urb->status == -ENOENT)
			pr_info("URB status is ENOENT\n");
		queue_work(dev->wq, &dev->get_encap_work);

		return;
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&dev->intf->dev, "%s network\n", ctrl->wValue ?
					"connected to" : "disconnected from");
		break;
	case USB_CDC_NOTIFY_SERIAL_STATE:
		dev->notify_ser_state++;
		ctrl_bits = get_unaligned_le16(data);
		dev_dbg(&dev->intf->dev, "serial state: %d\n", ctrl_bits);
		dev->cbits_tohost = ctrl_bits;
		if (brdg && brdg->ops.send_cbits)
			brdg->ops.send_cbits(brdg->ctx, ctrl_bits);
		break;
	default:
		dev_err(&dev->intf->dev, "%s: unknown notification %d received:"
			"index %d len %d data0 %d data1 %d",
			__func__, ctrl->bNotificationType, ctrl->wIndex,
			ctrl->wLength, data[0], data[1]);
	}

resubmit_int_urb:
	usb_anchor_urb(urb, &dev->tx_submitted);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		dev_err(&dev->intf->dev, "%s: Error re-submitting Int URB %d\n",
		__func__, status);
		usb_unanchor_urb(urb);
	}
}
static void notification_available_cb(struct urb *urb)
{
	int				status;
	struct usb_cdc_notification	*ctrl;
	struct usb_device		*udev;
	struct ctrl_bridge		*dev = urb->context;
	struct bridge			*brdg = dev->brdg;
	unsigned int			ctrl_bits;
	unsigned char			*data;
	unsigned int		iface_num;

	/* if this intf is already disconnected, this urb was freed before
	 * being called from qh_completions; just return and do nothing
	 */
	if (!dev->intf)
		return;

	udev = interface_to_usbdev(dev->intf);
	iface_num = dev->intf->cur_altsetting->desc.bInterfaceNumber;

	switch (urb->status) {
	case 0:
		pr_info("[NACB:%d]<\n", iface_num);
		/*success*/
		break;
	case -ESHUTDOWN:
	case -ENOENT:
	case -ECONNRESET:
	case -EPROTO:
		 /* unplug */
		 return;
	case -EPIPE:
		dev_err(&udev->dev, "%s: stall on int endpoint\n", __func__);
		/* TBD : halt to be cleared in work */
	case -EOVERFLOW:
	default:
		pr_debug_ratelimited("%s: non zero urb status = %d\n",
					__func__, urb->status);
		goto resubmit_int_urb;
	}

	ctrl = (struct usb_cdc_notification *)urb->transfer_buffer;
	data = (unsigned char *)(ctrl + 1);

	switch (ctrl->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		dev->resp_avail++;
		usb_fill_control_urb(dev->readurb, udev,
					usb_rcvctrlpipe(udev, 0),
					(unsigned char *)dev->in_ctlreq,
					dev->readbuf,
					DEFAULT_READ_URB_LENGTH,
					resp_avail_cb, dev);

		status = usb_submit_urb(dev->readurb, GFP_ATOMIC);
		if (status) {
			dev_err(&udev->dev,
				"%s: Error submitting Read URB %d\n",
				__func__, status);
			goto resubmit_int_urb;
		} else
			pr_info("[NRA:%d]>\n", iface_num);
		return;
	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
		dev_dbg(&udev->dev, "%s network\n", ctrl->wValue ?
					"connected to" : "disconnected from");
		break;
	case USB_CDC_NOTIFY_SERIAL_STATE:
		dev->notify_ser_state++;
		ctrl_bits = get_unaligned_le16(data);
		dev_dbg(&udev->dev, "serial state: %d\n", ctrl_bits);
		dev->cbits_tohost = ctrl_bits;
		if (brdg && brdg->ops.send_cbits)
			brdg->ops.send_cbits(brdg->ctx, ctrl_bits);
#ifdef CONFIG_MDM_HSIC_PM
		pr_info("%s: set lpa handling to false\n", __func__);
		lpa_handling = false;
#endif
		break;
	default:
		dev_err(&udev->dev, "%s: unknown notification %d received:"
			"index %d len %d data0 %d data1 %d",
			__func__, ctrl->bNotificationType, ctrl->wIndex,
			ctrl->wLength, data[0], data[1]);
	}

resubmit_int_urb:
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		dev_err(&udev->dev, "%s: Error re-submitting Int URB %d\n",
		__func__, status);
	} else
		pr_info("[CHKRA:%d]>\n", iface_num);
}