Example #1
0
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb;
	struct eth_dev	*dev;
	struct net_device *net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	if (!ep->driver_data) {
		usb_ep_free_request(ep, req);
		return;
	}

	dev = ep->driver_data;
	net = dev->net;

	if (!dev->port_usb) {
		usb_ep_free_request(ep, req);
		return;
	}

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		if (!req->zero)
			dev->net->stats.tx_bytes += req->length-1;
		else
			dev->net->stats.tx_bytes += req->length;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add_tail(&req->list, &dev->tx_reqs);

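	/* in multi-packet (aggregated) mode, use this completion to queue
	 * the next request that already has data staged
	 */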
	if (dev->port_usb->multi_pkt_xfer && !req->context) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		if (!list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

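				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize */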
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

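				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding. and some hardware
				 * doesn't like to write zlps.
				 */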
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				new_req->length = length;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					new_req->length = 0;
					spin_lock(&dev->req_lock);
					list_add_tail(&new_req->list,
							&dev->tx_reqs);
					spin_unlock(&dev->req_lock);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					net->trans_start = jiffies;
				}
			} else {
				spin_lock(&dev->req_lock);
				list_add_tail(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
static int
gser_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_gser            *gser = func_to_gser(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	 *req = cdev->req;
	int			 value = -EOPNOTSUPP;
	u16			 w_index = le16_to_cpu(ctrl->wIndex);
	u16			 w_value = le16_to_cpu(ctrl->wValue);
	u16			 w_length = le16_to_cpu(ctrl->wLength);

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	
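	/* SET_LINE_CODING ... just read and save what the host sends */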
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_LINE_CODING:
		if (w_length != sizeof(struct usb_cdc_line_coding))
			goto invalid;

		value = w_length;
		cdev->gadget->ep0->driver_data = gser;
		req->complete = gser_complete_set_line_coding;
		break;

	
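	/* GET_LINE_CODING ... return what host sent, or initial value */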
	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_GET_LINE_CODING:
		value = min_t(unsigned, w_length,
				sizeof(struct usb_cdc_line_coding));
		memcpy(req->buf, &gser->port_line_coding, value);
		break;

	
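	/* SET_CONTROL_LINE_STATE ... save what the host sent */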
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:

		value = 0;
		gser->port_handshake_bits = w_value;
		if (gser->port.notify_modem) {
			unsigned port_num =
				gserial_ports[gser->port_num].client_port_num;

			gser->port.notify_modem(&gser->port,
					port_num, w_value);
		}
		break;

	default:
invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	
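	/* respond with data transfer before status phase? */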
	if (value >= 0) {
		DBG(cdev, "gser ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
			gser->port_num, ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "gser response on ttyGS%d, err %d\n",
					gser->port_num, value);
	}

	
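	/* device either stalls (value < 0) or reports success */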
	return value;
}
Example #4
0
int f_cloner_setup_handle(struct usb_function *f,
		const struct usb_ctrlrequest *ctlreq)
{
	struct cloner *cloner = f->config->cdev->req->context;
	struct usb_request *req = cloner->ep0req;

	debug_cond(BURNNER_DEBUG,"vendor bRequestType %x,bRequest %x wLength %d\n",
			ctlreq->bRequestType,
			ctlreq->bRequest,
			ctlreq->wLength);

	if ((ctlreq->bRequestType & USB_TYPE_MASK) != USB_TYPE_VENDOR) {
		printf("Unkown RequestType 0x%x \n",ctlreq->bRequestType);
		cloner->ack = -ENOSYS;
		return -ENOSYS;
	}

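	/* cancel any requests still in flight before handling a new command */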
	usb_ep_dequeue(cloner->ep0, cloner->ep0req);
	usb_ep_dequeue(cloner->ep_in, cloner->read_req);
	usb_ep_dequeue(cloner->ep_out, cloner->write_req);

	cloner->cmd_type = ctlreq->bRequest;
	req->length = ctlreq->wLength;
	req->complete = handle_cmd;

	switch (ctlreq->bRequest) {
		case VR_GET_CPU_INFO:
			strcpy(cloner->ep0req->buf,"BOOT47XX");
			break;
		case VR_GET_ACK:
			if (cloner->ack) printf("cloner->ack = %d\n",cloner->ack);
			memcpy(cloner->ep0req->buf,&cloner->ack,sizeof(int));
			break;
		case VR_GET_CRC:
			if (cloner->ack) printf("cloner->ack = %d, cloner->crc = %x\n",cloner->ack, cloner->crc);
			memcpy(cloner->ep0req->buf,&cloner->ack,sizeof(int));
			memcpy(cloner->ep0req->buf + sizeof(int),&cloner->crc,sizeof(int));
			break;
		case VR_INIT:
			break;
		case VR_UPDATE_CFG:
		case VR_WRITE:
			cloner->ack = -EBUSY;
			break;
#ifdef CONFIG_CMD_EFUSE
		case VR_GET_CHIP_ID:
		case VR_GET_USER_ID:
			cloner->ep0req->length = ctlreq->wLength;
			cloner->ack = efuse_program(cloner);
			break;
#endif
		case VR_SET_DATA_ADDR:
		case VR_SET_DATA_LEN:
			cloner->full_size = ctlreq->wIndex | ctlreq->wValue << 16;
			cloner->full_size_remainder = cloner->full_size;
			printf("cloner->full_size = %x\n", cloner->full_size);
			break;
	}

	return usb_ep_queue(cloner->ep0, cloner->ep0req, 0);
}
Example #5
0
static int gs_start_tx(struct gs_port *port)
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in;
	int			status = 0;
	static long 		prev_len;
	bool			do_tty_wake = false;

	if (port->port_usb)
		in = port->port_usb->in;
	else
		return 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		if (port->write_started >= TX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			if (prev_len && (prev_len % in->maxpacket == 0)) {
				req->length = 0;
				list_del(&req->list);
				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
					__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;
		port->nbytes_from_tty += req->length;

	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}
Example #6
0
static int mtp_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int len, clen, count, n;
	struct usb_request *req;
	struct mtp_event_data event;

	if (!g_usb_mtp_context.online)
		return -EINVAL;

	switch (cmd) {
	case MTP_IOC_EVENT:
		if (g_usb_mtp_context.intr_in_busy) {
			mtp_err("interrupt in request busy\n");
			return -EBUSY;
		}

		count = MIN(_IOC_SIZE(cmd), MTP_EVENT_SIZE);
		if (copy_from_user(event.data, (void *)arg,  count))
			return -EINVAL;

		/* length is in little endian */
		memcpy(&len, event.data, sizeof(len));
		clen = le32_to_cpu(len);
		mtp_debug("len=%d cpu len=%d\n", len, clen);
		/* send event through interrupt in */
		req = g_usb_mtp_context.int_tx_req;
		if (!req)
			return -EINVAL;
		count = MIN(MTP_EVENT_SIZE, clen);
		memcpy(req->buf, event.data, count);
		req->length = count;
		req->zero = 0;
		g_usb_mtp_context.intr_in_busy = 1;
		if (usb_ep_queue(g_usb_mtp_context.intr_in, req, GFP_ATOMIC)) {
			g_usb_mtp_context.intr_in_busy = 0;
			return -EINVAL;
		}
		break;
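	/* queue a zero-length bulk-IN packet to close out the current transfer */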
	case MTP_IOC_SEND_ZLP:
		req = req_get(&g_usb_mtp_context.tx_reqs);
		if (!req)
			return -EINVAL;
		req->length = 0;
		req->zero = 0;
		if (usb_ep_queue(g_usb_mtp_context.bulk_in, req, GFP_ATOMIC)) {
			req_put(&g_usb_mtp_context.tx_reqs, req);
			return -EINVAL;
		}
		break;
	case MTP_IOC_GET_EP_SIZE_IN:
		/* get endpoint buffer size for bulk in */
		len = BULK_BUFFER_SIZE;
		if (copy_to_user((void *)arg, &len, sizeof(int)))
			return -EINVAL;
		break;
	case MTP_IOC_CANCEL_IO:
		mtp_debug("MTP_IOC_CANCEL_IO:\n");
		g_usb_mtp_context.cancel = 1;
		for (n = 0; n < MAX_BULK_RX_REQ_NUM; n++) {
			req = pending_reqs[n];
			if (req && req->actual) {
				mtp_err("n=%d %p %d\n", n, req, req->actual);
				req->actual = 0;
			}
		}
		/* we've cancelled the recv urb, start new one */
		mtp_debug("MTP_IOC_CANCEL_IO end:\n");
		wake_up(&g_usb_mtp_context.rx_wq);
		wake_up(&g_usb_mtp_context.tx_wq);
		break;
	case MTP_IOC_DEVICE_RESET:
		g_usb_mtp_context.cancel = 1;
		g_usb_mtp_context.ctl_cancel = 1;
		wake_up(&g_usb_mtp_context.rx_wq);
		wake_up(&g_usb_mtp_context.tx_wq);
		wake_up(&g_usb_mtp_context.ctl_rx_wq);
		wake_up(&g_usb_mtp_context.ctl_tx_wq);
		break;
	}
	return 0;
}
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
#ifdef CONFIG_USB_RNDIS_MULTIPACKET
	struct net_device *net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	if (!ep->driver_data) {
		pr_err("%s: driver_data is null\n", __func__);
		usb_ep_free_request(ep, req);
		return;
	}

	dev = ep->driver_data;
	net = dev->net;

	if (!dev->port_usb) {
		pr_err("%s: port_usb is null\n", __func__);
		usb_ep_free_request(ep, req);
		return;
	}
#endif

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
#ifdef CONFIG_USB_NCM_SUPPORT_MTU_CHANGE
		printk(KERN_ERR"usb:%s tx err %d\n",__func__, req->status);
#endif
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
#ifdef CONFIG_USB_RNDIS_MULTIPACKET
		if (!req->zero)
			dev->net->stats.tx_bytes += req->length-1;
		else
			dev->net->stats.tx_bytes += req->length;
#else
		dev->net->stats.tx_bytes += skb->len;
#endif
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
#ifdef CONFIG_USB_RNDIS_MULTIPACKET
	list_add_tail(&req->list, &dev->tx_reqs);

	if (dev->port_usb->multi_pkt_xfer) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		if (!list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize */
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding. and some hardware
				 * doesn't like to write zlps.
				 */
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				new_req->length = length;
				new_req->complete = tx_complete;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					new_req->length = 0;
					spin_lock(&dev->req_lock);
					list_add_tail(&new_req->list, &dev->tx_reqs);
					spin_unlock(&dev->req_lock);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					net->trans_start = jiffies;
				}
			} else {
				spin_lock(&dev->req_lock);
				list_add_tail(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
static int
rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
    struct f_rndis		*rndis = func_to_rndis(f);
    struct usb_composite_dev *cdev = f->config->cdev;
    struct usb_request	*req = cdev->req;
    int			value = -EOPNOTSUPP;
    u16			w_index = le16_to_cpu(ctrl->wIndex);
    u16			w_value = le16_to_cpu(ctrl->wValue);
    u16			w_length = le16_to_cpu(ctrl->wLength);

    /* composite driver infrastructure handles everything except
     * CDC class messages; interface activation uses set_alt().
     */
    switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

    /* RNDIS uses the CDC command encapsulation mechanism to implement
     * an RPC scheme, with much getting/setting of attributes by OID.
     */
    case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
            | USB_CDC_SEND_ENCAPSULATED_COMMAND:
        if (w_value || w_index != rndis->ctrl_id)
            goto invalid;
        /* read the request; process it later */
        value = w_length;
        req->complete = rndis_command_complete;
        req->context = rndis;
        /* later, rndis_response_available() sends a notification */
        break;

    case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
            | USB_CDC_GET_ENCAPSULATED_RESPONSE:
        if (w_value || w_index != rndis->ctrl_id)
            goto invalid;
        else {
            u8 *buf;
            u32 n;

            /* return the result */
            buf = rndis_get_next_response(rndis->config, &n);
            if (buf) {
                memcpy(req->buf, buf, n);
                req->complete = rndis_response_complete;
                req->context = rndis;
                rndis_free_response(rndis->config, buf);
                value = n;
            }
            /* else stalls ... spec says to avoid that */
        }
        break;

    default:
invalid:
        VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
             ctrl->bRequestType, ctrl->bRequest,
             w_value, w_index, w_length);
    }

    /* respond with data transfer or status phase? */
    if (value >= 0) {
        DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
            ctrl->bRequestType, ctrl->bRequest,
            w_value, w_index, w_length);
        req->zero = (value < w_length);
        req->length = value;
        value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
        if (value < 0)
            ERROR(cdev, "rndis response on err %d\n", value);
    }

    /* device either stalls (value < 0) or reports success */
    return value;
}
/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
    struct list_head	*pool;
    struct usb_ep		*out;
    unsigned		started = 0;

    if (!port || !port->port_usb) {
        pr_err("Error - port or port->usb is NULL.");
        return -EIO;
    }

    pool = &port->read_pool;
    out  = port->port_usb->out;

    while (!list_empty(pool)) {
        struct usb_request	*req;
        int			status;
        struct tty_struct	*tty;

        /* no more rx if closed */
        tty = port->port.tty;
        if (!tty)
            break;

        if (port->read_started >= RX_QUEUE_SIZE)
            break;

        req = list_entry(pool->next, struct usb_request, list);
        list_del(&req->list);
        req->length = RX_BUF_SIZE;

        /* drop lock while we call out; the controller driver
         * may need to call us back (e.g. for disconnect)
         */
        spin_unlock(&port->port_lock);
        status = usb_ep_queue(out, req, GFP_ATOMIC);
        spin_lock(&port->port_lock);
        /*
         * If port_usb is NULL, gserial disconnect is called
         * while the spinlock is dropped and all requests are
         * freed. Free the current request here.
         */
        if (!port->port_usb) {
            started = 0;
            gs_free_req(out, req);
            break;
        }
        if (status) {
            pr_debug("%s: %s %s err %d\n",
                     __func__, "queue", out->name, status);
            list_add(&req->list, pool);
            break;
        }
        port->read_started++;

    }
    return port->read_started;
}
Example #10
0
/*
 * The setup() callback implements all the ep0 functionality that's
 * not handled lower down, in hardware or the hardware driver (like
 * device and endpoint feature flags, and their status).  It's all
 * housekeeping for the gadget function we're implementing.  Most of
 * the work is in config-specific setup.
 */
static int psfreedom_setup(struct usb_gadget *gadget,
    const struct usb_ctrlrequest *ctrl)
{
  struct psfreedom_device *dev = get_gadget_data(gadget);
  struct usb_request *req = dev->req;
  int value = -EOPNOTSUPP;
  u16 w_index = le16_to_cpu(ctrl->wIndex);
  u16 w_value = le16_to_cpu(ctrl->wValue);
  u16 w_length = le16_to_cpu(ctrl->wLength);
  u8 address = psfreedom_get_address (dev->gadget);
  unsigned long flags;
  u16 request = (ctrl->bRequestType << 8) | ctrl->bRequest;

  spin_lock_irqsave (&dev->lock, flags);
  VDBG (dev, "Setup called %d (0x%x) -- %d -- %d. Myaddr :%d\n", ctrl->bRequest,
      ctrl->bRequestType, w_value, w_index, address);

  req->zero = 0;

  /* Enable the timer if it's not already enabled */
  if (timer_added == 0)
    add_timer (&psfreedom_state_machine_timer);
  timer_added = 1;

  /* Set the address of the port */
  if (address)
    dev->port_address[dev->current_port] = address;

  /* Setup the hub or the devices */
  if (dev->current_port == 0)
    value = hub_setup (gadget, ctrl, request, w_index, w_value, w_length);
  else
    value = devices_setup (gadget, ctrl, request, w_index, w_value, w_length);

  if (no_delayed_switching) {
    if (dev->switch_to_port_delayed >= 0)
      switch_to_port (dev, dev->switch_to_port_delayed);
    dev->switch_to_port_delayed = -1;
  }

  DBG (dev, "%s Setup called %s (%d - %d) -> %d (w_length=%d)\n",
      STATUS_STR (dev->status),  REQUEST_STR (request), w_value, w_index,
      value, w_length);

  /* respond with data transfer before status phase? */
  if (value >= 0) {
    req->length = value;
    req->zero = value < w_length;
    value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
    if (value < 0) {
      DBG(dev, "ep_queue --> %d\n", value);
      req->status = 0;
      spin_unlock_irqrestore (&dev->lock, flags);
      psfreedom_setup_complete(gadget->ep0, req);
      return value;
    }
  }

  spin_unlock_irqrestore (&dev->lock, flags);
  /* device either stalls (value < 0) or reports success */
  return value;
}
Example #11
0
static int gser_notify(struct f_gser *gser, u8 type, u16 value,
		void *data, unsigned length)
{
	struct usb_ep			*ep = gser->notify;
	struct usb_request		*req;
	struct usb_cdc_notification	*notify;
#ifndef CONFIG_LGE_USB_GADGET_DRIVER
	const unsigned			len = sizeof(*notify) + length;
#endif
	void				*buf;
	int				status;
	struct usb_composite_dev *cdev = gser->port.func.config->cdev;

// [START] seunghun.kim : for LG_USB_DRIVER 2011.03.25
// [START] seunghun.kim : temp_modify for using LG_USB_MODEM on ES3 by f_serial
//#ifdef CONFIG_USB_SUPPORT_LGE_ANDROID_NMEA_FIX
#ifdef CONFIG_LGE_USB_GADGET_DRIVER
// [END] seunghun.kim : temp_modify for using LG_USB_MODEM on ES3 by f_serial
	unsigned char noti_buf[GS_NOTIFY_MAXPACKET];

	memset(noti_buf, 0, GS_NOTIFY_MAXPACKET);
#endif	
// [END] seunghun.kim : for LG_USB_DRIVER 2011.03.25
	req = gser->notify_req;
	gser->notify_req = NULL;
	gser->pending = false;

// [START] seunghun.kim : for LG_USB_DRIVER 2011.03.25
// [START] seunghun.kim : temp_modify for using LG_USB_MODEM on ES3 by f_serial
//#ifdef CONFIG_USB_SUPPORT_LGE_ANDROID_NMEA_FIX
#ifdef CONFIG_LGE_USB_GADGET_DRIVER
// [END] seunghun.kim : temp_modify for using LG_USB_MODEM on ES3 by f_serial
	req->length = GS_NOTIFY_MAXPACKET;
#else
// [END] seunghun.kim : for LG_USB_DRIVER 2011.03.25
	req->length = len;
// [START] seunghun.kim : for LG_USB_DRIVER 2011.03.25
#endif
// [END] seunghun.kim : for LG_USB_DRIVER 2011.03.25
	notify = req->buf;
	buf = notify + 1;

	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	notify->bNotificationType = type;
	notify->wValue = cpu_to_le16(value);
	notify->wIndex = cpu_to_le16(gser->data_id);
	notify->wLength = cpu_to_le16(length);
// [START] seunghun.kim : for LG_USB_DRIVER 2011.03.25
// [START] seunghun.kim : temp_modify for using LG_USB_MODEM on ES3 by f_serial
//#ifdef CONFIG_USB_SUPPORT_LGE_ANDROID_NMEA_FIX
#ifdef CONFIG_LGE_USB_GADGET_DRIVER
// [END] seunghun.kim : temp_modify for using LG_USB_MODEM on ES3 by f_serial
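	/* stage the payload in a fixed-size scratch buffer, then copy
	 * GS_NOTIFY_MAXPACKET bytes in after the notification header;
	 * req->buf must therefore hold at least sizeof(*notify) +
	 * GS_NOTIFY_MAXPACKET bytes, even though req->length is only
	 * GS_NOTIFY_MAXPACKET
	 */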
	memcpy(noti_buf, data, length);
	memcpy(buf, noti_buf, GS_NOTIFY_MAXPACKET);
#else
// [END] seunghun.kim : for LG_USB_DRIVER 2011.03.25
	memcpy(buf, data, length);
// [START] seunghun.kim : for LG_USB_DRIVER 2011.03.25
#endif
// [END] seunghun.kim : for LG_USB_DRIVER 2011.03.25

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status < 0) {
		ERROR(cdev, "gser ttyGS%d can't notify serial state, %d\n",
				gser->port_num, status);
		gser->notify_req = req;
	}

	return status;
}
Example #12
0
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget) &&
			 (dev->gadget->speed == USB_SPEED_HIGH)) {
		dev->tx_qlen++;
		if (dev->tx_qlen == qmult) {
			req->no_interrupt = 0;
			dev->tx_qlen = 0;
		} else {
			req->no_interrupt = 1;
		}
	} else {
		req->no_interrupt = 0;
	}

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
Example #13
0
int rawbulk_function_setup(struct usb_function *f, const struct
        usb_ctrlrequest *ctrl) {
    struct rawbulk_function *fn = function_to_rbf(f);
    unsigned int  setdtr = 0;
    unsigned int data_connect = 0;
    struct usb_composite_dev *cdev = f->config->cdev;
    struct usb_request *req = cdev->req;
    int         value = -EOPNOTSUPP;
    u16         w_index = le16_to_cpu(ctrl->wIndex);
    u16         w_value = le16_to_cpu(ctrl->wValue);
    u16         w_length = le16_to_cpu(ctrl->wLength);

    C2K_NOTE("%s\n", __func__);

    
    if (ctrl->bRequest) {
        C2K_NOTE("ctrl->bRequestType = %0x  ctrl->bRequest = %0x\n",
                 ctrl->bRequestType, ctrl->bRequest);
    }
    switch (ctrl->bRequest) {
        case 0x01:
            if(ctrl->bRequestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {//0x40
                // set/clear DTR 
                C2K_NOTE("setdtr = %d, w_value =%d\n", setdtr, w_value);
                if(fn->activated){
                    setdtr = w_value & 0x01;                 
                    //schedule_work(&flow_control);
                    modem_dtr_set(setdtr, 0);
                    modem_dcd_state();
                }                                 
                value = 0;
            }
            break;
        case 0x02:
            if(ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {//0xC0
                // DSR | CD109 
                //schedule_work(&dtr_status);
                data_connect = modem_dcd_state();
                //modem_dtr_query(&data_connect, 0);
                if(fn->activated) {                   
                    if(data_connect && fn->enable) {
                        *((unsigned char *)req->buf) = 0x3;
                        C2K_NOTE("connect %d\n", data_connect);
                                            
                    }
                    else {
                        *((unsigned char *)req->buf) = 0x2;
                        C2K_NOTE("disconnect=%d, setdtr=%d\n", data_connect, setdtr);
                    }              
                }
                else // report CD/CSR state as 0 if modem bypass is not active
                    *((unsigned char *)req->buf) = 0x0;
                value = 1;
            }
            break;
        case 0x03:
            if(ctrl->bRequestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {//0x40
                // xcvr 
                C2K_NOTE("CTRL SET XCVR 0x%02x\n", w_value);
                value = 0;
            }
            break;
        case 0x04:
            if((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
                if(ctrl->bRequestType & USB_DIR_IN) {//0xC0
                    // return ID
                    sprintf(req->buf, "CBP_8.2");
                    value = 1;
                } else {//0x40
                    value = 0;
                }
            }
            break;
        case 0x05:
            if(ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {//0xC0
                // connect status 
                C2K_NOTE("CTRL CONNECT STATUS\n");
                *((unsigned char *)req->buf) = 0x0;
                value = 1;
            }
            break;
        default:
            C2K_NOTE("invalid control req%02x.%02x v%04x i%04x l%d\n",
                    ctrl->bRequestType, ctrl->bRequest,
                    w_value, w_index, w_length);
    }

    // respond with data transfer or status phase?
    if (value >= 0) {
        req->zero = 0;
        req->length = value;
        req->complete = simple_setup_complete;
        value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
        if (value < 0)
            printk(KERN_ERR "response err %d\n", value);
    }

    // device either stalls (value < 0) or reports success
    return value;
}
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	static long 		prev_len;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			/* Queue zero length packet */
#if 1 //QCT_SBA
			if (prev_len && (prev_len % in->maxpacket == 0)) {
#else
			if (prev_len & (prev_len % in->maxpacket == 0)) {
#endif
				req->length = 0;
				list_del(&req->list);

				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
					__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;

	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}

/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;
	unsigned		started = 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port_tty;
		if (!tty)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = RX_BUF_SIZE;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			started = 0;
			gs_free_req(out, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		started++;

	}
	return started;
}

/*
 * RX work queue takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
	}

	/* Push from tty to ldisc; this is immediate with low_latency, and
	 * may trigger callbacks to this driver ... so drop the spinlock.
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list, &port->read_queue);
	queue_work(gserial_wq, &port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list, &port->write_pool);

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		if (port->port_usb)
			gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num, int size,
		void (*fn)(struct usb_ep *, struct usb_request *))
{
	int			i;
	struct usb_request	*req;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < num; i++) {
		req = gs_alloc_req(ep, size, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
	}
	return 0;
}
Example #15
0
static ssize_t mtp_read(struct file *fp, char __user *buf,
				size_t count, loff_t *pos)
{
	struct usb_request *req = 0;
	int xfer, rc = count;
	int ret;

	while (count > 0) {
		mtp_debug("count=%d\n", count);
		if (g_usb_mtp_context.error)
			return -EIO;

		/* we will block until we're online */
		ret = wait_event_interruptible(g_usb_mtp_context.rx_wq,
			(g_usb_mtp_context.online || g_usb_mtp_context.cancel));
		if (g_usb_mtp_context.cancel) {
			mtp_debug("cancel return in mtp_read at beginning\n");
			g_usb_mtp_context.cancel = 0;
			return -EINVAL;
		}
		if (ret < 0) {
			mtp_err("wait_event_interruptible return %d\n", ret);
			rc = ret;
			break;
		}

		/* if we have idle read requests, get them queued */
		while (1) {
			req = req_get(&g_usb_mtp_context.rx_reqs);
			if (!req)
				break;
requeue_req:
			req->length = BULK_BUFFER_SIZE;
			mtp_debug("rx %p queue\n", req);
			ret = usb_ep_queue(g_usb_mtp_context.bulk_out,
				req, GFP_ATOMIC);

			if (ret < 0) {
				mtp_err("queue error %d\n", ret);
				g_usb_mtp_context.error = 1;
				req_put(&g_usb_mtp_context.rx_reqs, req);
				return ret;
			}
		}

		/* if we have data pending, give it to userspace */
		if (g_usb_mtp_context.data_len > 0) {
			if (g_usb_mtp_context.data_len < count)
				xfer = g_usb_mtp_context.data_len;
			else
				xfer = count;

			if (copy_to_user(buf, g_usb_mtp_context.read_buf,
								xfer)) {
				rc = -EFAULT;
				break;
			}
			g_usb_mtp_context.read_buf += xfer;
			g_usb_mtp_context.data_len -= xfer;
			buf += xfer;
			count -= xfer;
			mtp_debug("xfer=%d\n", xfer);

			/* if we've emptied the buffer, release the request */
			if (g_usb_mtp_context.data_len == 0) {
				req_put(&g_usb_mtp_context.rx_reqs,
						g_usb_mtp_context.cur_read_req);
				g_usb_mtp_context.cur_read_req = 0;
			}
			continue;
		}

		/* wait for a request to complete */
		req = 0;
		mtp_debug("wait req finish\n");
		ret = wait_event_interruptible(g_usb_mtp_context.rx_wq,
		((req = req_get(&g_usb_mtp_context.rx_done_reqs))
			|| g_usb_mtp_context.cancel));
		mtp_debug("req finished\n");
		if (g_usb_mtp_context.cancel) {
			if (req != 0)
				req_put(&g_usb_mtp_context.rx_reqs, req);
			mtp_debug("cancel return in mtp_read at complete\n");
			g_usb_mtp_context.cancel = 0;
			return -EINVAL;
		}
		if (ret < 0) {
			mtp_err("wait_event_interruptible(2) return %d\n", ret);
			rc = ret;
			break;
		}
		if (req != 0) {
			/* if we got a 0-len one we need to put it back into
			** service.  if we made it the current read req we'd
			** be stuck forever
			*/
			if (req->actual == 0)
				goto requeue_req;

			g_usb_mtp_context.cur_read_req = req;
			g_usb_mtp_context.data_len = req->actual;
			g_usb_mtp_context.read_buf = req->buf;
			mtp_debug("rx %p done actual=%d\n", req, req->actual);
		}
	}

	mtp_debug("mtp_read returning %d\n", rc);
	return rc;
}
Example #16
0
static int ecm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ecm		*ecm = func_to_ecm(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/* composite driver infrastructure handles everything except
	 * CDC class messages; interface activation uses set_alt().
	 */
	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
		/* see 6.2.30: no data, wIndex = interface,
		 * wValue = packet filter bitmap
		 */
		if (w_length != 0 || w_index != ecm->ctrl_id)
			goto invalid;
		DBG(cdev, "packet filter %02x\n", w_value);
		/* REVISIT locking of cdc_filter.  This assumes the UDC
		 * driver won't have a concurrent packet TX irq running on
		 * another CPU; or that if it does, this write is atomic...
		 */
		ecm->port.cdc_filter = w_value;
		value = 0;
		break;

	/* and optionally:
	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_STATISTIC:
	 */

	default:
invalid:
		DBG(cdev, "f_ecm : invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "ecm req %02x.%02x response err %d\n",
					ctrl->bRequestType, ctrl->bRequest,
					value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}
Example #17
0
static ssize_t mtp_write(struct file *fp, const char __user *buf,
				 size_t count, loff_t *pos)
{
	struct usb_request *req;
	int rc = count, xfer;
	int ret;

	while (count > 0) {
		mtp_debug("count=%d\n", count);
		if (g_usb_mtp_context.error)
			return -EIO;

		/* get an idle tx request to use */
		ret = wait_event_interruptible(g_usb_mtp_context.tx_wq,
			(g_usb_mtp_context.online || g_usb_mtp_context.cancel));

		if (g_usb_mtp_context.cancel) {
			mtp_debug("cancel return in mtp_write at beginning\n");
			g_usb_mtp_context.cancel = 0;
			return -EINVAL;
		}
		if (ret < 0) {
			mtp_err("wait_event_interruptible return %d\n", ret);
			rc = ret;
			break;
		}

		req = 0;
		mtp_debug("get tx req\n");
		ret = wait_event_interruptible(g_usb_mtp_context.tx_wq,
			((req = req_get(&g_usb_mtp_context.tx_reqs))
			 || g_usb_mtp_context.cancel));

		mtp_debug("got tx req\n");
		if (g_usb_mtp_context.cancel) {
			mtp_debug("cancel return in mtp_write get req\n");
			if (req != 0)
				req_put(&g_usb_mtp_context.tx_reqs, req);
			g_usb_mtp_context.cancel = 0;
			return -EINVAL;
		}
		if (ret < 0) {
			mtp_err("wait_event_interruptible return(2) %d\n", ret);
			rc = ret;
			break;
		}

		if (req != 0) {
			if (count > BULK_BUFFER_SIZE)
				xfer = BULK_BUFFER_SIZE;
			else
				xfer = count;
			if (copy_from_user(req->buf, buf, xfer)) {
				req_put(&g_usb_mtp_context.tx_reqs, req);
				rc = -EFAULT;
				break;
			}

			req->length = xfer;
			ret = usb_ep_queue(g_usb_mtp_context.bulk_in,
				req, GFP_ATOMIC);
			if (ret < 0) {
				mtp_err("error %d\n", ret);
				g_usb_mtp_context.error = 1;
				req_put(&g_usb_mtp_context.tx_reqs, req);
				rc = ret;
				break;
			}

			buf += xfer;
			count -= xfer;
			mtp_debug("xfer=%d\n", xfer);
		}
	}

	mtp_debug("mtp_write returning %d\n", rc);
	return rc;
}
Example #18
0
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	//struct usb_ep		*in = port->port_usb->in;
	struct usb_ep		*in;
	int			status = 0;
	bool			do_tty_wake = false;

	if (!port->port_usb) /* abort immediately after disconnect */
		return -EINVAL;
	in = port->port_usb->in;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		if (port->write_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, in->maxpacket);
		if (len == 0) {
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);
		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		USB_LOGGER(GS_START_TX, GS_START_TX, port->port_num, len);

		#define OUTPUT_BTYE_NUM 5
		{
			int i,j = 0;
			char* prefix[] = {"p1","p2","p3","p4","p5"};
			char* suffix[] = {"s1","s2","s3","s4","s5"};
			for (i = 0; i < req->actual && i < OUTPUT_BTYE_NUM; i++)
				USB_LOGGER(HEX_NUM, GS_START_TX, prefix[i], *((u8 *)req->buf+i));

			if (req->actual >= OUTPUT_BTYE_NUM*2) {
				for(i = req->actual-1, j = 1; i >= (req->actual - OUTPUT_BTYE_NUM) \
					&& i >= OUTPUT_BTYE_NUM; i--,j++) {
					USB_LOGGER(HEX_NUM, GS_START_TX, suffix[OUTPUT_BTYE_NUM-j], \
							*((u8 *)req->buf+i));
				}
			}
		}

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}

		port->write_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}
Example #19
0
static ssize_t mtp_ctl_write(struct file *file, const char *buf,
	size_t count, loff_t *pos)
{
	struct mtp_ctl_msg_header msg;
	struct usb_request *req = NULL;
	struct usb_ep *ep0;
	int ret;

	mtp_debug("count=%d\n", count);

	ret = wait_event_interruptible(g_usb_mtp_context.ctl_tx_wq,
		(g_usb_mtp_context.online || g_usb_mtp_context.ctl_cancel));
	if (g_usb_mtp_context.ctl_cancel) {
		mtp_debug("ctl_cancel return in mtp_ctl_write 1\n");
		g_usb_mtp_context.ctl_cancel = 0;
		return -EINVAL;
	}
	if (ret < 0)
		return ret;

	ep0 = g_usb_mtp_context.cdev->gadget->ep0;
	if (count > ep0->maxpacket || count < MTP_CTL_MSG_HEADER_SIZE) {
		mtp_err("size invalid\n");
		return -ENOMEM;
	}

	/* msg info */
	if (copy_from_user(&msg, buf, MTP_CTL_MSG_HEADER_SIZE))
		return -EINVAL;

	mtp_debug("msg len = %d, msg id = %d", msg.msg_len, msg.msg_id);
	if (msg.msg_id != MTP_CTL_CLASS_REPLY) {
		mtp_err("invalid id %d", msg.msg_id);
		return -EINVAL;
	}

	/* sending the data */
	req = g_usb_mtp_context.ctl_tx_req;
	if (!req)
		return -ENOMEM;
	req->length = count - MTP_CTL_MSG_HEADER_SIZE;
	req->complete = mtp_ctl_write_complete;
	if (copy_from_user(req->buf,
		(u8 *)buf + MTP_CTL_MSG_HEADER_SIZE, req->length)) {
		return -EINVAL;
	}
	ctl_tx_done = 0;
	if (usb_ep_queue(ep0, req, GFP_ATOMIC)) {
		req->status = 0;
		mtp_ctl_write_complete(ep0, req);
		return -EIO;
	}
	ret = wait_event_interruptible(g_usb_mtp_context.ctl_tx_wq,
		(ctl_tx_done || g_usb_mtp_context.ctl_cancel));
	ctl_tx_done = 0;
	if (g_usb_mtp_context.ctl_cancel) {
		mtp_debug("ctl_cancel return in mtp_ctl_write\n");
		g_usb_mtp_context.ctl_cancel = 0;
		return -EINVAL;
	}
	if (ret < 0)
		return ret;

	mtp_debug("return count=%d\n", count);
	return count;
}
static int mtp_function_setup(struct usb_function *f,
					const struct usb_ctrlrequest *ctrl)
{
	int	value = -EOPNOTSUPP;
	u16     wIndex = le16_to_cpu(ctrl->wIndex);
	u16     wLength = le16_to_cpu(ctrl->wLength);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;

//    int result = -EOPNOTSUPP;

	mtp_debug("bRequestType=0x%x bRequest=0x%x wIndex=0x%x wLength=0x%x\n",
		ctrl->bRequestType, ctrl->bRequest, wIndex, wLength);

	switch (ctrl->bRequestType & USB_TYPE_MASK) {
	case USB_TYPE_VENDOR:
		switch (ctrl->bRequest) {
		case MTP_MOD_VENDOR_CODE:
			if (wIndex == mtp_ext_id) {
				memcpy(req->buf, mtp_ext_desc,
						sizeof(mtp_ext_desc));
				if (wLength < mtp_ext_desc[0])
					value = wLength;
				else
					value = mtp_ext_desc[0];

				req->zero = 0;
				req->length = value;
				if (usb_ep_queue(cdev->gadget->ep0, req,
					GFP_ATOMIC))
					mtp_err("ep0 in queue failed\n");
			}
			break;
		default:
			break;
		}
		break;
	case USB_TYPE_CLASS:
		switch (ctrl->bRequest) {
		case MTP_CLASS_CANCEL_REQ:
		case MTP_CLASS_GET_EXTEND_EVEVT_DATA:
		case MTP_CLASS_RESET_REQ:
		case MTP_CLASS_GET_DEVICE_STATUS:
#ifdef CONFIG_LGE_USB_GADGET_MTP_DRIVER
			g_bRequest = ctrl->bRequest;

			if (g_bRequest == MTP_CLASS_CANCEL_REQ) {
				lg_mtp_debug("LG_FW : MTP CANCEL Request PC => Device!!\n");
				cancel_noti = 1;
			} else if (g_bRequest == MTP_CLASS_GET_DEVICE_STATUS) {
				lg_mtp_debug("LG_FW : MTP GET DEVICE Request PC => Device!!\n");
			}
#endif
			mtp_debug("ctl request=0x%x\n", ctrl->bRequest);

			value = 0;
			if ((ctrl->bRequest  == MTP_CLASS_CANCEL_REQ)
				&& wLength == MTP_CANCEL_REQ_DATA_SIZE) {
				value = wLength;
				req->zero = 0;
				req->length = wLength;

                lg_mtp_debug("LG_FW : MTP Cancel Request Length [%d] \n", wLength);
				if (usb_ep_queue(cdev->gadget->ep0,
						req, GFP_ATOMIC)) {
					mtp_err("ep0 out queue failed\n");
				}
			} 
			break;

		default:
			break;
		}
	}

	mtp_debug("return value=%d\n", value);
	return value;

}
Example #21
0
void handle_cmd(struct usb_ep *ep, struct usb_request *req)
{
	struct cloner *cloner = req->context;
	if(req->status == -ECONNRESET) {
		cloner->ack = -ECONNRESET;
		return;
	}

	if (req->actual != req->length) {
		printf("cmd transfer length is err req->actual = %d, req->length = %d\n",
				req->actual,req->length);
		cloner->ack = -EIO;
		return;
	}

	union cmd *cmd = req->buf;
	debug_cond(BURNNER_DEBUG,"handle_cmd type=%x\n",cloner->cmd_type);
	switch(cloner->cmd_type) {
		case VR_UPDATE_CFG:
			cloner->args_req->length = cmd->update.length;
			usb_ep_queue(cloner->ep_out, cloner->args_req, 0);
			break;
		case VR_WRITE:
			realloc_buf(cloner, cmd->write.length);
			cloner->write_req->length = cmd->write.length;
			usb_ep_queue(cloner->ep_out, cloner->write_req, 0);
			break;
		case VR_INIT:
			if(!cloner->inited) {
				cloner->ack = -EBUSY;
				cloner_init(cloner);
				cloner->inited = 1;
				cloner->ack = 0;
			}
			break;
		case VR_READ:
			handle_read(cloner);
			break;
		case VR_GET_CRC:
			if (!cloner->ack)
				usb_ep_queue(cloner->ep_in, cloner->read_req, 0);
			break;
		case VR_SYNC_TIME:
			cloner->ack = rtc_set(&cloner->cmd->rtc);
			break;
		case VR_CHECK:
			cloner->ack = handle_check(cloner);
			break;
		case VR_GET_CHIP_ID:
		case VR_GET_USER_ID:
		case VR_GET_ACK:
		case VR_GET_CPU_INFO:
		case VR_SET_DATA_ADDR:
		case VR_SET_DATA_LEN:
			break;
		case VR_REBOOT:
#ifdef CONFIG_FPGA
			mdelay(1000);
			do_udc_reset();
			mdelay(10000);
#endif
			do_reset(NULL,0,0,NULL);
			break;
		case VR_POWEROFF:
			burner_set_reset_tag();
			do_reset(NULL,0,0,NULL);
			break;
	}
}
static int mtp_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	u16 g_mtp_get_status;
	struct usb_request *req = g_usb_mtp_context.cdev->req;
	struct usb_ep *ep0;
	unsigned long ret;
	int usbconnect = 0;

/* LGE_CHANGES_S [[email protected]] 2010-09-18, Defense code for NULL pointer access */

	/* When mtp_enable_open()/release() (in android.c) is invoked,
	 * mtp_enable_flag is set/cleared. If the enable flag is false (MTP
	 * is off), we cut off the user's ioctl request.
	 */
	if (!mtp_enable_flag)
		return -ENODEV;

	if (product_id != lg_mtp_pid) {
		printk(KERN_INFO "not MTP pid\n");
		return -EFAULT;
	}

	if (g_usb_mtp_context.cdev->gadget == NULL)
		return -EFAULT;

	ep0 = g_usb_mtp_context.cdev->gadget->ep0;

/* LGE_CHANGES_E [[email protected]] */

	switch (cmd) {
	case USB_MTP_FUNC_IOC_CONTROL_REQUEST_GET:
		if (g_bRequest == MTP_CLASS_CANCEL_REQ ||
		    g_bRequest == MTP_CLASS_RESET_REQ ||
		    g_bRequest == MTP_CLASS_GET_DEVICE_STATUS) {
			mtp_debug("USB_MTP_FUNC_IOC_CONTROL_REQUEST_GET status = %d\n",
					g_bRequest);
			ret = copy_to_user((void __user *)arg, &g_bRequest,
					sizeof(g_bRequest));

			if (g_bRequest == MTP_CLASS_CANCEL_REQ) {
				lg_mtp_debug("LG_FW : MTP CANCEL Request Device => App !!\n");
				cancel_noti = 1;
			} else if (g_bRequest == MTP_CLASS_GET_DEVICE_STATUS) {
				lg_mtp_debug("LG_FW : MTP GET DEVICE Request Device => App!!\n");
			}
		} else {
			mtp_debug("USB_MTP_FUNC_IOC_OTHER_CONTROL_REQUEST_GET status = %d\n",
					g_bRequest);
			usbconnect = mtp_get_usb_state();
			if (usbconnect == 0)
				g_bRequest = MTP_OFFLINE;
			else if (g_usb_mtp_context.online == 1)
				g_bRequest = MTP_ONLINE;
			else
				g_bRequest = MTP_UNKOWN;	/* unknown */
			ret = copy_to_user((void __user *)arg, &g_bRequest,
					sizeof(g_bRequest));
		}

		g_bRequest = MTP_NO_INIT_STATUS;

		/* copy_to_user() returns the number of bytes left uncopied */
		return ret ? -EFAULT : 0;

	case USB_MTP_FUNC_IOC_GET_DEVICE_STATUS_SET:
		mtp_debug("USB_MTP_FUNC_IOC_GET_DEVICE_STATUS_SET status = %d\n",
				g_bRequest);
		ret = copy_from_user(&g_mtp_get_status, (void __user *)arg,
				sizeof(g_mtp_get_status));
		if (ret)
			return -EFAULT;

		if (req == NULL) {
			mtp_debug("LG_FW :: req is NULL");
			return -EFAULT;
		}
		lg_mtp_debug("LG_FW : MTP SET DEVICE STATUS App => Device [0x%lx]!!\n",
				arg);
		/* device status payload: length word, then the status code */
		*((u16 *)(req->buf)) = 0x0004;
		*((u16 *)(req->buf + 2)) = (u16)arg;
		req->zero = 0;
		req->length = 6;
		usb_ep_queue(ep0, req, GFP_ATOMIC);

		if (arg == 0x2001)
			cancel_noti = 0;
		break;

	default:
		mtp_debug("Invalid IOCTL Processed!!\n");
		break;
	}

	return 0;
}
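/*
 * Hedged sketch, not from the original file: with this three-argument
 * signature, the handler would be registered through the legacy .ioctl
 * member of file_operations on kernels of this vintage (it predates
 * .unlocked_ioctl). The structure name is an assumption.
 */
static const struct file_operations mtp_fops = {
	.owner	= THIS_MODULE,
	.ioctl	= mtp_ioctl,	/* legacy (BKL-held) ioctl entry point */
};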
Example #23
0
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in;
	int			status = 0;
	static long		prev_len;	/* previous tx length; used to
						 * decide whether a trailing ZLP
						 * is due (static, so shared
						 * across all ports) */
	bool			do_tty_wake = false;

	if (!port->port_usb)
		return 0;
	in = port->port_usb->in;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		if (port->write_started >= TX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			/* Queue zero length packet */
			if (prev_len && (prev_len % in->maxpacket == 0)) {
				req->length = 0;
				list_del(&req->list);
				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
							__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);
		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;
		port->nbytes_from_tty += req->length;
	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}
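/*
 * Hedged sketch, not part of the original driver: the ZLP rule that the
 * prev_len bookkeeping above implements. A bulk transfer whose length is
 * a non-zero multiple of the endpoint's maxpacket size does not
 * terminate on its own, so a zero-length packet must be queued after it.
 */
static inline bool needs_zlp(long prev_len, unsigned int maxpacket)
{
	return prev_len != 0 && (prev_len % maxpacket) == 0;
}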
Example #24
0
static ssize_t acc_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct acc_dev *dev = fp->private_data;
	struct usb_request *req;
	int r = count, xfer;
	int ret = 0;

	pr_debug("acc_read(%d)\n", count);

	if (dev->disconnected)
		return -ENODEV;

	if (count > BULK_BUFFER_SIZE)
		count = BULK_BUFFER_SIZE;

	/* we will block until we're online */
	pr_debug("acc_read: waiting for online\n");
	ret = wait_event_interruptible(dev->read_wq, dev->online);
	if (ret < 0) {
		r = ret;
		goto done;
	}

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = count;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		pr_debug("rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
	if (ret < 0) {
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->online) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		pr_debug("rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	pr_debug("acc_read returning %d\n", r);
	return r;
}
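/*
 * Hedged sketch, not from the original file: the bulk OUT completion
 * handler that acc_read() relies on. Something along these lines must
 * set rx_done and wake read_wq for the wait_event_interruptible() above
 * to make progress; the exact body is an assumption.
 */
static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct acc_dev *dev = _acc_dev;

	dev->rx_done = 1;		/* observed by acc_read() */
	if (req->status == -ESHUTDOWN)
		dev->disconnected = 1;	/* fail future reads early */

	wake_up(&dev->read_wq);
}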
Example #25
0
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		if (port->write_started >= QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, in->maxpacket);
		if (len == 0) {
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);
		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}

		port->write_started++;

		/* abort immediately after disconnect */
		if (!port->port_usb)
			break;
	}

	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}
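/*
 * Hedged sketch, not from the original file: gs_start_tx() is documented
 * as running "whenever ... write requests are available", so a write
 * completion handler roughly like this would recycle the request and
 * kick the transmitter again. The body is an assumption.
 */
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port *port = ep->driver_data;

	spin_lock(&port->port_lock);
	list_add(&req->list, &port->write_pool);	/* recycle the request */
	port->write_started--;
	if (port->port_usb)
		gs_start_tx(port);	/* caller must own port_lock */
	spin_unlock(&port->port_lock);
}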
Example #26
0
static int acc_ctrlrequest(struct usb_composite_dev *cdev,
				const struct usb_ctrlrequest *ctrl)
{
	struct acc_dev	*dev = _acc_dev;
	int	value = -EOPNOTSUPP;
	u8 b_requestType = ctrl->bRequestType;
	u8 b_request = ctrl->bRequest;
	u16	w_index = le16_to_cpu(ctrl->wIndex);
	u16	w_value = le16_to_cpu(ctrl->wValue);
	u16	w_length = le16_to_cpu(ctrl->wLength);

/*
	printk(KERN_INFO "acc_ctrlrequest "
			"%02x.%02x v%04x i%04x l%u\n",
			b_requestType, b_request,
			w_value, w_index, w_length);
*/

	if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
		if (b_request == ACCESSORY_START) {
			dev->start_requested = 1;
			schedule_delayed_work(
				&dev->work, msecs_to_jiffies(10));
			value = 0;
		} else if (b_request == ACCESSORY_SEND_STRING) {
			dev->string_index = w_index;
			cdev->gadget->ep0->driver_data = dev;
			cdev->req->complete = acc_complete_set_string;
			value = w_length;
		}
	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
		if (b_request == ACCESSORY_GET_PROTOCOL) {
			*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
			value = sizeof(u16);

			/* clear any strings left over from a previous session */
			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
			memset(dev->model, 0, sizeof(dev->model));
			memset(dev->description, 0, sizeof(dev->description));
			memset(dev->version, 0, sizeof(dev->version));
			memset(dev->uri, 0, sizeof(dev->uri));
			memset(dev->serial, 0, sizeof(dev->serial));
			dev->start_requested = 0;
		}
	}

	if (value >= 0) {
		cdev->req->zero = 0;
		cdev->req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "%s setup response queue error\n",
				__func__);
	}

	if (value == -EOPNOTSUPP)
		VDBG(cdev,
			"unknown class-specific control req "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	return value;
}
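/*
 * Hedged sketch, not part of the original: vendor requests such as
 * ACCESSORY_START arrive before the accessory interface is active, so a
 * composite driver typically forwards unhandled SETUP packets here from
 * its own setup callback. Everything except acc_ctrlrequest() is an
 * assumption for illustration.
 */
static int android_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *c)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	int value;

	value = acc_ctrlrequest(cdev, c);	/* try the accessory hook first */
	if (value < 0)
		value = composite_setup(gadget, c);	/* assumed visible here */
	return value;
}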
Example #27
0
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	struct net_device *net = dev->net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		if (!req->zero)
			dev->net->stats.tx_bytes += req->length - 1;
		else
			dev->net->stats.tx_bytes += req->length;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add_tail(&req->list, &dev->tx_reqs);

	if (dev->port_usb->multi_pkt_xfer) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		if (!list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize */
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding. and some hardware
				 * doesn't like to write zlps.
				 */
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				new_req->length = length;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					new_req->length = 0;
					spin_lock(&dev->req_lock);
					list_add_tail(&new_req->list,
							&dev->tx_reqs);
					spin_unlock(&dev->req_lock);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					net->trans_start = jiffies;
				}
			} else {
				spin_lock(&dev->req_lock);
				/*
				 * Put the idle request at the back of the
				 * queue. The xmit function will put the
				 * unfinished request at the beginning of the
				 * queue.
				 */
				list_add_tail(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
Example #28
0
static int mtp_function_setup(struct usb_function *f,
					const struct usb_ctrlrequest *ctrl)
{
	int	value = -EOPNOTSUPP;
	u16     wIndex = le16_to_cpu(ctrl->wIndex);
	u16     wLength = le16_to_cpu(ctrl->wLength);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	struct ctl_req_wrapper	*ctl_req;

	if (f->hidden == 1)
		return value;

	mtp_debug("bRequestType=0x%x bRequest=0x%x wIndex=0x%x wLength=0x%x\n",
		ctrl->bRequestType, ctrl->bRequest, wIndex, wLength);

	switch (ctrl->bRequestType & USB_TYPE_MASK) {
	case USB_TYPE_VENDOR:
		switch (ctrl->bRequest) {
		case MTP_MOD_VENDOR_CODE:
			if (wIndex == mtp_ext_id) {
				memcpy(req->buf, mtp_ext_desc,
						sizeof(mtp_ext_desc));
				if (wLength < mtp_ext_desc[0])
					value = wLength;
				else
					value = mtp_ext_desc[0];

				req->zero = 0;
				req->length = value;
				if (usb_ep_queue(cdev->gadget->ep0, req,
					GFP_ATOMIC))
					mtp_err("ep0 in queue failed\n");
			}
			break;
		default:
			break;
		}
		break;
	case USB_TYPE_CLASS:
		switch (ctrl->bRequest) {
		case MTP_CLASS_CANCEL_REQ:
		case MTP_CLASS_GET_EXTEND_EVEVT_DATA:
		case MTP_CLASS_GET_DEVICE_STATUS:
			mtp_debug("ctl request=0x%x\n", ctrl->bRequest);
			ctl_req = ctl_req_get(&g_usb_mtp_context.ctl_rx_reqs);
			if (!ctl_req) {
				mtp_err("get free ctl req failed\n");
				break;
			}
			memcpy(&ctl_req->creq, ctrl,
					sizeof(struct usb_ctrlrequest));
			ctl_req->header = 1;
			ctl_req_put(&g_usb_mtp_context.ctl_rx_done_reqs,
				ctl_req);
			value = 0;
			if ((ctrl->bRequest == MTP_CLASS_CANCEL_REQ)
				&& wLength == MTP_CANCEL_REQ_DATA_SIZE) {
				memset(&ctl_req->cancel_data, 0,
					MTP_CANCEL_REQ_DATA_SIZE);
				value = wLength;
				cdev->gadget->ep0->driver_data = ctl_req;
				req->complete = mtp_ctl_read_complete;
				req->zero = 0;
				req->length = wLength;

				if (usb_ep_queue(cdev->gadget->ep0,
						req, GFP_ATOMIC)) {
					mtp_err("ep0 out queue failed\n");
					mtp_ctl_read_complete(cdev->gadget->ep0,
							req);
				}
			} else
				wake_up(&g_usb_mtp_context.ctl_rx_wq);
			break;
		default:
			break;
		}
	}

	mtp_debug("return value=%d\n", value);
	return value;
}
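/*
 * Hedged sketch, not from the original: the shape of the ep0 completion
 * handler named above. mtp_function_setup() stores the ctl_req wrapper
 * in ep0->driver_data, so the handler can copy the cancel payload out of
 * the shared ep0 buffer and wake the reader. The body is an assumption.
 */
static void mtp_ctl_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ctl_req_wrapper *ctl_req = ep->driver_data;

	if (req->status == 0 && req->actual == MTP_CANCEL_REQ_DATA_SIZE)
		memcpy(&ctl_req->cancel_data, req->buf,
				MTP_CANCEL_REQ_DATA_SIZE);

	wake_up(&g_usb_mtp_context.ctl_rx_wq);	/* reader waits on this queue */
}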
Example #29
0
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
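/*
 * Hedged sketch, not part of the driver: the buffer-size arithmetic
 * above, worked for one plausible configuration (MTU 1500, 14-byte
 * Ethernet header, maxpacket 512, with RX_EXTRA and header_len assumed
 * to be 0). Rounding up to a multiple of maxpacket keeps "terminate on
 * short read" hardware happy.
 */
static size_t rx_buf_size_example(void)
{
	size_t size = 14 + 1500;	/* sizeof(struct ethhdr) + mtu, extras 0 */

	size += 512 - 1;		/* round up ... */
	size -= size % 512;		/* ... to N * maxpacket */
	return size;			/* 1536, i.e. 3 * 512 */
}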
Example #30
0
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;
	unsigned short reserve_headroom;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->ul_max_pkts_per_xfer)
		size *= dev->ul_max_pkts_per_xfer;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	if (dev->rx_needed_headroom)
		reserve_headroom = dev->rx_needed_headroom;
	else
		reserve_headroom = NET_IP_ALIGN;

	pr_debug("%s: size: %zu + %d(hr)", __func__, size, reserve_headroom);

	skb = alloc_skb(size + reserve_headroom, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	skb_reserve(skb, reserve_headroom);

	req->buf = skb->data;
	req->length = size;
	req->context = skb;
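	/* note: unlike the earlier variant, req->complete is not set here;
	 * it is presumably installed once when the request is allocated
	 */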

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}