Example #1
/* Returns a tegra_iovmm_area for a handle. If the handle already has
 * an iovmm_area allocated, the handle is simply removed from its MRU list
 * and the existing iovmm_area is returned.
 *
 * If no allocation exists yet, try to allocate a new IOVMM area.
 *
 * If a new area cannot be allocated, try to re-use the most-recently-unpinned
 * handle's allocation.
 *
 * If that fails too, iteratively evict handles from the MRU lists and free
 * their allocations until the new allocation succeeds.
 */
struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
					    struct nvmap_handle *h)
{
	struct list_head *mru;
	struct nvmap_handle *evict = NULL;
	struct tegra_iovmm_area *vm = NULL;
	unsigned int i, idx;
	pgprot_t prot;

	BUG_ON(!h || !c || !c->share);

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->pgalloc.area) {
		BUG_ON(list_empty(&h->pgalloc.mru_list));
		list_del(&h->pgalloc.mru_list);
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return h->pgalloc.area;
	}

	vm = tegra_iovmm_create_vm(c->share->iovmm, NULL,
			h->size, h->align, prot,
			h->pgalloc.iovm_addr);

	if (vm) {
		INIT_LIST_HEAD(&h->pgalloc.mru_list);
		return vm;
	}
	/* if the client is looking for a specific iovm address, return from here. */
	if ((vm == NULL) && (h->pgalloc.iovm_addr != 0))
		return NULL;
	/* attempt to re-use the most recently unpinned IOVMM area in the
	 * same size bin as the current handle. If that fails, iteratively
	 * evict handles (starting from the current bin) until an allocation
	 * succeeds or no more areas can be evicted */
	mru = mru_list(c->share, h->size);
	if (!list_empty(mru))
		evict = list_first_entry(mru, struct nvmap_handle,
					 pgalloc.mru_list);

	if (evict && evict->pgalloc.area->iovm_length >= h->size) {
		list_del(&evict->pgalloc.mru_list);
		vm = evict->pgalloc.area;
		evict->pgalloc.area = NULL;
		INIT_LIST_HEAD(&evict->pgalloc.mru_list);
		return vm;
	}

	idx = mru - c->share->mru_lists;

	for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
		if (idx >= c->share->nr_mru)
			idx = 0;
		mru = &c->share->mru_lists[idx];
		while (!list_empty(mru) && !vm) {
			evict = list_first_entry(mru, struct nvmap_handle,
						 pgalloc.mru_list);

			BUG_ON(atomic_read(&evict->pin) != 0);
			BUG_ON(!evict->pgalloc.area);
			list_del(&evict->pgalloc.mru_list);
			INIT_LIST_HEAD(&evict->pgalloc.mru_list);
			tegra_iovmm_free_vm(evict->pgalloc.area);
			evict->pgalloc.area = NULL;
			vm = tegra_iovmm_create_vm(c->share->iovmm,
					NULL, h->size, h->align,
					prot, h->pgalloc.iovm_addr);
		}
	}
	return vm;
}
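
A detail worth calling out in Example #1: after list_del() the node is immediately re-initialized with INIT_LIST_HEAD(), so later code can test membership with list_empty() on the node itself. Below is a minimal userspace sketch of that detach-and-reinitialize idiom; the list helpers are hand-rolled stand-ins for <linux/list.h>, and struct demo_handle is an invented type, not the real nvmap_handle.

#include <stdio.h>

/* tiny doubly-linked list, modelled on the kernel's list_head */
struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }
static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next; n->prev = head;
	head->next->prev = n; head->next = n;
}
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

struct demo_handle {
	int id;
	struct list_head mru_list;	/* empty when the handle is pinned */
};

int main(void)
{
	struct list_head mru = LIST_HEAD_INIT(mru);
	struct demo_handle h = { .id = 1 };

	INIT_LIST_HEAD(&h.mru_list);
	list_add(&h.mru_list, &mru);		/* handle becomes evictable */

	/* "pin" the handle: detach it and re-init so list_empty() works later */
	list_del(&h.mru_list);
	INIT_LIST_HEAD(&h.mru_list);

	printf("on MRU list: %s\n", list_empty(&h.mru_list) ? "no" : "yes");
	return 0;
}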
Example #2
File: mcl_list.c Project: lishengwen/mcl
int mcl_list_delete(void *data, mcl_list *list_ptr, mcl_free_fn_t free_fn)
{
	return list_del(data, list_ptr, 1, NULL, free_fn);
}
/**
 * Find the first available slot for a new block of shared memory
 * and map the user buffer.
 * Update the descriptors to L1 descriptors
 * Update the buffer_start_offset and buffer_size fields
 * shmem_desc is updated to the mapped shared memory descriptor
 **/
int tf_map_shmem(
		struct tf_connection *connection,
		u32 buffer,
		/* flags for read-write access rights on the memory */
		u32 flags,
		bool in_user_space,
		u32 descriptors[TF_MAX_COARSE_PAGES],
		u32 *buffer_start_offset,
		u32 buffer_size,
		struct tf_shmem_desc **shmem_desc,
		u32 *descriptor_count)
{
	struct tf_shmem_desc *desc = NULL;
	int error;

	dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
					connection,
					(void *) buffer,
					flags);

	/*
	 * Added temporarily to avoid kernel stack buffers
	 */
	if (!in_user_space) {
		if (object_is_on_stack((void *)buffer) != 0) {
			dprintk(KERN_ERR
				"tf_map_shmem: "
				"kernel stack buffers "
				"(addr=0x%08X) "
				"are not supported",
				buffer);
			error = -ENOSYS;
			goto error;
		}
	}

	mutex_lock(&(connection->shmem_mutex));

	/*
	 * Check the list of free shared memory
	 * is not empty
	 */
	if (list_empty(&(connection->free_shmem_list))) {
		if (atomic_read(&(connection->shmem_count)) ==
				TF_SHMEM_MAX_COUNT) {
			printk(KERN_ERR "tf_map_shmem(%p):"
				" maximum shared memories already registered\n",
				connection);
			error = -ENOMEM;
			goto error;
		}

		/* no descriptor available, allocate a new one */

		desc = (struct tf_shmem_desc *) internal_kmalloc(
			sizeof(*desc), GFP_KERNEL);
		if (desc == NULL) {
			printk(KERN_ERR "tf_map_shmem(%p):"
				" failed to allocate descriptor\n",
				connection);
			error = -ENOMEM;
			goto error;
		}

		/* Initialize the structure */
		desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
		atomic_set(&desc->ref_count, 1);
		INIT_LIST_HEAD(&(desc->list));

		atomic_inc(&(connection->shmem_count));
	} else {
		/* take the first free shared memory descriptor */
		desc = list_first_entry(&(connection->free_shmem_list),
			struct tf_shmem_desc, list);
		list_del(&(desc->list));
	}

	/* Add the descriptor to the used list */
	list_add(&(desc->list), &(connection->used_shmem_list));

	error = tf_fill_descriptor_table(
			&(connection->cpt_alloc_context),
			desc,
			buffer,
			connection->vmas,
			descriptors,
			buffer_size,
			buffer_start_offset,
			in_user_space,
			flags,
			descriptor_count);

	if (error != 0) {
		dprintk(KERN_ERR "tf_map_shmem(%p):"
			" tf_fill_descriptor_table failed with error "
			"code %d!\n",
			connection,
			error);
		goto error;
	}
	desc->client_buffer = (u8 *) buffer;

	/*
	 * Successful completion.
	 */
	*shmem_desc = desc;
	mutex_unlock(&(connection->shmem_mutex));
	dprintk(KERN_DEBUG "tf_map_shmem: success\n");
	return 0;


	/*
	 * Error handling.
	 */
error:
	mutex_unlock(&(connection->shmem_mutex));
	dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
		error);

	tf_unmap_shmem(
			connection,
			desc,
			0);

	return error;
}
Example #4
static void *hub_probe(struct usb_device *dev, unsigned int i,
		       const struct usb_device_id *id)
{
	struct usb_interface_descriptor *interface;
	struct usb_endpoint_descriptor *endpoint;
	struct usb_hub *hub;
	unsigned long flags;

	interface = &dev->actconfig->interface[i].altsetting[0];

	/* Some hubs have a subclass of 1, which AFAICT according to the */
	/*  specs is not defined, but it works */
	if ((interface->bInterfaceSubClass != 0) &&
	    (interface->bInterfaceSubClass != 1)) {
		err("invalid subclass (%d) for USB hub device #%d",
			interface->bInterfaceSubClass, dev->devnum);
		return NULL;
	}

	/* Multiple endpoints? What kind of mutant ninja-hub is this? */
	if (interface->bNumEndpoints != 1) {
		err("invalid bNumEndpoints (%d) for USB hub device #%d",
			interface->bNumEndpoints, dev->devnum);
		return NULL;
	}

	endpoint = &interface->endpoint[0];

	/* Output endpoint? Curiouser and curiouser.. */
	if (!(endpoint->bEndpointAddress & USB_DIR_IN)) {
		err("Device #%d is hub class, but has output endpoint?",
			dev->devnum);
		return NULL;
	}

	/* If it's not an interrupt endpoint, we'd better punt! */
	if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) {
		err("Device #%d is hub class, but has endpoint other than interrupt?",
			dev->devnum);
		return NULL;
	}

	/* We found a hub */
	info("USB hub found");

	hub = kmalloc(sizeof(*hub), GFP_KERNEL);
	if (!hub) {
		err("couldn't kmalloc hub struct");
		return NULL;
	}

	memset(hub, 0, sizeof(*hub));

	INIT_LIST_HEAD(&hub->event_list);
	hub->dev = dev;
	init_MUTEX(&hub->khubd_sem);

	/* Record the new hub's existence */
	spin_lock_irqsave(&hub_event_lock, flags);
	INIT_LIST_HEAD(&hub->hub_list);
	list_add(&hub->hub_list, &hub_list);
	spin_unlock_irqrestore(&hub_event_lock, flags);

	if (usb_hub_configure(hub, endpoint) >= 0)
		return hub;

	err("hub configuration failed for device #%d", dev->devnum);

	/* free hub, but first clean up its list. */
	spin_lock_irqsave(&hub_event_lock, flags);

	/* Delete it and then reset it */
	list_del(&hub->event_list);
	INIT_LIST_HEAD(&hub->event_list);
	list_del(&hub->hub_list);
	INIT_LIST_HEAD(&hub->hub_list);

	spin_unlock_irqrestore(&hub_event_lock, flags);

	kfree(hub);

	return NULL;
}
static ssize_t rmnet_ctl_read(struct file *file, char __user *buf, size_t count,
		loff_t *ppos)
{
	int				retval = 0;
	int				bytes_to_read;
	struct rmnet_ctrl_dev		*dev;
	struct ctrl_pkt_list_elem	*list_elem = NULL;
	unsigned long			flags;

	dev = file->private_data;
	if (!dev)
		return -ENODEV;

	DBG("%s: Read from %s\n", __func__, dev->name);

ctrl_read:
	if (!is_dev_connected(dev)) {
		dev_dbg(dev->devicep, "%s: Device not connected\n",
			__func__);
		return -ENETRESET;
	}
	spin_lock_irqsave(&dev->rx_lock, flags);
	if (list_empty(&dev->rx_list)) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);

		retval = wait_event_interruptible(dev->read_wait_queue,
					!list_empty(&dev->rx_list) ||
					!is_dev_connected(dev));
		if (retval < 0)
			return retval;

		/* re-check that the device is connected, because the wait
		 * event also returns 0 on disconnect
		 */
		if (!is_dev_connected(dev)) {
			dev_dbg(dev->devicep, "%s: Device not connected\n",
				__func__);
			return -ENETRESET;
		}

		goto ctrl_read;
	}

	list_elem = list_first_entry(&dev->rx_list,
				     struct ctrl_pkt_list_elem, list);
	bytes_to_read = (uint32_t)(list_elem->cpkt.data_size);
	if (bytes_to_read > count) {
		spin_unlock_irqrestore(&dev->rx_lock, flags);
		dev_err(dev->devicep, "%s: Packet size %d > buf size %d\n",
			__func__, bytes_to_read, count);
		return -ENOMEM;
	}
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	if (copy_to_user(buf, list_elem->cpkt.data, bytes_to_read)) {
			dev_err(dev->devicep,
				"%s: copy_to_user failed for %s\n",
				__func__, dev->name);
		return -EFAULT;
	}
	spin_lock_irqsave(&dev->rx_lock, flags);
	list_del(&list_elem->list);
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	kfree(list_elem->cpkt.data);
	kfree(list_elem);
	DBG("%s: Returning %d bytes to %s\n", __func__, bytes_to_read,
			dev->name);
	DUMP_BUFFER("Read: ", bytes_to_read, buf);

	return bytes_to_read;
}
Example #6
File: slist.c Project: ferreiro/C
void remove_slist ( slist_t* slist,void* elem)
{
    struct list_head* link=((struct list_head*)(((char*)elem) +  slist->node_offset));
    list_del(link);
    slist->size--;
}
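
remove_slist() relies on each element embedding a struct list_head at a byte offset recorded in slist->node_offset; the cast arithmetic recovers the link from the element pointer. A small sketch of that offset idea, assuming an invented struct item and a simplified slist_t:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct item {
	int value;
	struct list_head link;		/* embedded link node */
};

typedef struct {
	size_t node_offset;		/* offsetof(element type, link) */
	size_t size;
} slist_t;

int main(void)
{
	slist_t slist = { .node_offset = offsetof(struct item, link), .size = 1 };
	struct item it = { .value = 42 };

	/* what remove_slist() does internally: element pointer -> link pointer */
	struct list_head *link =
		(struct list_head *)((char *)&it + slist.node_offset);

	printf("link == &it.link ? %s\n", link == &it.link ? "yes" : "no");
	return 0;
}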
Example #7
File: swap.c Project: spinlock/ucore
// swap_page_del - clear PG_swap flag in page, and del page from hash_list.
static void
swap_page_del(struct Page *page) {
    assert(PageSwap(page));
    ClearPageSwap(page);
    list_del(&(page->page_link));
}
Example #8
void del_video (onlineTV_t tv, int pos)
{
    list_del(tv->playlist, pos);
}
Example #9
void next_video (onlineTV_t tv)
{
    list_del(tv->playlist, list_size(tv->playlist) - 1);
    if(((video_t *)list_get(tv->playlist, list_size(tv->playlist) - 1))->type == 1) // type 1 is a video, 0 is an advertisement
        event(tv);
}
Example #10
/*
 * Context: caller owns port_lock, and port_usb is set
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool;
	struct usb_ep		*out;
	unsigned		started = 0;

	if (!port || !port->port_usb) {
		pr_err("Error - port or port->usb is NULL.");
		return -EIO;
	}

	pool = &port->read_pool;
	out  = port->port_usb->out;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port.tty;
		if (!tty)
			break;

		if (port->read_started >= RX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = RX_BUF_SIZE;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			started = 0;
			gs_free_req(out, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		port->read_started++;

	}
	return port->read_started;
}
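
The comments in gs_start_rx describe a pattern that recurs in several examples here: drop the lock before calling into a lower layer (usb_ep_queue), then re-acquire it and re-validate the shared state (port->port_usb) because a disconnect may have run in between. A hedged userspace sketch of that revalidate-after-relock idea, with a pthread mutex standing in for the spinlock and invented names (struct device, submit_to_hardware):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct device {
	pthread_mutex_t lock;
	bool connected;		/* may be cleared by a disconnect path */
};

static int submit_to_hardware(void)
{
	/* pretend callout into a lower layer that must not be made
	 * while holding our lock (it may call back into us) */
	return 0;
}

static int queue_one(struct device *dev)
{
	int status;

	pthread_mutex_lock(&dev->lock);
	if (!dev->connected) {
		pthread_mutex_unlock(&dev->lock);
		return -1;
	}

	/* drop the lock across the callout ... */
	pthread_mutex_unlock(&dev->lock);
	status = submit_to_hardware();
	pthread_mutex_lock(&dev->lock);

	/* ... and re-validate: a disconnect may have run meanwhile */
	if (!dev->connected)
		status = -1;

	pthread_mutex_unlock(&dev->lock);
	return status;
}

int main(void)
{
	struct device dev = { .lock = PTHREAD_MUTEX_INITIALIZER, .connected = true };

	printf("queue_one -> %d\n", queue_one(&dev));
	return 0;
}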
Example #11
File: xpmem_fwd.c Project: azet/kitten
/* Process an XPMEM_DOMID_REQUEST/RESPONSE/RELEASE command */
static int
xpmem_fwd_process_domid_cmd(struct xpmem_partition_state * part_state,
			    xpmem_link_t		   link,
			    struct xpmem_cmd_ex		 * cmd)
{
    struct xpmem_fwd_state * fwd_state = part_state->fwd_state;

    /* There's no reason not to reuse the input command struct for responses */
    struct xpmem_cmd_ex    * out_cmd  = cmd;
    xpmem_link_t	     out_link = link;

    switch (cmd->type) {
	case XPMEM_DOMID_REQUEST: {
	    /* A domid is requested by someone downstream from us on link
	     * 'link'. If we can't reach the nameserver, just return failure,
	     * because the request should not come through us unless we have a
	     * route already
	     */
	    if (!xpmem_have_ns_link(fwd_state)) {
		return -1;
	    }

	    /* Buffer the request */
	    {
		struct xpmem_domid_req_iter * iter = NULL;
		unsigned long		      flags = 0;

		iter = kmem_alloc(sizeof(struct xpmem_domid_req_iter));
		if (!iter) {
		    return -ENOMEM;
		}

		iter->link = link;

		spin_lock_irqsave(&(fwd_state->lock), flags);
		{
		    list_add_tail(&(iter->node), &(fwd_state->domid_req_list));
		}
		spin_unlock_irqrestore(&(fwd_state->lock), flags);

		/* Forward request up to the nameserver */
		out_link = fwd_state->ns_link;
	    }

	    break;
	}

	case XPMEM_DOMID_RESPONSE: {
	    int ret = 0;
	    /* We've been allocated a domid.
	     *
	     * If our domain has no domid, take it for ourselves.
	     * Otherwise, assign it to a link that has requested a domid from us
	     */
	     
	    if (part_state->domid <= 0) {
		part_state->domid = cmd->domid_req.domid;

		/* Update the domid map to remember our own domid */
		ret = xpmem_add_domid(part_state, part_state->domid, part_state->local_link);

		if (ret == 0) {
		    XPMEM_ERR("Cannot insert domid %lli into hashtable", part_state->domid);
		    return -EFAULT;
		}

		return 0;
	    } else {
		struct xpmem_domid_req_iter * iter = NULL;
		unsigned long		      flags = 0;

		if (list_empty(&(fwd_state->domid_req_list))) {
		    XPMEM_ERR("We currently do not support the buffering of XPMEM domids");
		    return -1;
		}

		spin_lock_irqsave(&(fwd_state->lock), flags);
		{
		    iter = list_first_entry(&(fwd_state->domid_req_list),
				struct xpmem_domid_req_iter,
				node);
		    list_del(&(iter->node));
		}
		spin_unlock_irqrestore(&(fwd_state->lock), flags);

		/* Forward the domid to this link */
		out_link = iter->link;
		kmem_free(iter);

		/* Update the domid map to remember who has this */
		ret = xpmem_add_domid(part_state, cmd->domid_req.domid, out_link);

		if (ret == 0) {
		    XPMEM_ERR("Cannot insert domid %lli into hashtable", cmd->domid_req.domid);
		    return -EFAULT;
		}
	    }

	    break;
	}

	case XPMEM_DOMID_RELEASE:
	    /* Someone downstream is releasing their domid: simply forward to the
	     * nameserver */
	    out_link = xpmem_search_domid(part_state, out_cmd->dst_dom);

	    if (out_link == 0) {
		XPMEM_ERR("Cannot find domid %lli in hashtable", out_cmd->dst_dom);
		return -EFAULT;
	    }

	    break;

	default: {
	    XPMEM_ERR("Unknown DOMID operation: %s", cmd_to_string(cmd->type));
	    return -EINVAL;
	}
    }

    /* Send the response */
    if (xpmem_send_cmd_link(part_state, out_link, out_cmd)) {
	XPMEM_ERR("Cannot send command on link %lli", out_link);
	return -EFAULT;
    }

    return 0;
}
Example #12
/*
 * gs_start_tx
 *
 * This function finds available write requests, calls
 * gs_send_packet to fill these packets with data, and
 * continues until either there are no more write requests
 * available or no more data to send.  This function is
 * run whenever data arrives or write requests are available.
 *
 * Context: caller owns port_lock; port_usb is non-null.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool;
	struct usb_ep		*in;
	int			status = 0;
	static long 		prev_len;
	bool			do_tty_wake = false;

	if (!port || !port->port_usb) {
		pr_err("Error - port or port->usb is NULL.");
		return -EIO;
	}

	pool = &port->write_pool;
	in   = port->port_usb->in;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		if (port->write_started >= TX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			/* Queue zero length packet explicitly to make it
			 * work with UDCs which don't support req->zero flag
			 */
			if (prev_len && (prev_len % in->maxpacket == 0)) {
				req->length = 0;
				list_del(&req->list);
				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
					__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so.  Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;
		port->nbytes_from_tty += req->length;

		port->write_started++;

	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);
	return status;
}
Example #13
File: ispvideo.c Project: 168519/linux
/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp and
 * field count before handing it back to videobuf2.
 *
 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	struct isp_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	spin_lock_irqsave(&video->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&video->irqlock, flags);

	ktime_get_ts(&ts);
	buf->vb.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
	buf->vb.v4l2_buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.v4l2_buf.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);

	if (pipe->field != V4L2_FIELD_NONE)
		buf->vb.v4l2_buf.sequence /= 2;

	buf->vb.v4l2_buf.field = pipe->field;

	/* Report pipeline errors to userspace on the capture device side. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		state = VB2_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		state = VB2_BUF_STATE_DONE;
	}

	vb2_buffer_done(&buf->vb, state);

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->dmaqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);

		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	buf->vb.state = VB2_BUF_STATE_ACTIVE;

	spin_unlock_irqrestore(&video->irqlock, flags);

	return buf;
}
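
Example #13 and several others follow the same consumer idiom: take the lock, bail if the list is empty, grab the head with list_first_entry(), list_del() it, then drop the lock before doing the slow completion work. A minimal userspace sketch of that idiom; the list helpers are hand-rolled and struct buffer / buffer_next() are invented names, not the real isp_buffer API:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}
static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->next = head; n->prev = head->prev;
	head->prev->next = n; head->prev = n;
}
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct buffer {
	int seq;
	struct list_head irqlist;
};

static struct list_head dmaqueue = LIST_HEAD_INIT(dmaqueue);
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Dequeue the oldest buffer, or return NULL if the queue is empty. */
static struct buffer *buffer_next(void)
{
	struct buffer *buf = NULL;

	pthread_mutex_lock(&lock);
	if (!list_empty(&dmaqueue)) {
		buf = list_first_entry(&dmaqueue, struct buffer, irqlist);
		list_del(&buf->irqlist);
	}
	pthread_mutex_unlock(&lock);
	return buf;		/* slow completion work happens unlocked */
}

int main(void)
{
	struct buffer b = { .seq = 7 };

	list_add_tail(&b.irqlist, &dmaqueue);
	printf("dequeued seq %d\n", buffer_next()->seq);
	return 0;
}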
Example #14
File: server.c Project: noah/cmus
static void read_commands(struct client *client)
{
    char buf[1024];
    int pos = 0;
    if (!client->authenticated)
        client->authenticated = addr.sa.sa_family == AF_UNIX;

    while (1) {
        int rc, s, i;

        rc = read(client->fd, buf + pos, sizeof(buf) - pos);
        if (rc == -1) {
            if (errno == EINTR)
                continue;
            if (errno == EAGAIN)
                return;
            goto close;
        }
        if (rc == 0)
            goto close;
        pos += rc;

        s = 0;
        for (i = 0; i < pos; i++) {
            const char *line, *msg;
            char *cmd, *arg;
            int ret;

            if (buf[i] != '\n')
                continue;

            buf[i] = 0;
            line = buf + s;
            s = i + 1;

            if (!client->authenticated) {
                if (!server_password) {
                    msg = "password is unset, tcp/ip disabled";
                    d_print("%s\n", msg);
                    ret = send_answer(client->fd, "%s\n\n", msg);
                    goto close;
                }
                if (strncmp(line, "passwd ", 7) == 0)
                    line += 7;
                client->authenticated = !strcmp(line, server_password);
                if (!client->authenticated) {
                    msg = "authentication failed";
                    d_print("%s\n", msg);
                    ret = send_answer(client->fd, "%s\n\n", msg);
                    goto close;
                }
                ret = write_all(client->fd, "\n", 1);
                continue;
            }

            while (isspace((unsigned char)*line))
                line++;

            if (*line == '/') {
                int restricted = 0;
                line++;
                search_direction = SEARCH_FORWARD;
                if (*line == '/') {
                    line++;
                    restricted = 1;
                }
                search_text(line, restricted, 1);
                ret = write_all(client->fd, "\n", 1);
            } else if (*line == '?') {
                int restricted = 0;
                line++;
                search_direction = SEARCH_BACKWARD;
                if (*line == '?') {
                    line++;
                    restricted = 1;
                }
                search_text(line, restricted, 1);
                ret = write_all(client->fd, "\n", 1);
            } else if (parse_command(line, &cmd, &arg)) {
                if (!strcmp(cmd, "status")) {
                    ret = cmd_status(client);
                } else {
                    if (strcmp(cmd, "passwd") != 0) {
                        set_client_fd(client->fd);
                        run_parsed_command(cmd, arg);
                        set_client_fd(-1);
                    }
                    ret = write_all(client->fd, "\n", 1);
                }
                free(cmd);
                free(arg);
            } else {
                // don't hang cmus-remote
                ret = write_all(client->fd, "\n", 1);
            }
            if (ret < 0) {
                d_print("write: %s\n", strerror(errno));
                goto close;
            }
        }
        memmove(buf, buf + s, pos - s);
        pos -= s;
    }
    return;
close:
    close(client->fd);
    list_del(&client->node);
    free(client);
}
Example #15
static void acm_rx_tasklet(unsigned long _acm)
{
	struct acm *acm = (void *)_acm;
	struct acm_rb *buf;
	struct tty_struct *tty;
	struct acm_ru *rcv;
	unsigned long flags;
	unsigned char throttled;

	dbg("Entering acm_rx_tasklet");

	if (!ACM_READY(acm)) {
		dbg("acm_rx_tasklet: ACM not ready");
		return;
	}

	spin_lock_irqsave(&acm->throttle_lock, flags);
	throttled = acm->throttle;
	spin_unlock_irqrestore(&acm->throttle_lock, flags);
	if (throttled) {
		dbg("acm_rx_tasklet: throttled");
		return;
	}

	tty = tty_port_tty_get(&acm->port);

next_buffer:
	spin_lock_irqsave(&acm->read_lock, flags);
	if (list_empty(&acm->filled_read_bufs)) {
		spin_unlock_irqrestore(&acm->read_lock, flags);
		goto urbs;
	}
	buf = list_entry(acm->filled_read_bufs.next,
			 struct acm_rb, list);
	list_del(&buf->list);
	spin_unlock_irqrestore(&acm->read_lock, flags);

	dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size);

	if (tty) {
		spin_lock_irqsave(&acm->throttle_lock, flags);
		throttled = acm->throttle;
		spin_unlock_irqrestore(&acm->throttle_lock, flags);
		if (!throttled) {
			tty_buffer_request_room(tty, buf->size);
			tty_insert_flip_string(tty, buf->base, buf->size);
			tty_flip_buffer_push(tty);
		} else {
			tty_kref_put(tty);
			dbg("Throttling noticed");
			spin_lock_irqsave(&acm->read_lock, flags);
			list_add(&buf->list, &acm->filled_read_bufs);
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
	}

	spin_lock_irqsave(&acm->read_lock, flags);
	list_add(&buf->list, &acm->spare_read_bufs);
	spin_unlock_irqrestore(&acm->read_lock, flags);
	goto next_buffer;

urbs:
	tty_kref_put(tty);

	while (!list_empty(&acm->spare_read_bufs)) {
		spin_lock_irqsave(&acm->read_lock, flags);
		if (list_empty(&acm->spare_read_urbs)) {
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
		rcv = list_entry(acm->spare_read_urbs.next,
				 struct acm_ru, list);
		list_del(&rcv->list);
		spin_unlock_irqrestore(&acm->read_lock, flags);

		buf = list_entry(acm->spare_read_bufs.next,
				 struct acm_rb, list);
		list_del(&buf->list);

		rcv->buffer = buf;

		if (acm->is_int_ep)
			usb_fill_int_urb(rcv->urb, acm->dev,
					 acm->rx_endpoint,
					 buf->base,
					 acm->readsize,
					 acm_read_bulk, rcv, acm->bInterval);
		else
			usb_fill_bulk_urb(rcv->urb, acm->dev,
					  acm->rx_endpoint,
					  buf->base,
					  acm->readsize,
					  acm_read_bulk, rcv);
		rcv->urb->transfer_dma = buf->dma;
		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		/* This shouldn't kill the driver as unsuccessful URBs are
		   returned to the free-urbs-pool and resubmitted ASAP */
		spin_lock_irqsave(&acm->read_lock, flags);
		if (acm->susp_count ||
				usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
			list_add(&buf->list, &acm->spare_read_bufs);
			list_add(&rcv->list, &acm->spare_read_urbs);
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		} else {
			spin_unlock_irqrestore(&acm->read_lock, flags);
			dbg("acm_rx_tasklet: sending urb 0x%p, rcv 0x%p, buf 0x%p", rcv->urb, rcv, buf);
		}
	}
	spin_lock_irqsave(&acm->read_lock, flags);
	acm->processing = 0;
	spin_unlock_irqrestore(&acm->read_lock, flags);
}
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
							F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				struct page *cpage;

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				cpage = find_lock_page(
						META_MAPPING(F2FS_I_SB(inode)),
						block_nr);
				if (cpage) {
					f2fs_wait_on_page_writeback(cpage,
									DATA);
					f2fs_put_page(cpage, 1);
				}
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}
Example #18
int aac_fib_send(u16 command, struct fib * fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void * callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib;
	struct aac_queue * q;
	unsigned long flags = 0;
	unsigned long qflags;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;
	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested, the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We 
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	} 
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}                
	/*
	 *	Get a queue entry, connect the FIB to it, and notify
	 *	the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	}

	fibptr->done = 0;
	fibptr->flags = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));

	if (!dev->queues)
		return -ENODEV;
	q = &dev->queues->queue[AdapNormCmdQueue];

	if(wait)
		spin_lock_irqsave(&fibptr->event_lock, flags);
	spin_lock_irqsave(q->lock, qflags);
	if (dev->new_comm_interface) {
		unsigned long count = 10000000L; /* 50 seconds */
		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		spin_unlock_irqrestore(q->lock, qflags);
		while (aac_adapter_send(fibptr) != 0) {
			if (--count == 0) {
				if (wait)
					spin_unlock_irqrestore(&fibptr->event_lock, flags);
				spin_lock_irqsave(q->lock, qflags);
				q->numpending--;
				list_del(&fibptr->queue);
				spin_unlock_irqrestore(q->lock, qflags);
				return -ETIMEDOUT;
			}
			udelay(5);
		}
	} else {
		u32 index;
		unsigned long nointr = 0;
		aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);

		list_add_tail(&fibptr->queue, &q->pendingq);
		q->numpending++;
		*(q->headers.producer) = cpu_to_le32(index + 1);
		spin_unlock_irqrestore(q->lock, qflags);
		dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
		if (!(nointr & aac_config.irq_mod))
			aac_adapter_notify(dev, AdapNormCmdQueue);
	}

	/*
	 *	If the caller wanted us to wait for a response, wait now.
	 */
    
	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long count = 36000000L; /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				if (--count == 0) {
					spin_lock_irqsave(q->lock, qflags);
					q->numpending--;
					list_del(&fibptr->queue);
					spin_unlock_irqrestore(q->lock, qflags);
					if (wait == -1) {
	        				printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}
				udelay(5);
			}
		} else
			down(&fibptr->event_wait);
		if(fibptr->done == 0)
			BUG();
			
		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
			return -ETIMEDOUT;
		} else {
			return 0;
		}
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending.
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
Example #19
File: swap.c Project: spinlock/ucore
// swap_list_del - delete page from the swap list
static inline void
swap_list_del(struct Page *page) {
    assert(PageSwap(page));
    (PageActive(page) ? &active_list : &inactive_list)->nr_pages --;
    list_del(&(page->swap_link));
}
/**
 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (hw_params(trans).rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask,
				  hw_params(trans).rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					   "order: %d\n",
					   hw_params(trans).rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s."
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, hw_params(trans).rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(trans->dev, page, 0,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
static void audio_mvs_process_ul_pkt(uint8_t *voc_pkt,
				     uint32_t pkt_len,
				     void *private_data)
{
	struct audio_mvs_buf_node *buf_node = NULL;
	struct audio_mvs_info_type *audio = private_data;
	unsigned long dsp_flags;

	/* Copy up-link packet into out_queue. */
	spin_lock_irqsave(&audio->dsp_lock, dsp_flags);

	if (!list_empty(&audio->free_out_queue)) {
		buf_node = list_first_entry(&audio->free_out_queue,
					    struct audio_mvs_buf_node,
					    list);
		list_del(&buf_node->list);

		switch (audio->mvs_mode) {
		case MVS_MODE_AMR:
		case MVS_MODE_AMR_WB: {
			/* Remove the DSP frame info header. Header format:
			 * Bits 0-3: Frame rate
			 * Bits 4-7: Frame type
			 */
			buf_node->frame.frame_type = ((*voc_pkt) & 0xF0) >> 4;
			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;

			buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;

			memcpy(&buf_node->frame.voc_pkt[0],
			       voc_pkt,
			       buf_node->frame.len);

			list_add_tail(&buf_node->list, &audio->out_queue);
			break;
		}

		case MVS_MODE_IS127: {
			buf_node->frame.frame_type = 0;
			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;

			buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;

			memcpy(&buf_node->frame.voc_pkt[0],
			       voc_pkt,
			       buf_node->frame.len);

			list_add_tail(&buf_node->list, &audio->out_queue);
			break;
		}

		case MVS_MODE_G729A: {
			/* G729 frames are 10ms each, but the DSP works with
			 * 20ms frames and sends two 10ms frames per buffer.
			 * Extract the two frames and put them in separate
			 * buffers.
			 */
			/* Remove the first DSP frame info header.
			 * Header format:
			 * Bits 0-1: Frame type
			 */
			buf_node->frame.frame_type = (*voc_pkt) & 0x03;
			voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;

			/* There are two frames in the buffer. Length of the
			 * first frame:
			 */
			buf_node->frame.len = (pkt_len -
					       2 * DSP_FRAME_HDR_LEN) / 2;

			memcpy(&buf_node->frame.voc_pkt[0],
			       voc_pkt,
			       buf_node->frame.len);
			voc_pkt = voc_pkt + buf_node->frame.len;

			list_add_tail(&buf_node->list, &audio->out_queue);

			/* Get another buffer from the free Q and fill in the
			 * second frame.
			 */
			if (!list_empty(&audio->free_out_queue)) {
				buf_node =
					list_first_entry(&audio->free_out_queue,
						      struct audio_mvs_buf_node,
						      list);
				list_del(&buf_node->list);

				/* Remove the second DSP frame info header.
				 * Header format:
				 * Bits 0-1: Frame type
				 */
				buf_node->frame.frame_type = (*voc_pkt) & 0x03;
				voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;

				/* There are two frames in the buffer. Length
				 * of the first frame:
				 */
				buf_node->frame.len = (pkt_len -
						     2 * DSP_FRAME_HDR_LEN) / 2;

				memcpy(&buf_node->frame.voc_pkt[0],
				       voc_pkt,
				       buf_node->frame.len);

				list_add_tail(&buf_node->list,
					      &audio->out_queue);

			} else {
				/* Drop the second frame. */
				pr_aud_err("%s: UL data dropped, read is slow\n",
				       __func__);
			}

			break;
		}
Example #22
static inline void lru_del(struct ashmem_range *range) {
    list_del(&range->lru);
    lru_count -= range_size(range);
}
Example #23
static void usb_hub_events(void)
{
	unsigned long flags;
	struct list_head *tmp;
	struct usb_device *dev;
	struct usb_hub *hub;
	struct usb_hub_status *hubsts;
	u16 hubstatus;
	u16 hubchange;
	u16 portstatus;
	u16 portchange;
	int i, ret;

	/*
	 *  We restart the list every time to avoid a deadlock with
	 * deleting hubs downstream from this one. This should be
	 * safe since we delete the hub from the event list.
	 * Not the most efficient, but avoids deadlocks.
	 */
	while (1) {
		spin_lock_irqsave(&hub_event_lock, flags);

		if (list_empty(&hub_event_list))
			break;

		/* Grab the next entry from the beginning of the list */
		tmp = hub_event_list.next;

		hub = list_entry(tmp, struct usb_hub, event_list);
		dev = hub->dev;

		list_del(tmp);
		INIT_LIST_HEAD(tmp);

		down(&hub->khubd_sem); /* never blocks, we were on list */
		spin_unlock_irqrestore(&hub_event_lock, flags);

		if (hub->error) {
			dbg("resetting hub %d for error %d", dev->devnum, hub->error);
			printk("resetting hub %d for error %d\n", dev->devnum, hub->error);

			if (usb_hub_reset(hub)) {
				err("error resetting hub %d - disconnecting", dev->devnum);
				up(&hub->khubd_sem);
				usb_hub_disconnect(dev);
				continue;
			}

			hub->nerrors = 0;
			hub->error = 0;
		}

		for (i = 2; i < hub->descriptor->bNbrPorts; i++) {
			ret = usb_hub_port_status(dev, i, &portstatus, &portchange);
			if (ret < 0) {
				continue;
			}

			if (portchange & USB_PORT_STAT_C_CONNECTION) {
				dbg("port %d connection change", i + 1);

				usb_hub_port_connect_change(hub, i, portstatus, portchange);
			} else if (portchange & USB_PORT_STAT_C_ENABLE) {
				dbg("port %d enable change, status %x", i + 1, portstatus);
				usb_clear_port_feature(dev, i + 1, USB_PORT_FEAT_C_ENABLE);

				/*
				 * EM interference sometimes causes bad shielded USB devices to 
				 * be shutdown by the hub, this hack enables them again.
				 * Works at least with mouse driver. 
				 */
				if (!(portstatus & USB_PORT_STAT_ENABLE) && 
				    (portstatus & USB_PORT_STAT_CONNECTION) && (dev->children[i])) {
					err("already running port %i disabled by hub (EMI?), re-enabling...",
						i + 1);
					usb_hub_port_connect_change(hub, i, portstatus, portchange);
				}
			}

			if (portchange & USB_PORT_STAT_C_SUSPEND) {
				dbg("port %d suspend change", i + 1);
				usb_clear_port_feature(dev, i + 1,  USB_PORT_FEAT_C_SUSPEND);
			}
			
			if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
				err("port %d over-current change", i + 1);
				usb_clear_port_feature(dev, i + 1, USB_PORT_FEAT_C_OVER_CURRENT);
				usb_hub_power_on(hub);
			}

			if (portchange & USB_PORT_STAT_C_RESET) {
				dbg("port %d reset change", i + 1);
				usb_clear_port_feature(dev, i + 1, USB_PORT_FEAT_C_RESET);
			}
		} /* end for i */

		/* deal with hub status changes */
		hubsts = kmalloc(sizeof *hubsts, GFP_KERNEL);
		if (!hubsts) {
			err("couldn't allocate hubsts");
		} else {
			if (usb_get_hub_status(dev, hubsts) < 0)
				err("get_hub_status failed");
			else {
				hubstatus = le16_to_cpup(&hubsts->wHubStatus);
				hubchange = le16_to_cpup(&hubsts->wHubChange);
				if (hubchange & HUB_CHANGE_LOCAL_POWER) {
					dbg("hub power change");
					usb_clear_hub_feature(dev, C_HUB_LOCAL_POWER);
				}
				if (hubchange & HUB_CHANGE_OVERCURRENT) {
					dbg("hub overcurrent change");
					wait_ms(500);	/* Cool down */
					usb_clear_hub_feature(dev, C_HUB_OVER_CURRENT);
					usb_hub_power_on(hub);
				}
			}
			kfree(hubsts);
		}
		up(&hub->khubd_sem);
        } /* end while (1) */

	spin_unlock_irqrestore(&hub_event_lock, flags);
}
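
The comment at the top of usb_hub_events explains why the loop re-reads the head of hub_event_list on every pass rather than iterating in place: entries (including the hub being handled) can be removed while the lock is dropped. A sketch of that drain-one-at-a-time pattern, again with a mutex in place of the spinlock and invented names:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }
static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}
static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->next = head; n->prev = head->prev;
	head->prev->next = n; head->prev = n;
}
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event {
	int id;
	struct list_head node;
};

static struct list_head event_list = LIST_HEAD_INIT(event_list);
static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_events(void)
{
	/* re-read the head each pass; never hold the lock while handling
	 * the event, and re-init the node so it can safely be re-queued */
	for (;;) {
		struct event *ev;

		pthread_mutex_lock(&event_lock);
		if (list_empty(&event_list)) {
			pthread_mutex_unlock(&event_lock);
			break;
		}
		ev = container_of(event_list.next, struct event, node);
		list_del(&ev->node);
		INIT_LIST_HEAD(&ev->node);
		pthread_mutex_unlock(&event_lock);

		printf("handling event %d\n", ev->id);	/* slow work, unlocked */
	}
}

int main(void)
{
	struct event a = { .id = 1 }, b = { .id = 2 };

	list_add_tail(&a.node, &event_list);
	list_add_tail(&b.node, &event_list);
	drain_events();
	return 0;
}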
static void vpe_do_tasklet(unsigned long data)
{
	unsigned long flags;
	uint32_t pyaddr = 0, pcbcraddr = 0;
	uint32_t src_y, src_cbcr, temp;

	struct vpe_isr_queue_cmd_type *qcmd = NULL;

	CDBG("[CAM] === vpe_do_tasklet start === \n");

	spin_lock_irqsave(&vpe_ctrl->tasklet_lock, flags);
	qcmd = list_first_entry(&vpe_ctrl->tasklet_q,
		struct vpe_isr_queue_cmd_type, list);

	if (!qcmd) {
		spin_unlock_irqrestore(&vpe_ctrl->tasklet_lock, flags);
		return;
	}

	list_del(&qcmd->list);
	spin_unlock_irqrestore(&vpe_ctrl->tasklet_lock, flags);

	/* interrupt to be processed,  *qcmd has the payload.  */
	if (qcmd->irq_status & 0x1) {
		if (vpe_ctrl->output_type == OUTPUT_TYPE_ST_L) {
			CDBG("[CAM] vpe left frame done.\n");
			vpe_ctrl->output_type = 0;
			CDBG("[CAM] vpe send out msg.\n");
			orig_src_y =
				msm_io_r(vpe_device->vpebase + VPE_SRCP0_ADDR_OFFSET);
			orig_src_cbcr =
				msm_io_r(vpe_device->vpebase + VPE_SRCP1_ADDR_OFFSET);

		pyaddr =
			msm_io_r(vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET);
		pcbcraddr =
			msm_io_r(vpe_device->vpebase + VPE_OUTP1_ADDR_OFFSET);
			CDBG("[CAM] %s: out_w = %d, out_h = %d\n", __func__, vpe_ctrl->out_w,
					 vpe_ctrl->out_h);

			if (vpe_ctrl->frame_pack == TOP_DOWN_FULL) {
				msm_io_w(pyaddr + (vpe_ctrl->out_w * vpe_ctrl->out_h),
						vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET);
				msm_io_w(pcbcraddr + (vpe_ctrl->out_w * vpe_ctrl->out_h/2),
						vpe_device->vpebase + VPE_OUTP1_ADDR_OFFSET);
			} else if ((vpe_ctrl->frame_pack == SIDE_BY_SIDE_HALF)
					|| (vpe_ctrl->frame_pack == SIDE_BY_SIDE_FULL)) {
				msm_io_w(pyaddr + vpe_ctrl->out_w,
						vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET);
				msm_io_w(pcbcraddr + vpe_ctrl->out_w,
						vpe_device->vpebase + VPE_OUTP1_ADDR_OFFSET);
			} else
				CDBG("[CAM] %s: Invalid packing = %d\n", __func__, vpe_ctrl->frame_pack);
			vpe_send_msg_no_payload(MSG_ID_VPE_OUTPUT_ST_L);
			vpe_ctrl->state = 0;   /* put it back to idle. */
			kfree(qcmd);
			return;
		} else if (vpe_ctrl->output_type == OUTPUT_TYPE_ST_R) {
			src_y = orig_src_y;
			src_cbcr = orig_src_cbcr;
			CDBG("[CAM] %s: out_w = %d, out_h = %d\n", __func__, vpe_ctrl->out_w,
					 vpe_ctrl->out_h);
			if (vpe_ctrl->frame_pack == TOP_DOWN_FULL) {
				pyaddr = msm_io_r(vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET) -
					(vpe_ctrl->out_w * vpe_ctrl->out_h);
			} else if ((vpe_ctrl->frame_pack == SIDE_BY_SIDE_HALF)
					|| (vpe_ctrl->frame_pack == SIDE_BY_SIDE_FULL)) {
				pyaddr = msm_io_r(vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET) -
					vpe_ctrl->out_w;
			} else
				CDBG("[CAM] %s: Invalid packing = %d\n", __func__, vpe_ctrl->frame_pack);
			pcbcraddr = vpe_ctrl->pcbcr_before_dis;
		} else {
		src_y =
			msm_io_r(vpe_device->vpebase + VPE_SRCP0_ADDR_OFFSET);
		src_cbcr =
			msm_io_r(vpe_device->vpebase + VPE_SRCP1_ADDR_OFFSET);
			pyaddr =
				msm_io_r(vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET);
			pcbcraddr =
				msm_io_r(vpe_device->vpebase + VPE_OUTP1_ADDR_OFFSET);
		}
		if (vpe_ctrl->dis_en)
			pcbcraddr = vpe_ctrl->pcbcr_before_dis;

		msm_io_w(src_y,
				vpe_device->vpebase + VPE_OUTP0_ADDR_OFFSET);
		msm_io_w(src_cbcr,
				vpe_device->vpebase + VPE_OUTP1_ADDR_OFFSET);

		temp = msm_io_r(
		vpe_device->vpebase + VPE_OP_MODE_OFFSET) & 0xFFFFFFFC;
		msm_io_w(temp, vpe_device->vpebase + VPE_OP_MODE_OFFSET);
		CDBG("[CAM] vpe send out msg.\n");
		vpe_ctrl->output_type = 0;
		/*  now pass this frame to msm_camera.c. */
		if (vpe_ctrl->output_type == OUTPUT_TYPE_ST_R)
			vpe_send_outmsg(MSG_ID_VPE_OUTPUT_ST_R, pyaddr, pcbcraddr);
		else
		vpe_send_outmsg(MSG_ID_VPE_OUTPUT_V, pyaddr, pcbcraddr);
		vpe_ctrl->state = 0;   /* put it back to idle. */

	}
	kfree(qcmd);
}
Example #25
void omapdss_unregister_display(struct omap_dss_device *dssdev)
{
	mutex_lock(&panel_list_mutex);
	list_del(&dssdev->panel_list);
	mutex_unlock(&panel_list_mutex);
}
Example #26
int test_run (void)
   {
   SCL_list_t       list;
   SCL_iterator_t   iterator;

   void*   data;
   int     stat;
   int     i;

   printf ("Create list: ");
   fflush (stdout);
   list = list_new();
   printf ("(%p = list_new()) != NULL ... ", list);
   fflush (stdout);
   printf ("%s\n", ((list!=NULL) ? "PASS" : "FAIL"));

   printf ("Push data in back list.\n");
   for (i=0 ; i<LIST_SIZE ; i++)
      {
      stat = list_push_back (list, (void*)g_data[i]);
      printf (
             "([%d,%s] = list_push_back (list, data[%d]) .... %s\n",
             stat, scl_statstr(stat), i, ((stat==SCL_OK) ? "PASS" : "FAIL")
             );
      }

   printf (
          "%2ld = list_count (list) .......................... %s\n",
          list_count(list), ((list_count(list)==LIST_SIZE) ? "PASS" : "FAIL")
          );

   printf ("Foreach on list.\n");
   for (i=0 ; i<LIST_SIZE ; i++) g_flag[i] = 0;
   list_foreach (list, cbfn, g_context);

   printf ("REVERSE the list.\n");
   list_reverse (list);

   printf ("Iterate through the list.\n");
   iterator = list_begin (list);
   for (i=0 ; i<LIST_SIZE ; i++) g_flag[i] = 0;
   while (iterator != NULL)
      {
      data_flag (list_data_get(iterator));
      iterator = list_next (iterator);
      }

   printf ("Iterate back through the list.\n");
   iterator = list_end (list);
   for (i=0 ; i<LIST_SIZE ; i++) g_flag[i] = 0;
   while (iterator != NULL)
      {
      data_flag (list_data_get(iterator));
      iterator = list_prev (iterator);
      }

   printf ("REVERSE the list.\n");
   list_reverse (list);

   printf ("Accesses on list.\n");
   for (i=0 ; i<LIST_SIZE ; i++)
      {
      data = list_access (list, i);
      printf (
             "list_access(list,%d) == %08X ................. %s\n",
             i,
             (unsigned long)data,
             (((unsigned long)data==g_data[i]) ? "PASS" : "FAIL")
             );
      }

   printf ("Accesses on list via iterator.\n");
   for (i=0 ; i<LIST_SIZE ; i++)
      {
      iterator = list_at (list, i);
      printf (
             "list_at(list,%d) => %08X ..................... %s\n",
             i,
             (unsigned long)list_data_get(iterator),
             (((unsigned long)list_data_get(iterator)==g_data[i]) ? "PASS" : "FAIL")
             );
      }

   printf ("Erase list.\n");
   list_erase (list);

   printf (
          "%2ld = list_count (list) .......................... %s\n",
          list_count(list), ((list_count(list)==0) ? "PASS" : "FAIL")
          );

   printf ("Delete list.\n");
   list_del (list);

   printf ("\n");

   return 0;
   }
Example #27
File: mcl_list.c Project: lishengwen/mcl
int mcl_list_del_cmp(void *data, mcl_list *list_ptr, mcl_list_cmp_func cmp_func, mcl_free_fn_t free_fn)
{
	return list_del(data, list_ptr, 1, cmp_func, free_fn);
}
Example #28
static void syslog_work_fn(struct work_struct *work)
{
    struct msghdr   msg;
    struct kvec     iov;
    int ret;
    size_t n = 0;
    struct logs * log_entry;

    if (sl_socket != NULL)
        goto cont;
    ret = syslog_connect(&sl_socket);
    if (ret >= 0)
        goto cont;
    if (!timer_pending(&timer))
    {
        timer.expires = jiffies + msecs_to_jiffies(reconnect_freq);
        add_timer(&timer);
    }
    return ;

cont:

    if (list_empty(&logs_list))
        return;

    spin_lock_irq(&log_lock);

    while (!list_empty(&logs_list))
    {
        log_entry =  list_first_entry(&logs_list, struct logs, logs_list);

        printk(KERN_DEBUG "ip_syslog: work data (%d): %d\n", counter, log_entry->counter);

        n = strlen(log_entry->data);
        iov.iov_base     = (void *)log_entry->data;
        iov.iov_len      = n;
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_iov = (struct iovec *)&iov;
        msg.msg_iovlen = 1;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;
        msg.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL;

        ret = kernel_sendmsg(sl_socket, &msg, &iov, 1, n);
        if (ret < 0)
        {
            printk("ip_syslog: kernel_sendmsg error:%d\n", ret);
            if (ret == -EPIPE)
            {
                syslog_close(&sl_socket);
                schedule_work(&syslog_work);
            }
            break;
        }

        loglist_total--;
        list_del(&log_entry->logs_list);
        kfree(log_entry->data);
        kfree(log_entry);
    }

    spin_unlock_irq(&log_lock);
    return ;
}
Example #29
void *
mem_get (struct mem_pool *mem_pool)
{
        struct list_head *list = NULL;
        void             *ptr = NULL;
        int             *in_use = NULL;
        struct mem_pool **pool_ptr = NULL;

        if (!mem_pool) {
                gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
                return NULL;
        }

        LOCK (&mem_pool->lock);
        {
                mem_pool->alloc_count++;
                if (mem_pool->cold_count) {
                        list = mem_pool->list.next;
                        list_del (list);

                        mem_pool->hot_count++;
                        mem_pool->cold_count--;

                        if (mem_pool->max_alloc < mem_pool->hot_count)
                                mem_pool->max_alloc = mem_pool->hot_count;

                        ptr = list;
                        in_use = (ptr + GF_MEM_POOL_LIST_BOUNDARY +
                                  GF_MEM_POOL_PTR);
                        *in_use = 1;

                        goto fwd_addr_out;
                }

                /* This is a problem area. If we've run out of
                 * chunks in our slab above, we need to allocate
                 * enough memory to service this request.
                 * The problem is, these individual chunks will fail
                 * the first address range check in __is_member. Now, since
                 * we're not allocating a full second slab, we won't have
                 * enough info to perform the range check in __is_member.
                 *
                 * I am working around this by performing a regular allocation,
                 * just the way the caller would've done when not using the
                 * mem-pool. That also means, we're not padding the size with
                 * the list_head structure because, this will not be added to
                 * the list of chunks that belong to the mem-pool allocated
                 * initially.
                 *
                 * This is the best we can do without adding functionality for
                 * managing multiple slabs. That does not interest us at present
                 * because it is too much work knowing that a better slab
                 * allocator is coming RSN.
                 */
                mem_pool->pool_misses++;
                mem_pool->curr_stdalloc++;
                if (mem_pool->max_stdalloc < mem_pool->curr_stdalloc)
                        mem_pool->max_stdalloc = mem_pool->curr_stdalloc;
                ptr = GF_CALLOC (1, mem_pool->padded_sizeof_type,
                                 gf_common_mt_mem_pool);

                /* Memory coming from the heap need not be transformed from a
                 * chunkhead to a usable pointer since it is not coming from
                 * the pool.
                 */
        }
fwd_addr_out:
        pool_ptr = mem_pool_from_ptr (ptr);
        *pool_ptr = (struct mem_pool *)mem_pool;
        ptr = mem_pool_chunkhead2ptr (ptr);
        UNLOCK (&mem_pool->lock);

        return ptr;
}
Example #30
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
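
eth_start_xmit shows the freelist idiom: borrow a pre-allocated request from dev->tx_reqs with list_del(), stop the queue when the freelist runs dry, and put the request back with list_add() if the submit fails. A simplified sketch of that borrow/return pattern with invented names (struct request, req_get, req_put):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next; n->prev = head;
	head->next->prev = n; head->next = n;
}
static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct request {
	int id;
	struct list_head list;
};

static struct list_head free_reqs = LIST_HEAD_INIT(free_reqs);

/* Borrow a request from the freelist; NULL means "queue is busy". */
static struct request *req_get(void)
{
	struct request *req;

	if (list_empty(&free_reqs))
		return NULL;
	req = container_of(free_reqs.next, struct request, list);
	list_del(&req->list);
	return req;
}

/* Return a request to the pool, e.g. after a failed submit. */
static void req_put(struct request *req)
{
	list_add(&req->list, &free_reqs);
}

int main(void)
{
	struct request r = { .id = 3 };

	req_put(&r);				/* seed the pool */
	printf("got req %d\n", req_get()->id);	/* borrow it back */
	return 0;
}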