Example #1
File: usb.c  Project: avagin/linux
/**
 * wq_clear_halt - work queue function
 * @wq_obj: work_struct object to execute
 *
 * This sends a clear_halt to the given USB pipe.
 */
static void wq_clear_halt(struct work_struct *wq_obj)
{
	struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
	struct most_dev *mdev = clear_work->mdev;
	unsigned int channel = clear_work->channel;
	int pipe = clear_work->pipe;

	mutex_lock(&mdev->io_mutex);
	most_stop_enqueue(&mdev->iface, channel);
	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
	if (usb_clear_halt(mdev->usb_device, pipe))
		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");

	/* If the functional Stall condition has been set on an
	 * asynchronous rx channel, we need to clear the tx channel
	 * too, since the hardware runs its clean-up sequence on both
	 * channels, as they are physically one on the network.
	 *
	 * The USB interface that exposes the asynchronous channels
	 * always contains exactly two endpoints.
	 */
	if (mdev->conf[channel].data_type == MOST_CH_ASYNC &&
	    mdev->conf[channel].direction == MOST_CH_RX) {
		int peer = 1 - channel;
		int snd_pipe = usb_sndbulkpipe(mdev->usb_device,
					       mdev->ep_address[peer]);
		usb_clear_halt(mdev->usb_device, snd_pipe);
	}
	mdev->is_channel_healthy[channel] = true;
	most_resume_enqueue(&mdev->iface, channel);
	mutex_unlock(&mdev->io_mutex);
}
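
The handler above runs from a workqueue so that the blocking usb_clear_halt() call happens in process context rather than in the URB completion path. The sketch below shows one plausible way the work item could be declared, bound, and scheduled; the struct layout, the clear_work field on struct most_dev, and the schedule_clear_halt() helper are assumptions for illustration and are not taken from the listings in this section.

#include <linux/workqueue.h>

/* Per-channel context handed to wq_clear_halt() (assumed layout). */
struct clear_hold_work {
	struct work_struct ws;
	struct most_dev *mdev;
	unsigned int channel;
	int pipe;
};

#define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)

/* Hypothetical helper a URB completion handler could call: it fills in
 * the per-channel context and defers endpoint recovery to process
 * context, because usb_clear_halt() issues a control request and may
 * sleep.
 */
static void schedule_clear_halt(struct most_dev *mdev, unsigned int channel,
				int pipe)
{
	struct clear_hold_work *work = &mdev->clear_work[channel];

	work->mdev = mdev;
	work->channel = channel;
	work->pipe = pipe;
	schedule_work(&work->ws);
}

/* At probe time the work item would be bound to the handler once, e.g.:
 *	INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
 */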
Example #2
File: hdm_usb.c  Project: acton393/linux
/**
 * wq_clear_halt - work queue function
 * @wq_obj: work_struct object to execute
 *
 * This sends a clear_halt to the given USB pipe.
 */
static void wq_clear_halt(struct work_struct *wq_obj)
{
	struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
	struct most_dev *mdev = clear_work->mdev;
	unsigned int channel = clear_work->channel;
	int pipe = clear_work->pipe;

	mutex_lock(&mdev->io_mutex);
	most_stop_enqueue(&mdev->iface, channel);
	free_anchored_buffers(mdev, channel, MBO_E_INVAL);
	if (usb_clear_halt(mdev->usb_device, pipe))
		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");

	mdev->is_channel_healthy[channel] = true;
	most_resume_enqueue(&mdev->iface, channel);
	mutex_unlock(&mdev->io_mutex);
}