Example No. 1
0
/* hcd->hub_irq_enable() */
static void admhc_rhsc_enable(struct usb_hcd *hcd)
{
	struct admhcd	*ahcd = hcd_to_admhcd(hcd);

	spin_lock_irq(&ahcd->lock);
	if (!ahcd->autostop)
		del_timer(&hcd->rh_timer);	/* Prevent next poll */
	admhc_intr_enable(ahcd, ADMHC_INTR_INSM);
	spin_unlock_irq(&ahcd->lock);
}
Example No. 2
0
/* admhc_shutdown forcibly disables IRQs and DMA, helping kexec and
 * other cases where the next software may expect clean state from the
 * "firmware".  this is bus-neutral, unlike shutdown() methods.
 */
static void
admhc_shutdown(struct usb_hcd *hcd)
{
	struct admhcd *ahcd;

	ahcd = hcd_to_admhcd(hcd);
	admhc_intr_disable(ahcd, ADMHC_INTR_MIE);
	admhc_dma_disable(ahcd);
	admhc_usb_reset(ahcd);
	/* flush the writes */
	admhc_writel_flush(ahcd);
}
Example No. 3
0
static int
admhc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct admhcd	*ahcd = hcd_to_admhcd(hcd);
	int		i, changed = 0, length = 1;
	int		any_connected = 0;
	unsigned long	flags;
	u32		status;

	spin_lock_irqsave(&ahcd->lock, flags);
	if (!HCD_HW_ACCESSIBLE(hcd))
		goto done;

	/* init status */
	status = admhc_read_rhdesc(ahcd);
	if (status & (ADMHC_RH_LPSC | ADMHC_RH_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;
	if (ahcd->num_ports > 7) {
		buf[1] = 0;
		length++;
	}

	/* look at each port */
	for (i = 0; i < ahcd->num_ports; i++) {
		status = admhc_read_portstatus(ahcd, i);

		/* can't autostop if ports are connected */
		any_connected |= (status & ADMHC_PS_CCS);

		if (status & (ADMHC_PS_CSC | ADMHC_PS_PESC | ADMHC_PS_PSSC
				| ADMHC_PS_OCIC | ADMHC_PS_PRSC)) {
			changed = 1;
			if (i < 7)
				buf[0] |= 1 << (i + 1);
			else
				buf[1] |= 1 << (i - 7);
		}
	}

	if (admhc_root_hub_state_changes(ahcd, changed,
			any_connected))
		set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	else
		clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);

done:
	spin_unlock_irqrestore(&ahcd->lock, flags);

	return changed ? length : 0;
}
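The two-byte bitmap built above follows the USB hub status-change convention: bit 0 of buf[0] reports hub-level changes, and root-hub port i sets bit (i + 1), spilling over into buf[1] once the hub has more than 7 ports. A minimal sketch of that mapping, using a hypothetical helper that is not part of the driver:

/* Hypothetical helper (illustration only): mark zero-based port 'port'
 * as changed in the two-byte bitmap that admhc_hub_status_data() fills.
 */
static void hub_change_bitmap_set(char *buf, int port)
{
	if (port < 7)
		buf[0] |= 1 << (port + 1);	/* ports 0..6 -> buf[0] bits 1..7 */
	else
		buf[1] |= 1 << (port - 7);	/* ports 7..14 -> buf[1] bits 0..7 */
}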
Example No. 4
0
static int admhc_start_port_reset(struct usb_hcd *hcd, unsigned port)
{
	struct admhcd	*ahcd = hcd_to_admhcd(hcd);
	u32			status;

	if (!port)
		return -EINVAL;
	port--;

	/* start port reset before HNP protocol times out */
	status = admhc_read_portstatus(ahcd, port);
	if (!(status & ADMHC_PS_CCS))
		return -ENODEV;

	/* khubd will finish the reset later */
	admhc_write_portstatus(ahcd, port, ADMHC_PS_PRS);
	return 0;
}
Example No. 5
0
static void admhc_stop(struct usb_hcd *hcd)
{
	struct admhcd *ahcd = hcd_to_admhcd(hcd);

	admhc_dump(ahcd, 1);

	flush_scheduled_work();

	admhc_usb_reset(ahcd);
	admhc_intr_disable(ahcd, ADMHC_INTR_MIE);

	free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	remove_debug_files(ahcd);
	admhc_eds_cleanup(ahcd);
	admhc_mem_cleanup(ahcd);
}
Example No. 6
0
/*
 * decouple the URB from the HC queues (TDs, urb_priv);
 * reporting is always done
 * asynchronously, and we might be dealing with an urb that's
 * partially transferred, or an ED with other urbs being unlinked.
 */
static int admhc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
		int status)
{
	struct admhcd *ahcd = hcd_to_admhcd(hcd);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ahcd->lock, flags);

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "DEQUEUE", 1, status);
#endif
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret) {
		/* Do nothing */
		;
	} else if (HC_IS_RUNNING(hcd->state)) {
		struct urb_priv *urb_priv;

		/* Unless an IRQ completed the unlink while it was being
		 * handed to us, flag it for unlink and giveback, and force
		 * some upcoming INTR_SF to call finish_unlinks()
		 */
		urb_priv = urb->hcpriv;
		if (urb_priv) {
			if (urb_priv->ed->state == ED_OPER)
				start_ed_unlink(ahcd, urb_priv->ed);
		}
	} else {
		/*
		 * with HC dead, we won't respect hc queue pointers
		 * any more ... just clean up every urb's memory.
		 */
		if (urb->hcpriv)
			finish_urb(ahcd, urb, status);
	}
	spin_unlock_irqrestore(&ahcd->lock, flags);

	return ret;
}
Example No. 7
0
static int admhc_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
		u16 wIndex, char *buf, u16 wLength)
{
	struct admhcd	*ahcd = hcd_to_admhcd(hcd);
	int		ports = ahcd->num_ports;
	int		ret = 0;

	if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
		return -ESHUTDOWN;

	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
#if 0			/* FIXME */
			admhc_writel(ahcd, ADMHC_RH_OCIC,
					&ahcd->regs->roothub.status);
#endif
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			ret = admhc_port_disable(ahcd, wIndex);
			break;
		case USB_PORT_FEAT_SUSPEND:
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_CPS);
			break;
		case USB_PORT_FEAT_POWER:
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_CPP);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_CSC);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_PESC);
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_PSSC);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_OCIC);
			break;
		case USB_PORT_FEAT_C_RESET:
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_PRSC);
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		ret = admhc_get_hub_descriptor(ahcd, buf);
		break;
	case GetHubStatus:
		ret = admhc_get_hub_status(ahcd, buf);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		ret = admhc_get_port_status(ahcd, wIndex, buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			/* FIXME:  this can be cleared, yes? */
		case C_HUB_LOCAL_POWER:
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			ret = admhc_port_enable(ahcd, wIndex);
			break;
		case USB_PORT_FEAT_RESET:
			ret = admhc_port_reset(ahcd, wIndex);
			break;
		case USB_PORT_FEAT_SUSPEND:
#ifdef	CONFIG_USB_OTG
			if (hcd->self.otg_port == (wIndex + 1)
					&& hcd->self.b_hnp_enable)
				start_hnp(ahcd);
			else
#endif
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_SPS);
			break;
		case USB_PORT_FEAT_POWER:
			ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_SPP);
			break;
		default:
			goto error;
		}
		break;

	default:
error:
		/* "protocol stall" on error */
		ret = -EPIPE;
	}

	return ret;
}
Example No. 8
0
/*
 * queue up an urb for anything except the root hub
 */
static int admhc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
		gfp_t mem_flags)
{
	struct admhcd	*ahcd = hcd_to_admhcd(hcd);
	struct ed	*ed;
	struct urb_priv	*urb_priv;
	unsigned int	pipe = urb->pipe;
	int		td_cnt = 0;
	unsigned long	flags;
	int		ret = 0;

#ifdef ADMHC_VERBOSE_DEBUG
	spin_lock_irqsave(&ahcd->lock, flags);
	urb_print(ahcd, urb, "ENQEUE", usb_pipein(pipe), -EINPROGRESS);
	spin_unlock_irqrestore(&ahcd->lock, flags);
#endif

	/* every endpoint has an ed, locate and maybe (re)initialize it */
	ed = ed_get(ahcd, urb->ep, urb->dev, pipe, urb->interval);
	if (!ed)
		return -ENOMEM;

	/* for the private part of the URB we need the number of TDs */
	switch (ed->type) {
	case PIPE_CONTROL:
		if (urb->transfer_buffer_length > TD_DATALEN_MAX)
			/* td_submit_urb() doesn't yet handle these */
			return -EMSGSIZE;

		/* 1 TD for setup, 1 for ACK, plus ... */
		td_cnt = 2;
		/* FALLTHROUGH */
	case PIPE_BULK:
		/* one TD for every 4096 bytes (can be up to 8K) */
		td_cnt += urb->transfer_buffer_length / TD_DATALEN_MAX;
		/* ... and for any remaining bytes ... */
		if ((urb->transfer_buffer_length % TD_DATALEN_MAX) != 0)
			td_cnt++;
		/* ... and maybe a zero length packet to wrap it up */
		if (td_cnt == 0)
			td_cnt++;
		else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
			&& (urb->transfer_buffer_length
				% usb_maxpacket(urb->dev, pipe,
					usb_pipeout(pipe))) == 0)
			td_cnt++;
		break;
	case PIPE_INTERRUPT:
		/*
		 * for Interrupt IN/OUT transactions, each ED contains
		 * only 1 TD.
		 * TODO: check transfer_buffer_length?
		 */
		td_cnt = 1;
		break;
	case PIPE_ISOCHRONOUS:
		/* number of packets from URB */
		td_cnt = urb->number_of_packets;
		break;
	}

	urb_priv = urb_priv_alloc(ahcd, td_cnt, mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->ed = ed;

	spin_lock_irqsave(&ahcd->lock, flags);
	/* don't submit to a dead HC */
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		ret = -ENODEV;
		goto fail;
	}
	if (!HC_IS_RUNNING(hcd->state)) {
		ret = -ENODEV;
		goto fail;
	}

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto fail;

	/* schedule the ed if needed */
	if (ed->state == ED_IDLE) {
		ret = ed_schedule(ahcd, ed);
		if (ret < 0) {
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			goto fail;
		}
		if (ed->type == PIPE_ISOCHRONOUS) {
			u16	frame = admhc_frame_no(ahcd);

			/* delay a few frames before the first TD */
			frame += max_t(u16, 8, ed->interval);
			frame &= ~(ed->interval - 1);
			frame |= ed->branch;
			urb->start_frame = frame;

			/* yes, only URB_ISO_ASAP is supported, and
			 * urb->start_frame is never used as input.
			 */
		}
	} else if (ed->type == PIPE_ISOCHRONOUS)
		urb->start_frame = ed->last_iso + ed->interval;

	/* fill the TDs and link them to the ed; and
	 * enable that part of the schedule, if needed
	 * and update count of queued periodic urbs
	 */
	urb->hcpriv = urb_priv;
	td_submit_urb(ahcd, urb);

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "admhc_urb_enqueue", urb_priv->ed, 1);
#endif

fail:
	if (ret)
		urb_priv_free(ahcd, urb_priv);

	spin_unlock_irqrestore(&ahcd->lock, flags);
	return ret;
}
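The TD accounting in the switch above is plain arithmetic: one general TD per TD_DATALEN_MAX bytes, one more for any remainder, at least one TD even for a zero-length transfer, and one extra TD when URB_ZERO_PACKET requests a trailing zero-length packet on a maxpacket-aligned buffer (control transfers add two more TDs for the SETUP and ACK stages). A minimal sketch of the bulk case, written as a hypothetical stand-alone helper rather than driver code:

/* Hypothetical helper (illustration only): TD count for a bulk URB of
 * 'len' bytes, where one general TD carries up to TD_DATALEN_MAX bytes
 * and 'zero_packet' reflects URB_ZERO_PACKET.
 */
static int bulk_td_count(u32 len, u16 maxpacket, int zero_packet)
{
	int td_cnt = len / TD_DATALEN_MAX;	/* full-sized TDs */

	if (len % TD_DATALEN_MAX)
		td_cnt++;			/* remaining bytes */
	if (td_cnt == 0)
		td_cnt++;			/* zero-length transfer still needs one TD */
	else if (zero_packet && maxpacket && (len % maxpacket) == 0)
		td_cnt++;			/* trailing zero-length packet */

	return td_cnt;
}

For example, assuming TD_DATALEN_MAX is 4096 as the comment in the code suggests, a 10000-byte bulk OUT with a 512-byte maxpacket and URB_ZERO_PACKET clear needs 10000 / 4096 = 2 full TDs plus one for the 1808-byte remainder, so td_cnt = 3.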
Example No. 9
0
static irqreturn_t admhc_irq(struct usb_hcd *hcd)
{
	struct admhcd *ahcd = hcd_to_admhcd(hcd);
	struct admhcd_regs __iomem *regs = ahcd->regs;
	u32 ints;

	ints = admhc_readl(ahcd, &regs->int_status);
	if ((ints & ADMHC_INTR_INTA) == 0) {
		/* no unmasked interrupt status is set */
		return IRQ_NONE;
	}

	ints &= admhc_readl(ahcd, &regs->int_enable);

	if (ints & ADMHC_INTR_FATI) {
		/* e.g. due to PCI Master/Target Abort */
		admhc_disable(ahcd);
		admhc_err(ahcd, "Fatal Error, controller disabled\n");
		admhc_dump(ahcd, 1);
		admhc_usb_reset(ahcd);
	}

	if (ints & ADMHC_INTR_BABI) {
		admhc_intr_disable(ahcd, ADMHC_INTR_BABI);
		admhc_intr_ack(ahcd, ADMHC_INTR_BABI);
		admhc_err(ahcd, "Babble Detected\n");
	}

	if (ints & ADMHC_INTR_INSM) {
		admhc_vdbg(ahcd, "Root Hub Status Change\n");
		ahcd->next_statechange = jiffies + STATECHANGE_DELAY;
		admhc_intr_ack(ahcd, ADMHC_INTR_RESI | ADMHC_INTR_INSM);

		/* NOTE: Vendors didn't always make the same implementation
		 * choices for RHSC.  Many followed the spec; RHSC triggers
		 * on an edge, like setting and maybe clearing a port status
		 * change bit.  With others it's level-triggered, active
		 * until khubd clears all the port status change bits.  We'll
		 * always disable it here and rely on polling until khubd
		 * re-enables it.
		 */
		admhc_intr_disable(ahcd, ADMHC_INTR_INSM);
		usb_hcd_poll_rh_status(hcd);
	} else if (ints & ADMHC_INTR_RESI) {
		/* For connect and disconnect events, we expect the controller
		 * to turn on RHSC along with RD.  But for remote wakeup events
		 * this might not happen.
		 */
		admhc_vdbg(ahcd, "Resume Detect\n");
		admhc_intr_ack(ahcd, ADMHC_INTR_RESI);
		hcd->poll_rh = 1;
		if (ahcd->autostop) {
			spin_lock(&ahcd->lock);
			admhc_rh_resume(ahcd);
			spin_unlock(&ahcd->lock);
		} else
			usb_hcd_resume_root_hub(hcd);
	}

	if (ints & ADMHC_INTR_TDC) {
		admhc_vdbg(ahcd, "Transfer Descriptor Complete\n");
		admhc_intr_ack(ahcd, ADMHC_INTR_TDC);
		if (HC_IS_RUNNING(hcd->state))
			admhc_intr_disable(ahcd, ADMHC_INTR_TDC);
		spin_lock(&ahcd->lock);
		admhc_td_complete(ahcd);
		spin_unlock(&ahcd->lock);
		if (HC_IS_RUNNING(hcd->state))
			admhc_intr_enable(ahcd, ADMHC_INTR_TDC);
	}

	if (ints & ADMHC_INTR_SO) {
		/* could track INTR_SO to reduce available PCI/... bandwidth */
		admhc_vdbg(ahcd, "Schedule Overrun\n");
	}

#if 1
	spin_lock(&ahcd->lock);
	if (ahcd->ed_rm_list)
		finish_unlinks(ahcd, admhc_frame_no(ahcd));

	if ((ints & ADMHC_INTR_SOFI) != 0 && !ahcd->ed_rm_list
			&& HC_IS_RUNNING(hcd->state))
		admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
	spin_unlock(&ahcd->lock);
#else
	if (ints & ADMHC_INTR_SOFI) {
		admhc_vdbg(ahcd, "Start Of Frame\n");
		spin_lock(&ahcd->lock);

		/* handle any pending ED removes */
		finish_unlinks(ahcd, admhc_frame_no(ahcd));

		/* leave INTR_SOFI enabled when there is still unlinking
		 * to be done in the next frame.
		 */
		if ((ahcd->ed_rm_list == NULL) ||
			HC_IS_RUNNING(hcd->state) == 0)
			/*
			 * disable INTR_SOFI if there is no unlinking to be
			 * done (in the next frame)
			 */
			admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);

		spin_unlock(&ahcd->lock);
	}
#endif

	if (HC_IS_RUNNING(hcd->state)) {
		admhc_intr_ack(ahcd, ints);
		admhc_intr_enable(ahcd, ADMHC_INTR_MIE);
		admhc_writel_flush(ahcd);
	}

	return IRQ_HANDLED;
}
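The handler above follows the usual shared-IRQ idiom: read the raw status, return IRQ_NONE if the controller did not assert INTA, mask the status with the enable register so only unmasked sources get serviced, and acknowledge each serviced source by writing its bit back to the status register. A minimal sketch of just that skeleton (illustration only; 'status' and 'enable' stand in for the admhc_readl() reads of int_status and int_enable):

/* Illustration only: the mask-and-dispatch skeleton used by admhc_irq(). */
static irqreturn_t example_hc_irq(u32 status, u32 enable)
{
	u32 ints;

	if (!(status & ADMHC_INTR_INTA))
		return IRQ_NONE;	/* interrupt was not raised by this HC */

	ints = status & enable;		/* service only unmasked sources */

	/* ... dispatch on the individual bits in 'ints', acking each one
	 * by writing it back to the interrupt status register ...
	 */
	return IRQ_HANDLED;
}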
Example No. 10
0
static int admhc_get_frame_number(struct usb_hcd *hcd)
{
	struct admhcd *ahcd = hcd_to_admhcd(hcd);

	return admhc_frame_no(ahcd);
}
Example No. 11
0
static void admhc_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct admhcd		*ahcd = hcd_to_admhcd(hcd);
	unsigned long		flags;
	struct ed		*ed = ep->hcpriv;
	unsigned		limit = 1000;

	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */

	if (!ed)
		return;

#ifdef ADMHC_VERBOSE_DEBUG
	spin_lock_irqsave(&ahcd->lock, flags);
	admhc_dump_ed(ahcd, "EP-DISABLE", ed, 1);
	spin_unlock_irqrestore(&ahcd->lock, flags);
#endif

rescan:
	spin_lock_irqsave(&ahcd->lock, flags);

	if (!HC_IS_RUNNING(hcd->state)) {
sanitize:
		ed->state = ED_IDLE;
		finish_unlinks(ahcd, 0);
	}

	switch (ed->state) {
	case ED_UNLINK:		/* wait for hw to finish? */
		/* major IRQ delivery trouble loses INTR_SOFI too... */
		if (limit-- == 0) {
			admhc_warn(ahcd, "IRQ INTR_SOFI lossage\n");
			goto sanitize;
		}
		spin_unlock_irqrestore(&ahcd->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case ED_IDLE:		/* fully unlinked */
		if (list_empty(&ed->td_list)) {
			td_free(ahcd, ed->dummy);
			ed_free(ahcd, ed);
			break;
		}
		/* else FALL THROUGH */
	default:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  can't recover; must leak ed.
		 */
		admhc_err(ahcd, "leak ed %p (#%02x) state %d%s\n",
			ed, ep->desc.bEndpointAddress, ed->state,
			list_empty(&ed->td_list) ? "" : " (has tds)");
		td_free(ahcd, ed->dummy);
		break;
	}

	ep->hcpriv = NULL;

	spin_unlock_irqrestore(&ahcd->lock, flags);
	return;
}