Example #1
/*
 * theory of operation:
 *
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
 *
 * We iterate from priv->tx_echo to priv->tx_next; if a packet has been
 * transmitted, we echo it back to the CAN framework. As soon as we find
 * a not yet transmitted packet, we stop looking for more.
 */
static void c_can_do_tx(struct net_device *dev)
{
	u32 val;
	u32 msg_obj_no;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		msg_obj_no = get_tx_echo_msg_obj(priv);
		val = c_can_read_reg32(priv, &priv->regs->txrqst1);
		if (!(val & (1 << msg_obj_no))) {
			can_get_echo_skb(dev,
					msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
			stats->tx_bytes += priv->read_reg(priv,
					&priv->regs->ifregs[0].msg_cntrl)
					& IF_MCONT_DLC_MASK;
			stats->tx_packets++;
			c_can_inval_msg_object(dev, 0, msg_obj_no);
		}
	}

	/* restart queue if wrap-around or if queue stalled on last pkt */
	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
			((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
		netif_wake_queue(dev);
}
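
This handler is the completion half of the CAN echo-skb pattern: the xmit path stores each outgoing skb with can_put_echo_skb() and advances priv->tx_next, while the loop above echoes those skbs back with can_get_echo_skb() and advances priv->tx_echo. The sketch below shows how the matching xmit side could look; it is a simplified illustration rather than verbatim driver code, and the helpers get_tx_next_msg_obj(), c_can_write_msg_object() and c_can_is_next_tx_obj_busy() are assumed from the surrounding driver.

/* Simplified sketch of the matching xmit side (helper names assumed). */
static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame = (struct can_frame *)skb->data;
	u32 msg_obj_no;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	msg_obj_no = get_tx_next_msg_obj(priv);

	/* hand the frame to the hardware and keep the skb for the echo */
	c_can_write_msg_object(dev, 0, frame, msg_obj_no);
	can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);

	/* stop the queue on wrap-around or if the next object is busy;
	 * c_can_do_tx() restarts it once objects drain
	 */
	priv->tx_next++;
	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
	    (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}
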
Example #2
/*
 * theory of operation:
 *
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
 *
 * We iterate from priv->tx_echo to priv->tx_next; if a packet has been
 * transmitted, we echo it back to the CAN framework. As soon as we find
 * a not yet transmitted packet, we stop looking for more.
 */
static void c_can_do_tx(struct net_device *dev)
{
	u32 val;
	u32 msg_obj_no;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		msg_obj_no = get_tx_echo_msg_obj(priv);
		val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
		if (!(val & (1 << (msg_obj_no - 1)))) {
			can_get_echo_skb(dev,
					msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
			stats->tx_bytes += priv->read_reg(priv,
					C_CAN_IFACE(MSGCTRL_REG, 0))
					& IF_MCONT_DLC_MASK;
			stats->tx_packets++;
			can_led_event(dev, CAN_LED_EVENT_TX);
			c_can_inval_msg_object(dev, 0, msg_obj_no);
		} else {
			break;
		}
	}

	/* restart queue if wrap-around or if queue stalled on last pkt */
	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
			((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
		netif_wake_queue(dev);
}
Example #3
/*
 * callback for bulk OUT (tx) urb
 */
static void ems_usb_write_bulk_callback(struct urb *urb)
{
	struct ems_tx_urb_context *context = urb->context;
	struct ems_usb *dev;
	struct net_device *netdev;

	BUG_ON(!context);

	dev = context->dev;
	netdev = dev->netdev;

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);

	atomic_dec(&dev->active_tx_urbs);

	if (!netif_device_present(netdev))
		return;

	if (urb->status)
		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);

	netif_trans_update(netdev);

	/* transmission complete interrupt */
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += context->dlc;

	can_get_echo_skb(netdev, context->echo_index);

	/* Release context */
	context->echo_index = MAX_TX_URBS;

}
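
For this callback to find valid data, the xmit path must have initialized the context before submitting the URB: context->dev, context->echo_index and context->dlc, plus a matching can_put_echo_skb() call. The helper below is a hypothetical sketch of that setup; the function itself does not exist in the driver, only the fields and counters are taken from the callback above.

/* Hypothetical sketch of the tx-side context setup this callback relies on. */
static void ems_usb_prepare_tx_context(struct ems_usb *dev, struct sk_buff *skb,
				       struct ems_tx_urb_context *context,
				       unsigned int slot, u8 dlc)
{
	context->dev = dev;
	context->echo_index = slot;	/* the slot is now marked as in use */
	context->dlc = dlc;

	/* keep the skb so the completion callback can echo it back */
	can_put_echo_skb(skb, dev->netdev, context->echo_index);
	atomic_inc(&dev->active_tx_urbs);
}
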
Example #4
static void c_can_do_tx(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 idx, obj, pkts = 0, bytes = 0, pend, clr;

	clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);

	while ((idx = ffs(pend))) {
		idx--;
		pend &= ~(1 << idx);
		obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
		c_can_inval_tx_object(dev, IF_RX, obj);
		can_get_echo_skb(dev, idx);
		bytes += priv->dlc[idx];
		pkts++;
	}

	/* Clear the bits in the tx_active mask */
	atomic_sub(clr, &priv->tx_active);

	if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
		netif_wake_queue(dev);

	if (pkts) {
		stats->tx_bytes += bytes;
		stats->tx_packets += pkts;
		can_led_event(dev, CAN_LED_EVENT_TX);
	}
}
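
This newer c_can variant tracks in-flight message objects in the priv->tx_active bitmask instead of the tx_next/tx_echo counters used in Examples #1 and #2. A simplified sketch of the xmit counterpart that sets those bits is shown below; it is an illustration only, and c_can_setup_tx_object() is assumed from the surrounding driver.

/* Simplified sketch of the xmit counterpart (not verbatim driver code). */
static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct c_can_priv *priv = netdev_priv(dev);
	u32 idx, obj;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* message objects are not a FIFO: the lowest free object wins */
	idx = fls(atomic_read(&priv->tx_active));
	obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

	/* if this takes the last object, stop the queue; do_tx() wakes it */
	if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
		netif_stop_queue(dev);

	c_can_setup_tx_object(dev, IF_TX, frame, obj);	/* assumed helper */
	priv->dlc[idx] = frame->can_dlc;
	can_put_echo_skb(skb, dev, idx);

	/* mark the object in flight; c_can_do_tx() clears the bit again */
	atomic_add(1 << idx, &priv->tx_active);

	/* ... a register write would kick off transmission of 'obj' here ... */

	return NETDEV_TX_OK;
}
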
Example #5
/* Let priv->eskbp catch up to regs->txrd and echo back the skbs if echo
 * is true and free them otherwise.
 *
 * If budget is >= 0, stop after handling at most budget skbs. Otherwise,
 * continue until priv->eskbp catches up to regs->txrd.
 *
 * priv->lock *must* be held when calling this function
 */
static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct net_device_stats *stats = &dev->stats;
	int i, work_done;

	/* Updates to priv->eskbp and wake-ups of the queue needs to
	 * be atomic towards the reads of priv->eskbp and shut-downs
	 * of the queue in grcan_start_xmit.
	 */
	u32 txrd = grcan_read_reg(&regs->txrd);

	for (work_done = 0; work_done < budget || budget < 0; work_done++) {
		if (priv->eskbp == txrd)
			break;
		i = priv->eskbp / GRCAN_MSG_SIZE;
		if (echo) {
			/* Normal echo of messages */
			stats->tx_packets++;
			stats->tx_bytes += priv->txdlc[i];
			priv->txdlc[i] = 0;
			can_get_echo_skb(dev, i);
		} else {
			/* For cleanup of untransmitted messages */
			can_free_echo_skb(dev, i);
		}

		priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
					     dma->tx.size);
		txrd = grcan_read_reg(&regs->txrd);
	}
	return work_done;
}
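
A usage sketch for the two modes of this helper: driven with a budget for normal echo of completed frames, and with echo set to false and a negative budget to drop everything still pending on shutdown. Both callers must hold priv->lock as required above; the caller functions here are illustrative only and not part of the driver.

/* Illustrative callers only; not taken from the driver. */
static void grcan_echo_completed_example(struct net_device *dev, int budget)
{
	struct grcan_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* echo back at most 'budget' completed frames */
	catch_up_echo_skb(dev, budget, true);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void grcan_drop_pending_example(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* free everything still queued instead of echoing it */
	catch_up_echo_skb(dev, -1, false);
	spin_unlock_irqrestore(&priv->lock, flags);
}
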
Example #6
irqreturn_t omapl_tx_can_intr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct omapl_pru_can_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 bit_set, mbxno;

	pru_can_get_interrupt_status(&priv->can_tx_hndl);
	if ((PRU_CAN_ISR_BIT_CCI & priv->can_tx_hndl.u32interruptstatus)
	    || (PRU_CAN_ISR_BIT_SRDI & priv->can_tx_hndl.u32interruptstatus)) {
		__can_debug("tx_int_status = 0x%X\n",
			    priv->can_tx_hndl.u32interruptstatus);
		can_free_echo_skb(ndev, 0);
	} else {
		for (bit_set = 0;
		     (priv->can_tx_hndl.u32interruptstatus & 0xFF) >> bit_set != 0;
		     bit_set++)
			;
		if (0 == bit_set) {
			__can_err("%s: invalid mailbox number\n", __func__);
			can_free_echo_skb(ndev, 0);
		} else {
			mbxno = bit_set - 1;	/* mailbox numbering starts from 0 */
			if (PRU_CAN_ISR_BIT_ESI &
			    priv->can_tx_hndl.u32interruptstatus) {
				/* read gsr and ack pru */
				pru_can_get_global_status(&priv->can_tx_hndl);
				omapl_pru_can_err(ndev,
						  priv->can_tx_hndl.u32interruptstatus,
						  priv->can_tx_hndl.u32globalstatus);
			} else {
				stats->tx_packets++;
				/* stats->tx_bytes += dlc; */
				/* can_get_echo_skb(ndev, 0); */
			}
		}
	}
	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	can_get_echo_skb(ndev, 0);
	pru_can_transfer_mode_set(true, ecanreceive);
	return IRQ_HANDLED;
}
Example #7
/* Send close command to device */
static int usb_8dev_cmd_close(struct usb_8dev_priv *priv)
{
	struct usb_8dev_cmd_msg inmsg;
	struct usb_8dev_cmd_msg outmsg = {
		.channel = 0,
		.command = USB_8DEV_CLOSE,
		.opt1 = 0,
		.opt2 = 0
	};

	return usb_8dev_send_cmd(priv, &outmsg, &inmsg);
}

/* Get firmware and hardware version */
static int usb_8dev_cmd_version(struct usb_8dev_priv *priv, u32 *res)
{
	struct usb_8dev_cmd_msg	inmsg;
	struct usb_8dev_cmd_msg	outmsg = {
		.channel = 0,
		.command = USB_8DEV_GET_SOFTW_HARDW_VER,
		.opt1 = 0,
		.opt2 = 0
	};

	int err = usb_8dev_send_cmd(priv, &outmsg, &inmsg);
	if (err)
		return err;

	*res = be32_to_cpup((__be32 *)inmsg.data);

	return err;
}

/* Set network device mode
 *
 * Maybe we should leave this function empty, because the device
 * sets the mode variable with the open command.
 */
static int usb_8dev_set_mode(struct net_device *netdev, enum can_mode mode)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);
	int err = 0;

	switch (mode) {
	case CAN_MODE_START:
		err = usb_8dev_cmd_open(priv);
		if (err)
			netdev_warn(netdev, "couldn't start device");
		break;

	default:
		return -EOPNOTSUPP;
	}

	return err;
}

/* Read error/status frames */
static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
				struct usb_8dev_rx_msg *msg)
{
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats = &priv->netdev->stats;

	/* Error message:
	 * byte 0: Status
	 * byte 1: bit   7: Receive Passive
	 * byte 1: bit 0-6: Receive Error Counter
	 * byte 2: Transmit Error Counter
	 * byte 3: Always 0 (maybe reserved for future use)
	 */

	u8 state = msg->data[0];
	u8 rxerr = msg->data[1] & USB_8DEV_RP_MASK;
	u8 txerr = msg->data[2];
	int rx_errors = 0;
	int tx_errors = 0;

	skb = alloc_can_err_skb(priv->netdev, &cf);
	if (!skb)
		return;

	switch (state) {
	case USB_8DEV_STATUSMSG_OK:
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		cf->can_id |= CAN_ERR_PROT;
		cf->data[2] = CAN_ERR_PROT_ACTIVE;
		break;
	case USB_8DEV_STATUSMSG_BUSOFF:
		priv->can.state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(priv->netdev);
		break;
	case USB_8DEV_STATUSMSG_OVERRUN:
	case USB_8DEV_STATUSMSG_BUSLIGHT:
	case USB_8DEV_STATUSMSG_BUSHEAVY:
		cf->can_id |= CAN_ERR_CRTL;
		break;
	default:
		priv->can.state = CAN_STATE_ERROR_WARNING;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		priv->can.can_stats.bus_error++;
		break;
	}

	switch (state) {
	case USB_8DEV_STATUSMSG_OK:
	case USB_8DEV_STATUSMSG_BUSOFF:
		break;
	case USB_8DEV_STATUSMSG_ACK:
		cf->can_id |= CAN_ERR_ACK;
		tx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_CRC:
		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
		cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
			       CAN_ERR_PROT_LOC_CRC_DEL;
		rx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_BIT0:
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		tx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_BIT1:
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		tx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_FORM:
		cf->data[2] |= CAN_ERR_PROT_FORM;
		rx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_STUFF:
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		rx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_OVERRUN:
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		rx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_BUSLIGHT:
		priv->can.state = CAN_STATE_ERROR_WARNING;
		cf->data[1] = (txerr > rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		priv->can.can_stats.error_warning++;
		break;
	case USB_8DEV_STATUSMSG_BUSHEAVY:
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		cf->data[1] = (txerr > rxerr) ?
			CAN_ERR_CRTL_TX_PASSIVE :
			CAN_ERR_CRTL_RX_PASSIVE;
		priv->can.can_stats.error_passive++;
		break;
	default:
		netdev_warn(priv->netdev,
			    "Unknown status/error message (%d)\n", state);
		break;
	}

	if (tx_errors) {
		cf->data[2] |= CAN_ERR_PROT_TX;
		stats->tx_errors++;
	}

	if (rx_errors)
		stats->rx_errors++;

	cf->data[6] = txerr;
	cf->data[7] = rxerr;

	priv->bec.txerr = txerr;
	priv->bec.rxerr = rxerr;

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

/* Read data and status frames */
static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
				struct usb_8dev_rx_msg *msg)
{
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats = &priv->netdev->stats;

	if (msg->type == USB_8DEV_TYPE_ERROR_FRAME &&
		   msg->flags == USB_8DEV_ERR_FLAG) {
		usb_8dev_rx_err_msg(priv, msg);
	} else if (msg->type == USB_8DEV_TYPE_CAN_FRAME) {
		skb = alloc_can_skb(priv->netdev, &cf);
		if (!skb)
			return;

		cf->can_id = be32_to_cpu(msg->id);
		cf->can_dlc = get_can_dlc(msg->dlc & 0xF);

		if (msg->flags & USB_8DEV_EXTID)
			cf->can_id |= CAN_EFF_FLAG;

		if (msg->flags & USB_8DEV_RTR)
			cf->can_id |= CAN_RTR_FLAG;
		else
			memcpy(cf->data, msg->data, cf->can_dlc);

		netif_rx(skb);

		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;

		can_led_event(priv->netdev, CAN_LED_EVENT_RX);
	} else {
		netdev_warn(priv->netdev, "frame type %d unknown",
			 msg->type);
	}

}

/* Callback for reading data from device
 *
 * Check urb status, call read function and resubmit urb read operation.
 */
static void usb_8dev_read_bulk_callback(struct urb *urb)
{
	struct usb_8dev_priv *priv = urb->context;
	struct net_device *netdev;
	int retval;
	int pos = 0;

	netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	switch (urb->status) {
	case 0: /* success */
		break;

	case -ENOENT:
	case -ESHUTDOWN:
		return;

	default:
		netdev_info(netdev, "Rx URB aborted (%d)\n",
			 urb->status);
		goto resubmit_urb;
	}

	while (pos < urb->actual_length) {
		struct usb_8dev_rx_msg *msg;

		if (pos + sizeof(struct usb_8dev_rx_msg) > urb->actual_length) {
			netdev_err(priv->netdev, "format error\n");
			break;
		}

		msg = (struct usb_8dev_rx_msg *)(urb->transfer_buffer + pos);
		usb_8dev_rx_can_msg(priv, msg);

		pos += sizeof(struct usb_8dev_rx_msg);
	}

resubmit_urb:
	usb_fill_bulk_urb(urb, priv->udev,
			  usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX),
			  urb->transfer_buffer, RX_BUFFER_SIZE,
			  usb_8dev_read_bulk_callback, priv);

	retval = usb_submit_urb(urb, GFP_ATOMIC);

	if (retval == -ENODEV)
		netif_device_detach(netdev);
	else if (retval)
		netdev_err(netdev,
			"failed resubmitting read bulk urb: %d\n", retval);
}

/* Callback handler for write operations
 *
 * Free allocated buffers, check transmit status and
 * calculate statistic.
 */
static void usb_8dev_write_bulk_callback(struct urb *urb)
{
	struct usb_8dev_tx_urb_context *context = urb->context;
	struct usb_8dev_priv *priv;
	struct net_device *netdev;

	BUG_ON(!context);

	priv = context->priv;
	netdev = priv->netdev;

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);

	atomic_dec(&priv->active_tx_urbs);

	if (!netif_device_present(netdev))
		return;

	if (urb->status)
		netdev_info(netdev, "Tx URB aborted (%d)\n",
			 urb->status);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += context->dlc;

	can_get_echo_skb(netdev, context->echo_index);

	can_led_event(netdev, CAN_LED_EVENT_TX);

	/* Release context */
	context->echo_index = MAX_TX_URBS;

	netif_wake_queue(netdev);
}

/* Send data to device */
static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *) skb->data;
	struct usb_8dev_tx_msg *msg;
	struct urb *urb;
	struct usb_8dev_tx_urb_context *context = NULL;
	u8 *buf;
	int i, err;
	size_t size = sizeof(struct usb_8dev_tx_msg);

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(netdev, "No memory left for URBs\n");
		goto nomem;
	}

	buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC,
				 &urb->transfer_dma);
	if (!buf) {
		netdev_err(netdev, "No memory left for USB buffer\n");
		goto nomembuf;
	}

	memset(buf, 0, size);

	msg = (struct usb_8dev_tx_msg *)buf;
	msg->begin = USB_8DEV_DATA_START;
	msg->flags = 0x00;

	if (cf->can_id & CAN_RTR_FLAG)
		msg->flags |= USB_8DEV_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		msg->flags |= USB_8DEV_EXTID;

	msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK);
	msg->dlc = cf->can_dlc;
	memcpy(msg->data, cf->data, cf->can_dlc);
	msg->end = USB_8DEV_DATA_END;

	for (i = 0; i < MAX_TX_URBS; i++) {
		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &priv->tx_contexts[i];
			break;
		}
	}

	/* This should never happen: it would mean we have more URBs in
	 * flight than allowed (MAX_TX_URBS).
	 */
	if (!context)
		goto nofreecontext;

	context->priv = priv;
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	usb_fill_bulk_urb(urb, priv->udev,
			  usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),
			  buf, size, usb_8dev_write_bulk_callback, context);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &priv->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&priv->active_tx_urbs);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err))
		goto failed;
	else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
		/* Slow down tx path */
		netif_stop_queue(netdev);

	/* Release our reference to this URB, the USB core will eventually free
	 * it entirely.
	 */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

nofreecontext:
	usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
	usb_free_urb(urb);

	netdev_warn(netdev, "couldn't find free context");

	return NETDEV_TX_BUSY;

failed:
	can_free_echo_skb(netdev, context->echo_index);

	usb_unanchor_urb(urb);
	usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);

	atomic_dec(&priv->active_tx_urbs);

	if (err == -ENODEV)
		netif_device_detach(netdev);
	else
		netdev_warn(netdev, "failed tx_urb %d\n", err);

nomembuf:
	usb_free_urb(urb);

nomem:
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}

static int usb_8dev_get_berr_counter(const struct net_device *netdev,
				     struct can_berr_counter *bec)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);

	bec->txerr = priv->bec.txerr;
	bec->rxerr = priv->bec.rxerr;

	return 0;
}

/* Start USB device */
static int usb_8dev_start(struct usb_8dev_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	int err, i;

	for (i = 0; i < MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf;

		/* create a URB, and a buffer for it */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			netdev_err(netdev, "No memory left for URBs\n");
			err = -ENOMEM;
			break;
		}

		buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			netdev_err(netdev, "No memory left for USB buffer\n");
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		usb_fill_bulk_urb(urb, priv->udev,
				  usb_rcvbulkpipe(priv->udev,
						  USB_8DEV_ENDP_DATA_RX),
				  buf, RX_BUFFER_SIZE,
				  usb_8dev_read_bulk_callback, priv);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &priv->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf,
					  urb->transfer_dma);
			usb_free_urb(urb);
			break;
		}

		/* Drop reference, USB core will take care of freeing it */
		usb_free_urb(urb);
	}

	/* Did we submit any URBs? */
	if (i == 0) {
		netdev_warn(netdev, "couldn't setup read URBs\n");
		return err;
	}

	/* Warn if we couldn't submit all the URBs */
	if (i < MAX_RX_URBS)
		netdev_warn(netdev, "rx performance may be slow\n");

	err = usb_8dev_cmd_open(priv);
	if (err)
		goto failed;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

failed:
	if (err == -ENODEV)
		netif_device_detach(priv->netdev);

	netdev_warn(netdev, "couldn't submit control: %d\n", err);

	return err;
}

/* Open USB device */
static int usb_8dev_open(struct net_device *netdev)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);
	int err;

	/* common open */
	err = open_candev(netdev);
	if (err)
		return err;

	can_led_event(netdev, CAN_LED_EVENT_OPEN);

	/* finally start device */
	err = usb_8dev_start(priv);
	if (err) {
		if (err == -ENODEV)
			netif_device_detach(priv->netdev);

		netdev_warn(netdev, "couldn't start device: %d\n",
			 err);

		close_candev(netdev);

		return err;
	}

	netif_start_queue(netdev);

	return 0;
}

static void unlink_all_urbs(struct usb_8dev_priv *priv)
{
	int i;

	usb_kill_anchored_urbs(&priv->rx_submitted);

	usb_kill_anchored_urbs(&priv->tx_submitted);
	atomic_set(&priv->active_tx_urbs, 0);

	for (i = 0; i < MAX_TX_URBS; i++)
		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
}

/* Close USB device */
static int usb_8dev_close(struct net_device *netdev)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);
	int err = 0;

	/* Send CLOSE command to CAN controller */
	err = usb_8dev_cmd_close(priv);
	if (err)
		netdev_warn(netdev, "couldn't stop device");

	priv->can.state = CAN_STATE_STOPPED;

	netif_stop_queue(netdev);

	/* Stop polling */
	unlink_all_urbs(priv);

	close_candev(netdev);

	can_led_event(netdev, CAN_LED_EVENT_STOP);

	return err;
}

static const struct net_device_ops usb_8dev_netdev_ops = {
	.ndo_open = usb_8dev_open,
	.ndo_stop = usb_8dev_close,
	.ndo_start_xmit = usb_8dev_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct can_bittiming_const usb_8dev_bittiming_const = {
	.name = "usb_8dev",
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

/* Probe USB device
 *
 * Check device and firmware.
 * Set supported modes and bittiming constants.
 * Allocate some memory.
 */
static int usb_8dev_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct net_device *netdev;
	struct usb_8dev_priv *priv;
	int i, err = -ENOMEM;
	u32 version;
	char buf[18];
	struct usb_device *usbdev = interface_to_usbdev(intf);

	/* the product id looks strange, so we also check the iProduct string */
	if (usb_string(usbdev, usbdev->descriptor.iProduct, buf,
		       sizeof(buf)) > 0 && strcmp(buf, "USB2CAN converter")) {
		dev_info(&usbdev->dev, "ignoring: not an USB2CAN converter\n");
		return -ENODEV;
	}

	netdev = alloc_candev(sizeof(struct usb_8dev_priv), MAX_TX_URBS);
	if (!netdev) {
		dev_err(&intf->dev, "Couldn't alloc candev\n");
		return -ENOMEM;
	}

	priv = netdev_priv(netdev);

	priv->udev = usbdev;
	priv->netdev = netdev;

	priv->can.state = CAN_STATE_STOPPED;
	priv->can.clock.freq = USB_8DEV_ABP_CLOCK;
	priv->can.bittiming_const = &usb_8dev_bittiming_const;
	priv->can.do_set_mode = usb_8dev_set_mode;
	priv->can.do_get_berr_counter = usb_8dev_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				      CAN_CTRLMODE_LISTENONLY |
				      CAN_CTRLMODE_ONE_SHOT;

	netdev->netdev_ops = &usb_8dev_netdev_ops;

	netdev->flags |= IFF_ECHO; /* we support local echo */

	init_usb_anchor(&priv->rx_submitted);

	init_usb_anchor(&priv->tx_submitted);
	atomic_set(&priv->active_tx_urbs, 0);

	for (i = 0; i < MAX_TX_URBS; i++)
		priv->tx_contexts[i].echo_index = MAX_TX_URBS;

	priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg),
				      GFP_KERNEL);
	if (!priv->cmd_msg_buffer)
		goto cleanup_candev;

	usb_set_intfdata(intf, priv);

	SET_NETDEV_DEV(netdev, &intf->dev);

	mutex_init(&priv->usb_8dev_cmd_lock);

	err = register_candev(netdev);
	if (err) {
		netdev_err(netdev,
			"couldn't register CAN device: %d\n", err);
		goto cleanup_cmd_msg_buffer;
	}

	err = usb_8dev_cmd_version(priv, &version);
	if (err) {
		netdev_err(netdev, "can't get firmware version\n");
		goto cleanup_unregister_candev;
	} else {
		netdev_info(netdev,
			 "firmware: %d.%d, hardware: %d.%d\n",
			 (version>>24) & 0xff, (version>>16) & 0xff,
			 (version>>8) & 0xff, version & 0xff);
	}

	devm_can_led_init(netdev);

	return 0;

cleanup_unregister_candev:
	unregister_netdev(priv->netdev);

cleanup_cmd_msg_buffer:
	kfree(priv->cmd_msg_buffer);

cleanup_candev:
	free_candev(netdev);

	return err;

}

/* Called by the usb core when driver is unloaded or device is removed */
static void usb_8dev_disconnect(struct usb_interface *intf)
{
	struct usb_8dev_priv *priv = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);

	if (priv) {
		netdev_info(priv->netdev, "device disconnected\n");

		unregister_netdev(priv->netdev);
		free_candev(priv->netdev);

		unlink_all_urbs(priv);
	}

}

static struct usb_driver usb_8dev_driver = {
	.name =		"usb_8dev",
	.probe =	usb_8dev_probe,
	.disconnect =	usb_8dev_disconnect,
	.id_table =	usb_8dev_table,
};

module_usb_driver(usb_8dev_driver);

MODULE_AUTHOR("Bernd Krumboeck <*****@*****.**>");
MODULE_DESCRIPTION("CAN driver for 8 devices USB2CAN interfaces");
MODULE_LICENSE("GPL v2");
Example #8
static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 mbxno, mbx_mask, int_status, err_status;
	unsigned long ack, flags;

	int_status = hecc_read(priv,
		(priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0);

	if (!int_status)
		return IRQ_NONE;

	err_status = hecc_read(priv, HECC_CANES);
	if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
		HECC_CANES_EP | HECC_CANES_EW))
			ti_hecc_error(ndev, int_status, err_status);

	if (int_status & HECC_CANGIF_GMIF) {
		while (priv->tx_tail - priv->tx_head > 0) {
			mbxno = get_tx_tail_mb(priv);
			mbx_mask = BIT(mbxno);
			if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
				break;
			hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
			hecc_write(priv, HECC_CANTA, mbx_mask);
			spin_lock_irqsave(&priv->mbx_lock, flags);
			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
			stats->tx_bytes += hecc_read_mbx(priv, mbxno,
						HECC_CANMCF) & 0xF;
			stats->tx_packets++;
			can_get_echo_skb(ndev, mbxno);
			--priv->tx_tail;
		}

		/* restart queue if wrap-around or if queue stalled on last pkt */
		if (((priv->tx_head == priv->tx_tail) &&
		((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
		(((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
		((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
			netif_wake_queue(ndev);

		/* Disable RX mailbox interrupts and let NAPI reenable them */
		if (hecc_read(priv, HECC_CANRMP)) {
			ack = hecc_read(priv, HECC_CANMIM);
			ack &= BIT(HECC_MAX_TX_MBOX) - 1;
			hecc_write(priv, HECC_CANMIM, ack);
			napi_schedule(&priv->napi);
		}
	}

	/* clear all interrupt conditions - read back to avoid spurious ints */
	if (priv->int_line) {
		hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
		int_status = hecc_read(priv, HECC_CANGIF1);
	} else {
		hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
		int_status = hecc_read(priv, HECC_CANGIF0);
	}

	return IRQ_HANDLED;
}
Example #9
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
	struct gs_usb *usbcan = urb->context;
	struct gs_can *dev;
	struct net_device *netdev;
	int rc;
	struct net_device_stats *stats;
	struct gs_host_frame *hf = urb->transfer_buffer;
	struct gs_tx_context *txc;
	struct can_frame *cf;
	struct sk_buff *skb;

	BUG_ON(!usbcan);

	switch (urb->status) {
	case 0: /* success */
		break;
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:
		/* do not resubmit aborted urbs, e.g. when the device goes down */
		return;
	}

	/* device reports out of range channel id */
	if (hf->channel >= GS_MAX_INTF)
		goto resubmit_urb;

	dev = usbcan->canch[hf->channel];

	netdev = dev->netdev;
	stats = &netdev->stats;

	if (!netif_device_present(netdev))
		return;

	if (hf->echo_id == -1) { /* normal rx */
		skb = alloc_can_skb(dev->netdev, &cf);
		if (!skb)
			return;

		cf->can_id = hf->can_id;

		cf->can_dlc = get_can_dlc(hf->can_dlc);
		memcpy(cf->data, hf->data, 8);

		/* ERROR frames tell us information about the controller */
		if (hf->can_id & CAN_ERR_FLAG)
			gs_update_state(dev, cf);

		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += hf->can_dlc;

		netif_rx(skb);
	} else { /* tx completion echoed back by the device */
		if (hf->echo_id >= GS_MAX_TX_URBS) {
			netdev_err(netdev,
				   "Unexpected out of range echo id %d\n",
				   hf->echo_id);
			goto resubmit_urb;
		}

		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += hf->can_dlc;

		txc = gs_get_tx_context(dev, hf->echo_id);

		/* bad devices send bad echo_ids. */
		if (!txc) {
			netdev_err(netdev,
				   "Unexpected unused echo id %d\n",
				   hf->echo_id);
			goto resubmit_urb;
		}

		can_get_echo_skb(netdev, hf->echo_id);

		gs_free_tx_context(txc);

		netif_wake_queue(netdev);
	}

	if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
		skb = alloc_can_err_skb(netdev, &cf);
		if (!skb)
			goto resubmit_urb;

		cf->can_id |= CAN_ERR_CRTL;
		cf->can_dlc = CAN_ERR_DLC;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
		netif_rx(skb);
	}

 resubmit_urb:
	usb_fill_bulk_urb(urb,
			  usbcan->udev,
			  usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
			  hf,
			  sizeof(struct gs_host_frame),
			  gs_usb_receive_bulk_callback,
			  usbcan
			  );

	rc = usb_submit_urb(urb, GFP_ATOMIC);

	/* on USB failure, take down all interfaces */
	if (rc == -ENODEV) {
		for (rc = 0; rc < GS_MAX_INTF; rc++) {
			if (usbcan->canch[rc])
				netif_device_detach(usbcan->canch[rc]->netdev);
		}
	}
}
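
The gs_get_tx_context()/gs_free_tx_context() pair manages a fixed pool of GS_MAX_TX_URBS contexts, with echo_id == GS_MAX_TX_URBS marking a free slot (the same convention the usb_8dev driver in Example #7 uses with MAX_TX_URBS). Below is a hedged sketch of how such lookup and release helpers could be implemented; the field names echo_id, tx_context and tx_ctx_lock are assumptions.

/* Sketch only; struct field names are assumed. */
static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
					       unsigned int id)
{
	struct gs_tx_context *txc = NULL;
	unsigned long flags;

	if (id >= GS_MAX_TX_URBS)
		return NULL;

	spin_lock_irqsave(&dev->tx_ctx_lock, flags);
	/* only hand back the slot if it is actually in use under this id */
	if (dev->tx_context[id].echo_id == id)
		txc = &dev->tx_context[id];
	spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);

	return txc;
}

static void gs_free_tx_context(struct gs_tx_context *txc)
{
	/* setting echo_id back to GS_MAX_TX_URBS marks the slot as free */
	txc->echo_id = GS_MAX_TX_URBS;
}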