static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct c_can_priv *priv = netdev_priv(dev);
	u32 idx, obj;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;
	/*
	 * This is not a FIFO. C/D_CAN sends out the buffers
	 * prioritized. The lowest buffer number wins.
	 */
	idx = fls(atomic_read(&priv->tx_active));
	obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

	/* If this is the last buffer, stop the xmit queue */
	if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
		netif_stop_queue(dev);
	/*
	 * Store the message in the interface so we can call
	 * can_put_echo_skb(). We must do this before we enable
	 * transmit as we might race against do_tx().
	 */
	c_can_setup_tx_object(dev, IF_TX, frame, idx);
	priv->dlc[idx] = frame->can_dlc;
	can_put_echo_skb(skb, dev, idx);

	/* Update the active bits */
	atomic_add((1 << idx), &priv->tx_active);
	/* Start transmission */
	c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);

	return NETDEV_TX_OK;
}
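/*
 * A minimal userspace sketch (not driver code) of the fls()-based slot
 * allocation above: the next free TX object is the bit just above the
 * highest bit currently set in tx_active, so the lower-numbered objects,
 * which the hardware transmits first, are always filled first. The slot
 * count and all names here are assumptions for the demo.
 */
#include <stdio.h>

#define TX_NUM 8			/* modelled number of TX objects */

static unsigned int tx_active;		/* bitmask of in-flight TX objects */

/* same semantics as the kernel's fls(): fls(0) == 0, fls(1) == 1 */
static int fls_u32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static int claim_tx_slot(void)
{
	int idx = fls_u32(tx_active);

	if (idx >= TX_NUM)
		return -1;		/* last buffer used: stop the queue */
	tx_active |= 1U << idx;
	return idx;
}

int main(void)
{
	for (int i = 0; i <= TX_NUM; i++)
		printf("claimed slot %d\n", claim_tx_slot());
	return 0;
}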
static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	u32 msg_obj_no;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame = (struct can_frame *)skb->data;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	msg_obj_no = get_tx_next_msg_obj(priv);

	/* prepare message object for transmission */
	c_can_write_msg_object(dev, 0, frame, msg_obj_no);
	can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);

	/*
	 * we have to stop the queue in case of a wrap around or
	 * if the next TX message object is still in use
	 */
	priv->tx_next++;
	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
	    (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}
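/*
 * Tiny sketch (illustrative mask, not the driver's value) of the wrap check
 * above: tx_next is a free-running counter whose low bits index the TX
 * message objects, so (tx_next & MASK) == 0 right after the increment means
 * the last object was just used and the queue must stop until the pending
 * transmissions complete.
 */
#include <stdio.h>

#define NEXT_MSG_OBJ_MASK 0x0f		/* 16 TX objects in this model */

int main(void)
{
	unsigned int tx_next = 0;

	for (int i = 0; i < 20; i++) {
		unsigned int obj = tx_next & NEXT_MSG_OBJ_MASK;

		tx_next++;
		if ((tx_next & NEXT_MSG_OBJ_MASK) == 0)
			printf("used obj %u: wrap, stop queue\n", obj);
		else
			printf("used obj %u\n", obj);
	}
	return 0;
}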
/*
 * ti_hecc_xmit: HECC Transmit
 *
 * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
 * priority of a mailbox for transmission depends upon the priority setting
 * field in the mailbox registers. The mailbox with the highest value in the
 * priority field is transmitted first. Only when two mailboxes have the same
 * value in the priority field is the highest numbered mailbox transmitted
 * first.
 *
 * To utilize the HECC priority feature as described above we start with the
 * highest numbered mailbox with the highest priority level and move on to
 * the next mailbox with the same priority level and so on. Once we loop
 * through all the transmit mailboxes we choose the next (lower) priority
 * level and so on, until we reach the lowest priority level on the lowest
 * numbered mailbox, where we stop transmission until all mailboxes are
 * transmitted and then restart at the highest numbered mailbox with the
 * highest priority.
 *
 * Two counters (head and tail) are used to track the next mailbox to
 * transmit and to track the echo buffer for an already transmitted mailbox.
 * The queue is stopped when all the mailboxes are busy or when a priority
 * value roll-over happens.
 */
static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 mbxno, mbx_mask, data;
	unsigned long flags;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	mbxno = get_tx_head_mb(priv);
	mbx_mask = BIT(mbxno);
	spin_lock_irqsave(&priv->mbx_lock, flags);
	if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
		spin_unlock_irqrestore(&priv->mbx_lock, flags);
		netif_stop_queue(ndev);
		netdev_err(priv->ndev,
			   "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
			   priv->tx_head, priv->tx_tail);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* Prepare mailbox for transmission */
	data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
		data |= HECC_CANMCF_RTR;
	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);

	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
		data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE;
	else /* Standard frame format */
		data = (cf->can_id & CAN_SFF_MASK) << 18;
	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
		       be32_to_cpu(*(u32 *)(cf->data)));
	if (cf->can_dlc > 4)
		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
			       be32_to_cpu(*(u32 *)(cf->data + 4)));
	else
		*(u32 *)(cf->data + 4) = 0;
	can_put_echo_skb(skb, ndev, mbxno);

	spin_lock_irqsave(&priv->mbx_lock, flags);
	--priv->tx_head;
	if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
	    (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
		netif_stop_queue(ndev);
	}
	hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
	hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
	hecc_write(priv, HECC_CANTRS, mbx_mask);

	return NETDEV_TX_OK;
}
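/*
 * Illustrative userspace model (not driver code) of the head counter used
 * above: the low bits select the mailbox (highest first) and the upper bits
 * select the priority level, so simply decrementing the counter walks from
 * "highest mailbox at highest priority" down to "lowest mailbox at lowest
 * priority", and a roll-over back to all-ones signals the stop condition.
 * The field widths below are assumptions for the demo, not the driver's
 * actual masks.
 */
#include <stdio.h>

#define MB_BITS		2			/* 4 TX mailboxes modelled */
#define MB_MASK		((1 << MB_BITS) - 1)
#define PRIO_MASK	(0x3 << MB_BITS)	/* 4 priority levels */
#define TX_MASK		(PRIO_MASK | MB_MASK)

int main(void)
{
	unsigned int head = TX_MASK;	/* highest mailbox, highest prio */

	do {
		printf("mailbox %u, priority %u\n",
		       head & MB_MASK, (head & PRIO_MASK) >> MB_BITS);
		head--;
	} while ((head & TX_MASK) != TX_MASK);	/* stop on roll-over */
	return 0;
}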
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	out_8(&regs->cantier, 0);

	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		netdev_err(dev, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * if buf_id < 3, then the current frame will be sent out of
		 * order, since buffers with lower ids have higher priority
		 * (hell..)
		 */
		netif_stop_queue(dev);
		/* fall through */
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are between the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
static int vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *cf = (struct can_frame *)skb->data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
	struct net_device_stats *stats = &dev->stats;
#else
	struct net_device_stats *stats = netdev_priv(dev);
#endif
	int loop;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	stats->tx_packets++;
	stats->tx_bytes += cf->can_dlc;

	/* set flag whether this packet has to be looped back */
	loop = skb->pkt_type == PACKET_LOOPBACK;

	if (!echo) {
		/* no echo handling available inside this driver */
		if (loop) {
			/*
			 * only count the packets here, because the
			 * CAN core already did the echo for us
			 */
			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
		}
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* perform standard echo handling for CAN network interfaces */
	if (loop) {
		struct sock *srcsk = skb->sk;

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* receive with packet counting */
		skb->sk = srcsk;
		vcan_rx(skb, dev);
	} else {
		/* no looped packets => no counting */
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}
static netdev_tx_t netdev_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
#endif
{
	struct softing_priv *priv = netdev_priv(dev);
	struct softing *card = priv->card;
	int ret;
	int bhlock;
	u8 *ptr;
	u8 cmd;
	unsigned int fifo_wr;
	struct can_frame msg;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	if (in_interrupt()) {
		bhlock = 0;
		spin_lock(&card->spin);
	} else {
		bhlock = 1;
		spin_lock_bh(&card->spin);
	}
	ret = NETDEV_TX_BUSY;
	if (!card->fw.up)
		goto xmit_done;
	if (card->tx.pending >= TXMAX)
		goto xmit_done;
	if (priv->tx.pending >= TX_ECHO_SKB_MAX)
		goto xmit_done;
	fifo_wr = card->dpram.tx->wr;
	if (fifo_wr == card->dpram.tx->rd)
		/* fifo full */
		goto xmit_done;
	memcpy(&msg, skb->data, sizeof(msg));
	ptr = &card->dpram.tx->fifo[fifo_wr][0];
	cmd = CMD_TX;
	if (msg.can_id & CAN_RTR_FLAG)
		cmd |= CMD_RTR;
	if (msg.can_id & CAN_EFF_FLAG)
		cmd |= CMD_XTD;
	if (priv->index)
		cmd |= CMD_BUS2;
	*ptr++ = cmd;
	*ptr++ = msg.can_dlc;
	*ptr++ = (msg.can_id >> 0);
	*ptr++ = (msg.can_id >> 8);
	if (msg.can_id & CAN_EFF_FLAG) {
		*ptr++ = (msg.can_id >> 16);
		*ptr++ = (msg.can_id >> 24);
	} else {
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *stats = &dev->stats;
	int loop;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	stats->tx_packets++;
	stats->tx_bytes += cfd->len;

	/* set flag whether this packet has to be looped back */
	loop = skb->pkt_type == PACKET_LOOPBACK;

	if (!echo) {
		/* no echo handling available inside this driver */
		if (loop) {
			/*
			 * only count the packets here, because the
			 * CAN core already did the echo for us
			 */
			stats->rx_packets++;
			stats->rx_bytes += cfd->len;
		}
		consume_skb(skb);
		return NETDEV_TX_OK;
	}

	/* perform standard echo handling for CAN network interfaces */
	if (loop) {
		skb = can_create_echo_skb(skb);
		if (!skb)
			return NETDEV_TX_OK;

		/* receive with packet counting */
		vcan_rx(skb, dev);
	} else {
		/* no looped packets => no counting */
		consume_skb(skb);
	}
	return NETDEV_TX_OK;
}
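/*
 * Minimal sketch of the loopback decision above (illustrative names, not
 * driver code): with the echo module parameter off, the CAN core has
 * already looped the frame back, so the driver only counts it; with echo
 * on, the driver clones the skb and feeds it to its own rx path.
 */
#include <stdbool.h>
#include <stdio.h>

static bool echo;	/* stands in for the module parameter */

static void vcan_tx_model(bool loopback_pkt)
{
	if (!echo) {
		if (loopback_pkt)
			printf("count rx only: core already echoed\n");
		printf("free skb\n");
		return;
	}
	if (loopback_pkt)
		printf("clone skb, deliver to local rx path\n");
	else
		printf("no loopback requested: free skb\n");
}

int main(void)
{
	echo = false;
	vcan_tx_model(true);
	echo = true;
	vcan_tx_model(true);
	return 0;
}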
/* trigger the tx queue-ing */
static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
					     struct net_device *dev)
{
	struct softing_priv *priv = netdev_priv(dev);
	struct softing *card = priv->card;
	int ret;
	uint8_t *ptr;
	uint8_t fifo_wr, fifo_rd;
	struct can_frame *cf = (struct can_frame *)skb->data;
	uint8_t buf[DPRAM_TX_SIZE];

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	spin_lock(&card->spin);

	ret = NETDEV_TX_BUSY;
	if (!card->fw.up ||
	    (card->tx.pending >= TXMAX) ||
	    (priv->tx.pending >= TX_ECHO_SKB_MAX))
		goto xmit_done;
	fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
	fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
	if (fifo_wr == fifo_rd)
		/* fifo full */
		goto xmit_done;
	memset(buf, 0, sizeof(buf));
	ptr = buf;
	*ptr = CMD_TX;
	if (cf->can_id & CAN_RTR_FLAG)
		*ptr |= CMD_RTR;
	if (cf->can_id & CAN_EFF_FLAG)
		*ptr |= CMD_XTD;
	if (priv->index)
		*ptr |= CMD_BUS2;
	++ptr;
	*ptr++ = cf->can_dlc;
	*ptr++ = (cf->can_id >> 0);
	*ptr++ = (cf->can_id >> 8);
	if (cf->can_id & CAN_EFF_FLAG) {
		*ptr++ = (cf->can_id >> 16);
		*ptr++ = (cf->can_id >> 24);
	} else {
static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cc770_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	unsigned int mo = obj2msgobj(CC770_OBJ_TX);
	u8 dlc, rtr;
	u32 id;
	int i;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	if ((cc770_read_reg(priv, msgobj[mo].ctrl1) & TXRQST_UNC) ==
	    TXRQST_SET) {
		netdev_err(dev, "TX register is still occupied!\n");
		return NETDEV_TX_BUSY;
	}

	netif_stop_queue(dev);

	dlc = cf->can_dlc;
	id = cf->can_id;
	if (cf->can_id & CAN_RTR_FLAG)
		rtr = 0;
	else
		rtr = MSGCFG_DIR;
	cc770_write_reg(priv, msgobj[mo].ctrl1,
			RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
	cc770_write_reg(priv, msgobj[mo].ctrl0,
			MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
	if (id & CAN_EFF_FLAG) {
		id &= CAN_EFF_MASK;
		cc770_write_reg(priv, msgobj[mo].config,
				(dlc << 4) | rtr | MSGCFG_XTD);
		cc770_write_reg(priv, msgobj[mo].id[3], id << 3);
		cc770_write_reg(priv, msgobj[mo].id[2], id >> 5);
		cc770_write_reg(priv, msgobj[mo].id[1], id >> 13);
		cc770_write_reg(priv, msgobj[mo].id[0], id >> 21);
	} else {
static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb = can_create_echo_skb(skb);
	if (!skb)
		goto out_unlock;

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += cfd->len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += cfd->len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
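/*
 * Simplified userspace model of the forwarding accounting above, with a
 * plain pointer standing in for the RCU-protected peer; all names are
 * illustrative. If the peer netdev is already gone the frame is dropped
 * and counted on the sender, otherwise both ends' counters move together.
 */
#include <stdio.h>

struct dev_stats {
	unsigned long tx_packets, tx_bytes, tx_dropped;
	unsigned long rx_packets, rx_bytes;
};

static void xmit_model(struct dev_stats *src, struct dev_stats *peer,
		       unsigned int len)
{
	if (!peer) {			/* peer already unregistered */
		src->tx_dropped++;
		return;
	}
	src->tx_packets++;
	src->tx_bytes += len;
	peer->rx_packets++;
	peer->rx_bytes += len;
}

int main(void)
{
	struct dev_stats a = { 0 }, b = { 0 };

	xmit_model(&a, &b, 8);
	xmit_model(&a, NULL, 8);
	printf("a: tx=%lu dropped=%lu; b: rx=%lu\n",
	       a.tx_packets, a.tx_dropped, b.rx_packets);
	return 0;
}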
/* Send close command to device */
static int usb_8dev_cmd_close(struct usb_8dev_priv *priv)
{
	struct usb_8dev_cmd_msg inmsg;
	struct usb_8dev_cmd_msg outmsg = {
		.channel = 0,
		.command = USB_8DEV_CLOSE,
		.opt1 = 0,
		.opt2 = 0
	};

	return usb_8dev_send_cmd(priv, &outmsg, &inmsg);
}

/* Get firmware and hardware version */
static int usb_8dev_cmd_version(struct usb_8dev_priv *priv, u32 *res)
{
	struct usb_8dev_cmd_msg inmsg;
	struct usb_8dev_cmd_msg outmsg = {
		.channel = 0,
		.command = USB_8DEV_GET_SOFTW_HARDW_VER,
		.opt1 = 0,
		.opt2 = 0
	};

	int err = usb_8dev_send_cmd(priv, &outmsg, &inmsg);
	if (err)
		return err;

	*res = be32_to_cpup((__be32 *)inmsg.data);

	return err;
}

/* Set network device mode
 *
 * Maybe we should leave this function empty, because the device
 * sets the mode variable with the open command.
 */
static int usb_8dev_set_mode(struct net_device *netdev, enum can_mode mode)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);
	int err = 0;

	switch (mode) {
	case CAN_MODE_START:
		err = usb_8dev_cmd_open(priv);
		if (err)
			netdev_warn(netdev, "couldn't start device");
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

/* Read error/status frames */
static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
				struct usb_8dev_rx_msg *msg)
{
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats = &priv->netdev->stats;

	/* Error message:
	 * byte 0: Status
	 * byte 1: bit 7: Receive Passive
	 * byte 1: bit 0-6: Receive Error Counter
	 * byte 2: Transmit Error Counter
	 * byte 3: Always 0 (maybe reserved for future use)
	 */

	u8 state = msg->data[0];
	u8 rxerr = msg->data[1] & USB_8DEV_RP_MASK;
	u8 txerr = msg->data[2];
	int rx_errors = 0;
	int tx_errors = 0;

	skb = alloc_can_err_skb(priv->netdev, &cf);
	if (!skb)
		return;

	switch (state) {
	case USB_8DEV_STATUSMSG_OK:
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		cf->can_id |= CAN_ERR_PROT;
		cf->data[2] = CAN_ERR_PROT_ACTIVE;
		break;
	case USB_8DEV_STATUSMSG_BUSOFF:
		priv->can.state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(priv->netdev);
		break;
	case USB_8DEV_STATUSMSG_OVERRUN:
	case USB_8DEV_STATUSMSG_BUSLIGHT:
	case USB_8DEV_STATUSMSG_BUSHEAVY:
		cf->can_id |= CAN_ERR_CRTL;
		break;
	default:
		priv->can.state = CAN_STATE_ERROR_WARNING;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		priv->can.can_stats.bus_error++;
		break;
	}

	switch (state) {
	case USB_8DEV_STATUSMSG_OK:
	case USB_8DEV_STATUSMSG_BUSOFF:
		break;
	case USB_8DEV_STATUSMSG_ACK:
		cf->can_id |= CAN_ERR_ACK;
		tx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_CRC:
		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
		cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
			       CAN_ERR_PROT_LOC_CRC_DEL;
		rx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_BIT0:
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		tx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_BIT1:
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		tx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_FORM:
		cf->data[2] |= CAN_ERR_PROT_FORM;
		rx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_STUFF:
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		rx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_OVERRUN:
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		rx_errors = 1;
		break;
	case USB_8DEV_STATUSMSG_BUSLIGHT:
		priv->can.state = CAN_STATE_ERROR_WARNING;
		cf->data[1] = (txerr > rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		priv->can.can_stats.error_warning++;
		break;
	case USB_8DEV_STATUSMSG_BUSHEAVY:
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		cf->data[1] = (txerr > rxerr) ?
			CAN_ERR_CRTL_TX_PASSIVE :
			CAN_ERR_CRTL_RX_PASSIVE;
		priv->can.can_stats.error_passive++;
		break;
	default:
		netdev_warn(priv->netdev,
			    "Unknown status/error message (%d)\n", state);
		break;
	}

	if (tx_errors) {
		cf->data[2] |= CAN_ERR_PROT_TX;
		stats->tx_errors++;
	}

	if (rx_errors)
		stats->rx_errors++;

	cf->data[6] = txerr;
	cf->data[7] = rxerr;

	priv->bec.txerr = txerr;
	priv->bec.rxerr = rxerr;

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}

/* Read data and status frames */
static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
				struct usb_8dev_rx_msg *msg)
{
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats = &priv->netdev->stats;

	if (msg->type == USB_8DEV_TYPE_ERROR_FRAME &&
	    msg->flags == USB_8DEV_ERR_FLAG) {
		usb_8dev_rx_err_msg(priv, msg);
	} else if (msg->type == USB_8DEV_TYPE_CAN_FRAME) {
		skb = alloc_can_skb(priv->netdev, &cf);
		if (!skb)
			return;

		cf->can_id = be32_to_cpu(msg->id);
		cf->can_dlc = get_can_dlc(msg->dlc & 0xF);

		if (msg->flags & USB_8DEV_EXTID)
			cf->can_id |= CAN_EFF_FLAG;

		if (msg->flags & USB_8DEV_RTR)
			cf->can_id |= CAN_RTR_FLAG;
		else
			memcpy(cf->data, msg->data, cf->can_dlc);

		netif_rx(skb);

		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;

		can_led_event(priv->netdev, CAN_LED_EVENT_RX);
	} else {
		netdev_warn(priv->netdev, "frame type %d unknown", msg->type);
	}
}

/* Callback for reading data from device
 *
 * Check urb status, call read function and resubmit urb read operation.
 */
static void usb_8dev_read_bulk_callback(struct urb *urb)
{
	struct usb_8dev_priv *priv = urb->context;
	struct net_device *netdev;
	int retval;
	int pos = 0;

	netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	switch (urb->status) {
	case 0: /* success */
		break;
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:
		netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status);
		goto resubmit_urb;
	}

	while (pos < urb->actual_length) {
		struct usb_8dev_rx_msg *msg;

		if (pos + sizeof(struct usb_8dev_rx_msg) >
		    urb->actual_length) {
			netdev_err(priv->netdev, "format error\n");
			break;
		}

		msg = (struct usb_8dev_rx_msg *)(urb->transfer_buffer + pos);
		usb_8dev_rx_can_msg(priv, msg);

		pos += sizeof(struct usb_8dev_rx_msg);
	}

resubmit_urb:
	usb_fill_bulk_urb(urb, priv->udev,
			  usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX),
			  urb->transfer_buffer, RX_BUFFER_SIZE,
			  usb_8dev_read_bulk_callback, priv);

	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval == -ENODEV)
		netif_device_detach(netdev);
	else if (retval)
		netdev_err(netdev,
			   "failed resubmitting read bulk urb: %d\n", retval);
}
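/*
 * Quick standalone sketch of the status-message layout documented in
 * usb_8dev_rx_err_msg() above: byte 0 carries the state, byte 1 carries
 * the receive-passive flag (bit 7) plus the receive error counter
 * (bits 0-6), and byte 2 carries the transmit error counter. The RP_MASK
 * value and the sample bytes are assumptions for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define RP_MASK 0x7f		/* receive error counter bits 0-6 */

int main(void)
{
	const uint8_t data[4] = { 0x07, 0x85, 0x20, 0x00 }; /* example msg */
	uint8_t state = data[0];
	uint8_t rxerr = data[1] & RP_MASK;
	int rx_passive = !!(data[1] & 0x80);
	uint8_t txerr = data[2];

	printf("state=%u rxerr=%u (passive=%d) txerr=%u\n",
	       state, rxerr, rx_passive, txerr);
	return 0;
}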
/* Callback handler for write operations
 *
 * Free allocated buffers, check transmit status and
 * calculate statistics.
 */
static void usb_8dev_write_bulk_callback(struct urb *urb)
{
	struct usb_8dev_tx_urb_context *context = urb->context;
	struct usb_8dev_priv *priv;
	struct net_device *netdev;

	BUG_ON(!context);

	priv = context->priv;
	netdev = priv->netdev;

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);

	atomic_dec(&priv->active_tx_urbs);

	if (!netif_device_present(netdev))
		return;

	if (urb->status)
		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += context->dlc;

	can_get_echo_skb(netdev, context->echo_index);

	can_led_event(netdev, CAN_LED_EVENT_TX);

	/* Release context */
	context->echo_index = MAX_TX_URBS;

	netif_wake_queue(netdev);
}

/* Send data to device */
static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
				       struct net_device *netdev)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct usb_8dev_tx_msg *msg;
	struct urb *urb;
	struct usb_8dev_tx_urb_context *context = NULL;
	u8 *buf;
	int i, err;
	size_t size = sizeof(struct usb_8dev_tx_msg);

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(netdev, "No memory left for URBs\n");
		goto nomem;
	}

	buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC,
				 &urb->transfer_dma);
	if (!buf) {
		netdev_err(netdev, "No memory left for USB buffer\n");
		goto nomembuf;
	}

	memset(buf, 0, size);

	msg = (struct usb_8dev_tx_msg *)buf;
	msg->begin = USB_8DEV_DATA_START;
	msg->flags = 0x00;

	if (cf->can_id & CAN_RTR_FLAG)
		msg->flags |= USB_8DEV_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		msg->flags |= USB_8DEV_EXTID;

	msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK);
	msg->dlc = cf->can_dlc;
	memcpy(msg->data, cf->data, cf->can_dlc);
	msg->end = USB_8DEV_DATA_END;

	for (i = 0; i < MAX_TX_URBS; i++) {
		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &priv->tx_contexts[i];
			break;
		}
	}

	/* May never happen! If it does, we'd have more URBs in flight than
	 * allowed (MAX_TX_URBS).
	 */
	if (!context)
		goto nofreecontext;

	context->priv = priv;
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	usb_fill_bulk_urb(urb, priv->udev,
			  usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),
			  buf, size, usb_8dev_write_bulk_callback, context);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &priv->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&priv->active_tx_urbs);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err))
		goto failed;
	else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
		/* Slow down tx path */
		netif_stop_queue(netdev);

	/* Release our reference to this URB, the USB core will eventually
	 * free it entirely.
	 */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

nofreecontext:
	usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
	usb_free_urb(urb);
	netdev_warn(netdev, "couldn't find free context");
	return NETDEV_TX_BUSY;

failed:
	can_free_echo_skb(netdev, context->echo_index);
	usb_unanchor_urb(urb);
	usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
	atomic_dec(&priv->active_tx_urbs);

	if (err == -ENODEV)
		netif_device_detach(netdev);
	else
		netdev_warn(netdev, "failed tx_urb %d\n", err);

nomembuf:
	usb_free_urb(urb);

nomem:
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}

static int usb_8dev_get_berr_counter(const struct net_device *netdev,
				     struct can_berr_counter *bec)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);

	bec->txerr = priv->bec.txerr;
	bec->rxerr = priv->bec.rxerr;

	return 0;
}

/* Start USB device */
static int usb_8dev_start(struct usb_8dev_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	int err, i;

	for (i = 0; i < MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf;

		/* create a URB, and a buffer for it */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			netdev_err(netdev, "No memory left for URBs\n");
			err = -ENOMEM;
			break;
		}

		buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE,
					 GFP_KERNEL, &urb->transfer_dma);
		if (!buf) {
			netdev_err(netdev, "No memory left for USB buffer\n");
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		usb_fill_bulk_urb(urb, priv->udev,
				  usb_rcvbulkpipe(priv->udev,
						  USB_8DEV_ENDP_DATA_RX),
				  buf, RX_BUFFER_SIZE,
				  usb_8dev_read_bulk_callback, priv);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &priv->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf,
					  urb->transfer_dma);
			usb_free_urb(urb);
			break;
		}

		/* Drop reference, USB core will take care of freeing it */
		usb_free_urb(urb);
	}

	/* Did we submit any URBs? */
	if (i == 0) {
		netdev_warn(netdev, "couldn't setup read URBs\n");
		return err;
	}

	/* Warn if we couldn't submit all the URBs */
	if (i < MAX_RX_URBS)
		netdev_warn(netdev, "rx performance may be slow\n");

	err = usb_8dev_cmd_open(priv);
	if (err)
		goto failed;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

failed:
	if (err == -ENODEV)
		netif_device_detach(priv->netdev);

	netdev_warn(netdev, "couldn't submit control: %d\n", err);

	return err;
}

/* Open USB device */
static int usb_8dev_open(struct net_device *netdev)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);
	int err;

	/* common open */
	err = open_candev(netdev);
	if (err)
		return err;

	can_led_event(netdev, CAN_LED_EVENT_OPEN);

	/* finally start device */
	err = usb_8dev_start(priv);
	if (err) {
		if (err == -ENODEV)
			netif_device_detach(priv->netdev);

		netdev_warn(netdev, "couldn't start device: %d\n", err);

		close_candev(netdev);

		return err;
	}

	netif_start_queue(netdev);

	return 0;
}

static void unlink_all_urbs(struct usb_8dev_priv *priv)
{
	int i;

	usb_kill_anchored_urbs(&priv->rx_submitted);

	usb_kill_anchored_urbs(&priv->tx_submitted);
	atomic_set(&priv->active_tx_urbs, 0);

	for (i = 0; i < MAX_TX_URBS; i++)
		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
}

/* Close USB device */
static int usb_8dev_close(struct net_device *netdev)
{
	struct usb_8dev_priv *priv = netdev_priv(netdev);
	int err = 0;

	/* Send CLOSE command to CAN controller */
	err = usb_8dev_cmd_close(priv);
	if (err)
		netdev_warn(netdev, "couldn't stop device");

	priv->can.state = CAN_STATE_STOPPED;

	netif_stop_queue(netdev);

	/* Stop polling */
	unlink_all_urbs(priv);

	close_candev(netdev);

	can_led_event(netdev, CAN_LED_EVENT_STOP);

	return err;
}
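/*
 * Userspace model (assumed names and sizes) of the TX context pool used by
 * usb_8dev_start_xmit() above, and by the ems_usb and gs_usb drivers below:
 * a slot whose echo_index equals MAX_TX_URBS is free; claiming it stores
 * the slot number, and the completion callback resets it.
 */
#include <stdio.h>

#define MAX_TX_URBS 10

struct tx_ctx {
	unsigned int echo_index;
};

static struct tx_ctx contexts[MAX_TX_URBS];

static struct tx_ctx *claim_ctx(void)
{
	for (unsigned int i = 0; i < MAX_TX_URBS; i++) {
		if (contexts[i].echo_index == MAX_TX_URBS) {
			contexts[i].echo_index = i;	/* mark busy */
			return &contexts[i];
		}
	}
	return NULL;		/* all URBs in flight: caller stops queue */
}

static void release_ctx(struct tx_ctx *ctx)
{
	ctx->echo_index = MAX_TX_URBS;	/* mark free again */
}

int main(void)
{
	for (int i = 0; i < MAX_TX_URBS; i++)
		contexts[i].echo_index = MAX_TX_URBS;

	struct tx_ctx *c = claim_ctx();

	if (c) {
		printf("claimed slot %u\n", c->echo_index);
		release_ctx(c);
	}
	return 0;
}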
static const struct net_device_ops usb_8dev_netdev_ops = {
	.ndo_open = usb_8dev_open,
	.ndo_stop = usb_8dev_close,
	.ndo_start_xmit = usb_8dev_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static const struct can_bittiming_const usb_8dev_bittiming_const = {
	.name = "usb_8dev",
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

/* Probe USB device
 *
 * Check device and firmware.
 * Set supported modes and bittiming constants.
 * Allocate some memory.
 */
static int usb_8dev_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
{
	struct net_device *netdev;
	struct usb_8dev_priv *priv;
	int i, err = -ENOMEM;
	u32 version;
	char buf[18];
	struct usb_device *usbdev = interface_to_usbdev(intf);

	/* product id looks strange, better we also check iProduct string */
	if (usb_string(usbdev, usbdev->descriptor.iProduct, buf,
		       sizeof(buf)) > 0 && strcmp(buf, "USB2CAN converter")) {
		dev_info(&usbdev->dev, "ignoring: not a USB2CAN converter\n");
		return -ENODEV;
	}

	netdev = alloc_candev(sizeof(struct usb_8dev_priv), MAX_TX_URBS);
	if (!netdev) {
		dev_err(&intf->dev, "Couldn't alloc candev\n");
		return -ENOMEM;
	}

	priv = netdev_priv(netdev);

	priv->udev = usbdev;
	priv->netdev = netdev;

	priv->can.state = CAN_STATE_STOPPED;
	priv->can.clock.freq = USB_8DEV_ABP_CLOCK;
	priv->can.bittiming_const = &usb_8dev_bittiming_const;
	priv->can.do_set_mode = usb_8dev_set_mode;
	priv->can.do_get_berr_counter = usb_8dev_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_ONE_SHOT;

	netdev->netdev_ops = &usb_8dev_netdev_ops;

	netdev->flags |= IFF_ECHO; /* we support local echo */

	init_usb_anchor(&priv->rx_submitted);

	init_usb_anchor(&priv->tx_submitted);
	atomic_set(&priv->active_tx_urbs, 0);

	for (i = 0; i < MAX_TX_URBS; i++)
		priv->tx_contexts[i].echo_index = MAX_TX_URBS;

	priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg),
				       GFP_KERNEL);
	if (!priv->cmd_msg_buffer)
		goto cleanup_candev;

	usb_set_intfdata(intf, priv);

	SET_NETDEV_DEV(netdev, &intf->dev);

	mutex_init(&priv->usb_8dev_cmd_lock);

	err = register_candev(netdev);
	if (err) {
		netdev_err(netdev, "couldn't register CAN device: %d\n", err);
		goto cleanup_cmd_msg_buffer;
	}

	err = usb_8dev_cmd_version(priv, &version);
	if (err) {
		netdev_err(netdev, "can't get firmware version\n");
		goto cleanup_unregister_candev;
	} else {
		netdev_info(netdev, "firmware: %d.%d, hardware: %d.%d\n",
			    (version >> 24) & 0xff, (version >> 16) & 0xff,
			    (version >> 8) & 0xff, version & 0xff);
	}

	devm_can_led_init(netdev);

	return 0;

cleanup_unregister_candev:
	unregister_netdev(priv->netdev);

cleanup_cmd_msg_buffer:
	kfree(priv->cmd_msg_buffer);

cleanup_candev:
	free_candev(netdev);

	return err;
}

/* Called by the usb core when driver is unloaded or device is removed */
static void usb_8dev_disconnect(struct usb_interface *intf)
{
	struct usb_8dev_priv *priv = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);

	if (priv) {
		netdev_info(priv->netdev, "device disconnected\n");

		unregister_netdev(priv->netdev);
		free_candev(priv->netdev);

		unlink_all_urbs(priv);
	}
}

static struct usb_driver usb_8dev_driver = {
	.name =		"usb_8dev",
	.probe =	usb_8dev_probe,
	.disconnect =	usb_8dev_disconnect,
	.id_table =	usb_8dev_table,
};

module_usb_driver(usb_8dev_driver);

MODULE_AUTHOR("Bernd Krumboeck <*****@*****.**>");
MODULE_DESCRIPTION("CAN driver for 8 devices USB2CAN interfaces");
MODULE_LICENSE("GPL v2");
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct net_device_stats *stats = &dev->netdev->stats;
	struct urb *urb;
	struct gs_host_frame *hf;
	struct can_frame *cf;
	int rc;
	unsigned int idx;
	struct gs_tx_context *txc;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* find an empty context to keep track of transmission */
	txc = gs_alloc_tx_context(dev);
	if (!txc)
		return NETDEV_TX_BUSY;

	/* create a URB, and a buffer for it */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto nomem_urb;

	hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
				&urb->transfer_dma);
	if (!hf) {
		netdev_err(netdev, "No memory left for USB buffer\n");
		goto nomem_hf;
	}

	idx = txc->echo_id;

	if (idx >= GS_MAX_TX_URBS) {
		netdev_err(netdev, "Invalid tx context %d\n", idx);
		goto badidx;
	}

	hf->echo_id = idx;
	hf->channel = dev->channel;

	cf = (struct can_frame *)skb->data;

	hf->can_id = cf->can_id;
	hf->can_dlc = cf->can_dlc;
	memcpy(hf->data, cf->data, cf->can_dlc);

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
			  hf, sizeof(*hf),
			  gs_usb_xmit_callback, txc);

	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->tx_submitted);

	can_put_echo_skb(skb, netdev, idx);

	atomic_inc(&dev->active_tx_urbs);

	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(rc)) {			/* usb send failed */
		atomic_dec(&dev->active_tx_urbs);

		can_free_echo_skb(netdev, idx);
		gs_free_tx_context(txc);

		usb_unanchor_urb(urb);
		usb_free_coherent(dev->udev, sizeof(*hf), hf,
				  urb->transfer_dma);

		if (rc == -ENODEV) {
			netif_device_detach(netdev);
		} else {
			netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
			stats->tx_dropped++;
		}
	} else {
		/* Slow down tx path */
		if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
			netif_stop_queue(netdev);
	}

	/* let usb core take care of this urb */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

badidx:
	usb_free_coherent(dev->udev, sizeof(*hf), hf, urb->transfer_dma);
nomem_hf:
	usb_free_urb(urb);
nomem_urb:
	gs_free_tx_context(txc);
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ems_usb *dev = netdev_priv(netdev);
	struct ems_tx_urb_context *context = NULL;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct ems_cpc_msg *msg;
	struct urb *urb;
	u8 *buf;
	int i, err;
	size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
			+ sizeof(struct cpc_can_msg);

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(netdev, "No memory left for URBs\n");
		goto nomem;
	}

	buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
				 &urb->transfer_dma);
	if (!buf) {
		netdev_err(netdev, "No memory left for USB buffer\n");
		usb_free_urb(urb);
		goto nomem;
	}

	msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];

	msg->msg.can_msg.id = cf->can_id & CAN_ERR_MASK;
	msg->msg.can_msg.length = cf->can_dlc;

	if (cf->can_id & CAN_RTR_FLAG) {
		msg->type = cf->can_id & CAN_EFF_FLAG ?
			CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME;

		msg->length = CPC_CAN_MSG_MIN_SIZE;
	} else {
		msg->type = cf->can_id & CAN_EFF_FLAG ?
			CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME;

		for (i = 0; i < cf->can_dlc; i++)
			msg->msg.can_msg.msg[i] = cf->data[i];

		msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
	}

	/* Respect byte order */
	msg->msg.can_msg.id = cpu_to_le32(msg->msg.can_msg.id);

	for (i = 0; i < MAX_TX_URBS; i++) {
		if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &dev->tx_contexts[i];
			break;
		}
	}

	/*
	 * May never happen! If it does, we'd have more URBs in flight than
	 * allowed (MAX_TX_URBS).
	 */
	if (!context) {
		usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
		usb_free_urb(urb);

		netdev_warn(netdev, "couldn't find free context\n");

		return NETDEV_TX_BUSY;
	}

	context->dev = dev;
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
			  size, ems_usb_write_bulk_callback, context);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&dev->active_tx_urbs);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		can_free_echo_skb(netdev, context->echo_index);

		usb_unanchor_urb(urb);
		usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
		dev_kfree_skb(skb);

		atomic_dec(&dev->active_tx_urbs);

		if (err == -ENODEV) {
			netif_device_detach(netdev);
		} else {
			netdev_warn(netdev, "failed tx_urb %d\n", err);

			stats->tx_dropped++;
		}
	} else {
		netdev->trans_start = jiffies;

		/* Slow down tx path */
		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
		    dev->free_slots < 5) {
			netif_stop_queue(netdev);
		}
	}

	/*
	 * Release our reference to this URB, the USB core will eventually
	 * free it entirely.
	 */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

nomem:
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
static netdev_tx_t sam4e_netdev_start_xmit(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct sam4e_req *req;
	struct usb_device *udev;
	struct sam4e_usb_handle *sam4e_usb_hnd = netdev_priv(netdev);
	struct sam4e_usb *dev = sam4e_usb_hnd->sam4e_dev;
	struct net_device_stats *stats = &netdev->stats;
	int result;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct urb *urb;
	size_t size = sizeof(struct sam4e_req) +
			sizeof(struct sam4e_can_full_write);
	struct sam4e_can_full_write *cfw;

	if (can_dropped_invalid_skb(netdev, skb)) {
		pr_err("Dropping invalid can frame");
		return NETDEV_TX_OK;
	}

	udev = dev->udev;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		pr_err("No memory left for URBs\n");
		goto nomem;
	}

	req = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
				 &urb->transfer_dma);
	if (!req) {
		pr_err("No memory left for USB buffer\n");
		usb_free_urb(urb);
		goto nomem;
	}

	/* Fill message data */
	cfw = (struct sam4e_can_full_write *)&req->data;
	req->cmd = CMD_CAN_FULL_WRITE;
	req->len = sizeof(struct sam4e_req) +
			sizeof(struct sam4e_can_full_write);
	req->seq = atomic_inc_return(&dev->msg_seq);
	cfw->can = sam4e_usb_hnd->owner_netdev_index;
	cfw->mailbox = 0;
	cfw->prio = 0;
	cfw->mid = cf->can_id;
	cfw->dlc = cf->can_dlc;
	memcpy(cfw->data, cf->data, 8);

	LOGNI(">%x %2d [%d] send frame [%d] %x %d %x %x %x %x %x %x %x %x\n",
	      req->cmd, req->len, req->seq,
	      atomic_read(&dev->active_tx_urbs),
	      cfw->mid, cfw->dlc,
	      cfw->data[0], cfw->data[1], cfw->data[2], cfw->data[3],
	      cfw->data[4], cfw->data[5], cfw->data[6], cfw->data[7]);

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, BULK_OUT_EP), req, size,
			  sam4e_usb_write_bulk_callback, netdev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->tx_submitted);
	atomic_inc(&dev->active_tx_urbs);

	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(result)) {
		usb_unanchor_urb(urb);
		usb_free_coherent(dev->udev, size, req, urb->transfer_dma);

		atomic_dec(&dev->active_tx_urbs);

		if (result == -ENODEV) {
			netif_device_detach(netdev);
		} else {
			pr_err("failed tx_urb %d\n", result);
			stats->tx_dropped++;
		}
	} else {
		/* Put on hold tx path */
		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS) {
			LOGNI("Too many outstanding requests (%d). Stop queue",
			      atomic_read(&dev->active_tx_urbs));
			atomic_inc(&dev->netif_queue_stop);
			if (dev->netdev1)
				netif_stop_queue(dev->netdev1);
			if (dev->netdev2)
				netif_stop_queue(dev->netdev2);
		}
	}

	/* skb is freed exactly once here for both submit outcomes */
	dev_kfree_skb(skb);
	usb_free_urb(urb);

	return NETDEV_TX_OK;

nomem:
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}