static void c_can_handle_lost_msg_obj(struct net_device *dev,
                                      int iface, int objno)
{
        struct c_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        struct can_frame *frame;

        netdev_err(dev, "msg lost in buffer %d\n", objno);

        c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);

        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
                        IF_MCONT_CLR_MSGLST);

        c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);

        /* create an error msg */
        skb = alloc_can_err_skb(dev, &frame);
        if (unlikely(!skb))
                return;

        frame->can_id |= CAN_ERR_CRTL;
        frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
        stats->rx_errors++;
        stats->rx_over_errors++;

        netif_receive_skb(skb);
}
static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
                                                        int iface)
{
        int i;

        /* clear NEWDAT on every message object in the lower RX group */
        for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
                c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}
/*
 * theory of operation:
 *
 * The c_can core saves a received CAN message into the first free
 * message object it finds (starting with the lowest). Bits NEWDAT and
 * INTPND are set for this message object indicating that a new message
 * has arrived. Because the core always fills the lowest free object,
 * re-enabling a low object while higher objects still hold unread
 * frames would let a newer frame be stored (and later read) ahead of
 * older ones. To avoid that, we keep two groups of message objects
 * whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
 * To ensure in-order frame reception we use the following
 * approach while re-activating a message object to receive further
 * frames:
 * - if the current message object number is lower than
 *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
 *   the INTPND bit.
 * - if the current message object number is equal to
 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
 *   receive message objects.
 * - if the current message object number is greater than
 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
 *   only this message object.
 */
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
        u32 num_rx_pkts = 0;
        unsigned int msg_obj, msg_ctrl_save;
        struct c_can_priv *priv = netdev_priv(dev);
        u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);

        for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
             msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
             val = c_can_read_reg32(priv, C_CAN_INTPND1_REG), msg_obj++) {
                /*
                 * bit n-1 of the interrupt pending register corresponds
                 * to message object n.
                 */
                if (val & (1 << (msg_obj - 1))) {
                        c_can_object_get(dev, 0, msg_obj,
                                         IF_COMM_ALL & ~IF_COMM_TXRQST);
                        msg_ctrl_save = priv->read_reg(priv,
                                        C_CAN_IFACE(MSGCTRL_REG, 0));

                        if (msg_ctrl_save & IF_MCONT_EOB)
                                return num_rx_pkts;

                        if (msg_ctrl_save & IF_MCONT_MSGLST) {
                                c_can_handle_lost_msg_obj(dev, 0, msg_obj);
                                num_rx_pkts++;
                                quota--;
                                continue;
                        }

                        if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
                                continue;

                        /* read the data from the message object */
                        c_can_read_msg_object(dev, 0, msg_ctrl_save);

                        if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
                                c_can_mark_rx_msg_obj(dev, 0,
                                                msg_ctrl_save, msg_obj);
                        else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
                                /* activate this msg obj */
                                c_can_activate_rx_msg_obj(dev, 0,
                                                msg_ctrl_save, msg_obj);
                        else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
                                /* activate all lower message objects */
                                c_can_activate_all_lower_rx_msg_obj(dev, 0);

                        num_rx_pkts++;
                        quota--;
                }
        }

        return num_rx_pkts;
}
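/*
 * Worked illustration of the split-group scheme (the numbers below are
 * assumptions for the sake of the example; the real limits come from
 * C_CAN_MSG_OBJ_RX_FIRST/_LAST and C_CAN_MSG_OBJ_RX_SPLIT in c_can.h).
 * Suppose the RX objects are 1..16 and C_CAN_MSG_RX_LOW_LAST is 8:
 *
 * - back-to-back frames are stored by the core in objects 1, 2, 3, ...
 *   since it always picks the lowest free object;
 * - while objects 1..7 are read, only INTPND is cleared; NEWDAT stays
 *   set, so the core cannot refill these objects ahead of still-unread
 *   higher ones;
 * - once object 8 has been read, NEWDAT is cleared on all of 1..8 at
 *   once and the whole low group is re-armed;
 * - objects 9..16 are re-armed one by one as they are read, which is
 *   safe because any newly arriving frame now lands in the already
 *   drained low group and is picked up on a later poll.
 */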
/*
 * Re-arm a message object after its contents have been read.  D_CAN
 * cores clear the NEWDAT bit as part of the initial transfer done in
 * c_can_rx_object_get(); the other variants need this extra transfer
 * to clear it.
 */
static inline void c_can_rx_finalize(struct net_device *dev,
                                     struct c_can_priv *priv, u32 obj)
{
        if (priv->type != BOSCH_D_CAN)
                c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}
/*
 * Transfer message object 'obj' into the IF_RX interface registers,
 * using the receive command word configured for this core.
 */
static inline void c_can_rx_object_get(struct net_device *dev,
                                       struct c_can_priv *priv, u32 obj)
{
        c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}
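/*
 * Hypothetical usage sketch, not part of the driver: one way the two
 * helpers above could be combined to drain a single pending RX object.
 * The function name is invented for illustration, lost-message and
 * error handling are omitted, and the real RX poll loop differs; the
 * block is kept inside #if 0 so it is never compiled.
 */
#if 0
static bool example_drain_rx_obj(struct net_device *dev,
                                 struct c_can_priv *priv, u32 obj)
{
        u32 ctrl;

        /* transfer the message object into the IF_RX interface registers */
        c_can_rx_object_get(dev, priv, obj);
        ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));

        /* nothing new arrived in this object */
        if (!(ctrl & IF_MCONT_NEWDAT))
                return false;

        /* pass the frame up and re-arm the message object */
        c_can_read_msg_object(dev, IF_RX, ctrl);
        c_can_rx_finalize(dev, priv, obj);

        return true;
}
#endif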