/*
 * Allocate the DMA-capable transfer buffer (trans_skb) for a channel
 * and map it into the channel's CCW via set_normalized_cda().
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
int ctcm_ch_alloc_buffer(struct channel *ch)
{
	clear_normalized_cda(&ch->ccw[1]);
	ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
	if (ch->trans_skb == NULL) {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s trans_skb allocation error",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
		return -ENOMEM;
	}

	ch->ccw[1].count = ch->max_bufsize;
	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s set norm_cda failed",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
		return -ENOMEM;
	}

	ch->ccw[1].count = 0;
	ch->trans_skb_data = ch->trans_skb->data;
	ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
	return 0;
}
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *		The linklevel header has already been set up
 *		by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being free'd by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;
	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;

		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}