/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *		The linklevel header has already been set up
 *		by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being freed by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;

		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
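/*
 * Illustrative sketch only, not part of the driver: the block framing that
 * ctcm_transmit_skb() builds in front of the payload.  It assumes the
 * three-field ll_header used above (length, type, unused) and the existing
 * LL_HEADER_LENGTH define; the helper name ctcm_sketch_block_len() is
 * hypothetical.
 *
 *	+-----------+------------------------+-----------+
 *	| block_len |       ll_header        |  payload  |
 *	| (__u16)   | length / type / unused | skb data  |
 *	+-----------+------------------------+-----------+
 */
static inline __u16 ctcm_sketch_block_len(unsigned int payload_len)
{
	/* the ll_header is pushed onto the skb first ... */
	unsigned int framed_len = payload_len + LL_HEADER_LENGTH;

	/* ... then a 2-byte block length is prepended that counts itself,
	 * matching "block_len = skb->len + 2" in ctcm_transmit_skb() above */
	return (__u16)(framed_len + 2);
}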
static void ctcmpc_send_sweep_req(struct channel *rch)
{
	struct net_device *dev = rch->netdev;
	struct ctcm_priv *priv;
	struct mpc_group *grp;
	struct th_sweep *header;
	struct sk_buff *sweep_skb;
	struct channel *ch;
	/* int rc = 0; */

	priv = dev->ml_priv;
	grp = priv->mpcg;
	ch = priv->channel[WRITE];

	/* sweep processing is not complete until response and request */
	/* have completed for all read channels in group		*/
	if (grp->in_sweep == 0) {
		grp->in_sweep = 1;
		grp->sweep_rsp_pend_num = grp->active_channels[READ];
		grp->sweep_req_pend_num = grp->active_channels[READ];
	}

	sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
	if (sweep_skb == NULL) {
		/* rc = -ENOMEM; */
		goto nomem;
	}

	header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
	if (!header) {
		dev_kfree_skb_any(sweep_skb);
		/* rc = -ENOMEM; */
		goto nomem;
	}

	header->th.th_seg	= 0x00;
	header->th.th_ch_flag	= TH_SWEEP_REQ;  /* 0x0f */
	header->th.th_blk_flag	= 0x00;
	header->th.th_is_xid	= 0x00;
	header->th.th_seq_num	= 0x00;
	header->sw.th_last_seq	= ch->th_seq_num;

	memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);

	kfree(header);

	dev->trans_start = jiffies;
	skb_queue_tail(&ch->sweep_queue, sweep_skb);

	fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);

	return;

nomem:
	grp->in_sweep = 0;
	ctcm_clear_busy(dev);
	fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);

	return;
}
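/*
 * Illustrative sketch only, not part of the driver: how completion of the
 * sweep handshake started by ctcmpc_send_sweep_req() can be expressed with
 * the two counters initialised above.  The helper name
 * ctcmpc_sketch_sweep_done() is hypothetical; the real code decrements
 * sweep_req_pend_num and sweep_rsp_pend_num elsewhere as sweep requests and
 * responses complete on each read channel of the group.
 */
static inline int ctcmpc_sketch_sweep_done(struct mpc_group *grp)
{
	/* sweep processing is only finished once both the request and the
	 * response side have drained for every read channel in the group */
	return grp->sweep_req_pend_num == 0 && grp->sweep_rsp_pend_num == 0;
}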