/*
 * IMM_ZcDequeueTail_Debug - remove and return the element at the tail of
 * a zero-copy queue.
 *
 * @usFileID:  caller's file id; unused in this build, kept so the signature
 *             matches the instrumented debug variant
 * @usLineNum: caller's line number; unused in this build
 * @pstList:   queue head (IMM_ZC_HEAD_STRU) to dequeue from
 *
 * Returns the dequeued element, or NULL if the queue is empty.
 * (History: introduced 2011-12-01.)
 */
IMM_ZC_STRU* IMM_ZcDequeueTail_Debug(unsigned short usFileID,
                                     unsigned short usLineNum,
                                     IMM_ZC_HEAD_STRU *pstList)
{
    /* Thin pass-through; skb_dequeue_tail already returns NULL on empty. */
    return skb_dequeue_tail(pstList);
}
/*
 * brcmu_pktq_pdeq_tail - dequeue a packet from the tail of one precedence
 * level of a multi-precedence packet queue.
 *
 * @pq:   multi-precedence queue
 * @prec: precedence index selecting which sub-queue to take from
 *
 * Returns the dequeued packet (and decrements the aggregate queue length),
 * or NULL if that precedence level is empty.
 */
struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
{
	struct sk_buff *skb = skb_dequeue_tail(&pq->q[prec].skblist);

	if (skb)
		pq->len--;	/* pq->len tracks the total across all precs */

	return skb;
}
static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc) { struct sk_buff *skb; pr_debug("ns reset to %d\n", shdlc->dnr); while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) { skb_pull(skb, 2); /* remove len+control */ skb_trim(skb, skb->len - 2); /* remove crc */ skb_queue_head(&shdlc->send_q, skb); } shdlc->ns = shdlc->dnr; }
/*
 * IMM_ZcDequeueTail_Debug - remove and return the element at the tail of
 * a zero-copy queue, routing through the instrumented dequeue when memory
 * debugging is compiled in.
 *
 * @usFileID:  caller's file id, recorded by the debug allocator when
 *             FEATURE_IMM_MEM_DEBUG is on; otherwise unused
 * @usLineNum: caller's line number, same usage as @usFileID
 * @pstList:   queue head (IMM_ZC_HEAD_STRU) to dequeue from
 *
 * Returns the dequeued element, or NULL if the queue is empty.
 * (History: introduced 2011-12-01.)
 */
IMM_ZC_STRU* IMM_ZcDequeueTail_Debug(unsigned short usFileID,
                                     unsigned short usLineNum,
                                     IMM_ZC_HEAD_STRU *pstList)
{
#if (FEATURE_IMM_MEM_DEBUG == FEATURE_ON)
    /* Debug build: tag the operation with the caller's location. */
    return skb_dequeue_tail_debug(usFileID, usLineNum, pstList);
#else
    /* Release build: plain dequeue; caller location is ignored. */
    return skb_dequeue_tail(pstList);
#endif
}
static void llt_ndlc_requeue_data_pending(struct llt_ndlc *ndlc) { struct sk_buff *skb; u8 pcb; while ((skb = skb_dequeue_tail(&ndlc->ack_pending_q))) { pcb = skb->data[0]; switch (pcb & PCB_TYPE_MASK) { case PCB_TYPE_SUPERVISOR: skb->data[0] = (pcb & ~PCB_SUPERVISOR_RETRANSMIT_MASK) | PCB_SUPERVISOR_RETRANSMIT_YES; break; case PCB_TYPE_DATAFRAME: skb->data[0] = (pcb & ~PCB_DATAFRAME_RETRANSMIT_MASK) | PCB_DATAFRAME_RETRANSMIT_YES; break; default: pr_err("UNKNOWN Packet Control Byte=%d\n", pcb); kfree_skb(skb); break; } skb_queue_head(&ndlc->send_q, skb); } }
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *		The linklevel header has already been set up
 *		by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		/* Channel busy: stage the packet on the collect queue
		 * (with its LL header prepended) to be sent later. */
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			/* Collect buffer full: ask the caller to retry. */
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being free'd by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	/* Prepend LL header, then the 2-byte overall block length. */
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;
	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		/* Data lies above 2G: copy it into a DMA-able skb. */
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			/* Undo the headers we pushed; caller keeps the skb. */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}
	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
						       skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;	/* transmit from trans_skb (CCW 0) */
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;	/* transmit skb directly (CCW 3) */
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		/* I/O start failed: roll back queueing and headers. */
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		/* trans_skb path: skb already consumed, count stats now. */
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
/**
 * set_baud_rate() - Sets new baud rate for the UART.
 * @baud:	New baud rate.
 *
 * This function first sends the HCI command
 * Hci_Cmd_ST_Set_Uart_Baud_Rate. It then changes the baud rate in HW, and
 * finally it waits for the Command Complete event for the
 * Hci_Cmd_ST_Set_Uart_Baud_Rate command.
 *
 * Returns:
 *   0 if there is no error.
 *   -EALREADY if baud rate change is already in progress.
 *   -EFAULT if one or more of the UART related structs is not allocated.
 *   -ENOMEM if skb allocation has failed.
 *   -EPERM if setting the new baud rate has failed.
 *   Error codes generated by create_work_item.
 */
static int set_baud_rate(int baud)
{
	struct tty_struct *tty = NULL;
	int err = 0;
	struct sk_buff *skb;
	int old_baud_rate;

	CG2900_INFO("set_baud_rate (%d baud)", baud);

	/* Reject overlapping baud rate changes; only one may be in flight. */
	if (uart_info->baud_rate_state != BAUD_IDLE) {
		CG2900_ERR("Trying to set new baud rate before old setting "
			   "is finished");
		return -EALREADY;
	}

	if (uart_info->tty)
		tty = uart_info->tty;
	else {
		CG2900_ERR("Important structs not allocated!");
		return -EFAULT;
	}

#ifdef BAUD_RATE_FIX
	/* Disable the RTS Flow */
	//tty_throttle(tty); // Not supported by Host
	//cg29xx_rts_gpio_control(0);
	/* ++Hemant: Baudrate Workaround */
	cg29xx_rts_gpio_control(0);
	/* --Hemant: Baudrate Workaround */
#endif /* BAUD_RATE_FIX */

	/*
	 * Store old baud rate so that we can restore it if something goes
	 * wrong.
	 */
	old_baud_rate = uart_info->baud_rate;

	skb = alloc_set_baud_rate_cmd(&baud);
	if (!skb) {
		CG2900_ERR("alloc_set_baud_rate_cmd failed");
		return -ENOMEM;
	}

	SET_BAUD_STATE(BAUD_START);
	uart_info->baud_rate = baud;

	/* Queue the sk_buffer... */
	skb_queue_tail(&uart_info->tx_queue, skb);

	/* ... and call the common UART TX function */
	CG2900_DBG_DATA_CONTENT("set_baud_rate", skb->data, skb->len);
	err = create_work_item(uart_info->wq, work_do_transmit, NULL);
	if (err) {
		/* Work item not scheduled: unwind queue, state and rate. */
		CG2900_ERR("Failed to send change baud rate cmd, freeing "
			   "skb.");
		skb = skb_dequeue_tail(&uart_info->tx_queue);
		SET_BAUD_STATE(BAUD_IDLE);
		uart_info->baud_rate = old_baud_rate;
		kfree_skb(skb);
		return err;
	}

	CG2900_DBG("Set baud rate cmd scheduled for sending.");
	printk(KERN_ERR "Set baud rate cmd scheduled for sending.");

	/*
	 * Now wait for the command complete.
	 * It will come at the new baudrate.
	 */
	/* NOTE(review): timeout is UART_RESP_TIMEOUT/50 ms — presumably a
	 * deliberate shortening for port-fail recovery (see tag below);
	 * confirm the divisor is intended. */
	wait_event_interruptible_timeout(uart_wait_queue,
					 ((BAUD_SUCCESS == uart_info->baud_rate_state) ||
					  (BAUD_FAIL == uart_info->baud_rate_state)),
					 msecs_to_jiffies(UART_RESP_TIMEOUT/50));/* ++ daniel - port fail recovery */
	if (BAUD_SUCCESS == uart_info->baud_rate_state)
		//CG2900_DBG("Baudrate changed to %d baud", baud);
		printk(KERN_ERR "Baudrate changed to %d baud", baud);
	else {
		//CG2900_ERR("Failed to set new baudrate (%d)",
		//	   uart_info->baud_rate_state);
		printk(KERN_ERR "Failed to set new baudrate (%d)",
		       uart_info->baud_rate_state);
		err = -EPERM;
	}

	/* Finally flush the TTY so we are sure that is no bad data there */
	if (tty->ops->flush_buffer) {
		CG2900_DBG("Flushing TTY after baud rate change");
		tty->ops->flush_buffer(tty);
	}

	/* Finished. Set state to IDLE */
	SET_BAUD_STATE(BAUD_IDLE);

	return err;
}
/*
 * ctcm_transmit_skb - send one packet on a CTCM channel.
 * @ch:  channel to transmit on
 * @skb: packet to send; link-level header already set up by the caller
 *
 * If the channel is not TXIDLE the packet is staged on the collect queue
 * for later transmission. Otherwise the LL header and a 2-byte block
 * length are prepended and a single-skb CCW transfer is started.
 *
 * Returns 0 on success, -EBUSY when the collect buffer is full,
 * -ENOMEM on allocation failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* Lock around the state test: an IRQ could flip the channel to
	 * TXIDLE between testing and queueing otherwise. */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		/* Channel busy: stage the packet (with LL header) on the
		 * collect queue to be sent later. */
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			/* Collect buffer full: caller must retry. */
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/* Hold a reference so upper layers cannot free the skb under us. */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	/* Prepend LL header, then the 2-byte overall block length. */
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;
	/* Data above 2G cannot be used directly (IDAL support is broken);
	 * copy into a GFP_DMA skb in that case. */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			/* Undo the pushed headers; caller keeps the skb. */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}
	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/* IDAL allocation failed: fall back to copying into the
		 * pre-allocated trans_skb. */
		if (ctcm_checkalloc_buffer(ch)) {
			/* Undo the pushed headers; they are re-added on
			 * retransmit. */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb, skb_put(ch->trans_skb,
						       skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;	/* transmit from trans_skb (CCW 0) */
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;	/* transmit skb directly (CCW 3) */
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time();
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		/* I/O start failed: roll back queueing and headers. */
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/* Header is re-added on retransmit. */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		/* trans_skb path: skb already consumed, count stats now. */
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}