static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}
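/* Undo request_queues(): release this port's hardware queues. The
 * shared TX-done queue is released only when no ports remain open. */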
static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}
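/* Initialize an L2CAP socket: a child created for an incoming
 * connection inherits its settings from the listening parent;
 * a fresh socket gets the L2CAP defaults (ERTM for SOCK_STREAM
 * unless disabled, basic mode otherwise). */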
void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
		pi->flushable = l2cap_pi(parent)->flushable;
		pi->force_active = l2cap_pi(parent)->force_active;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
		pi->flushable = BT_FLUSHABLE_OFF;
		pi->force_active = BT_POWER_FORCE_ACTIVE_ON;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
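/* Variant of l2cap_sock_init() with AMP support: it additionally
 * inherits the parent's socket buffer sizes and AMP policy, and
 * initializes the enhanced-control fields (extended window sizes,
 * ack window, local/remote config) used by the AMP-capable stack. */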
void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p parent %p", sk, parent);

	if (parent) {
		sk->sk_type = parent->sk_type;
		sk->sk_rcvbuf = parent->sk_rcvbuf;
		sk->sk_sndbuf = parent->sk_sndbuf;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->conf_state = l2cap_pi(parent)->conf_state;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
		pi->flushable = l2cap_pi(parent)->flushable;
		pi->force_active = l2cap_pi(parent)->force_active;
		pi->amp_pref = l2cap_pi(parent)->amp_pref;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
			pi->mode = L2CAP_MODE_ERTM;
			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		} else {
			pi->mode = L2CAP_MODE_BASIC;
		}
		pi->reconf_state = L2CAP_RECONF_NONE;
		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
		pi->flushable = 0;
		pi->force_active = 1;
		pi->amp_pref = BT_AMP_POLICY_REQUIRE_BR_EDR;
	}

	/* Default config options */
	sk->sk_backlog_rcv = l2cap_data_channel;
	pi->ampcon = NULL;
	pi->ampchan = NULL;
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	pi->scid = 0;
	pi->dcid = 0;
	pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
	pi->ack_win = pi->tx_win;
	pi->extended_control = 0;

	pi->local_conf.fcs = pi->fcs;
	pi->local_conf.flush_to = pi->flush_to;

	set_default_config(&pi->remote_conf);

	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
}
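/* sendmsg() handler. Connectionless (SOCK_DGRAM) data is sent
 * immediately; basic mode enforces the outgoing MTU; in ERTM and
 * streaming modes an SDU larger than the remote MPS is segmented
 * into multiple I-frame PDUs before transmission. */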
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;
		} else {
			/* Segment SDU into multiple PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
			    (pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}
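/* Hard-start-xmit handler. On big-endian ARM the skb data is handed
 * to the NPE in place; on little-endian builds it is copied into a
 * word-aligned bounce buffer with 32-bit byte swapping first. */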
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}
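/* Close a port: NPE loopback is enabled and, if needed, dummy frames
 * are injected so the NPE returns the RX buffers it still holds; TX
 * descriptors are then reclaimed before the queues are released. */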
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}