int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card, enum dma_data_direction direction) { dma_addr_t address; int i; buffer->direction = direction; for (i = 0; i < buffer->page_count; i++) { address = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE, direction); if (dma_mapping_error(card->device, address)) break; set_page_private(buffer->pages[i], address); } buffer->page_count_mapped = i; if (i < buffer->page_count) return -ENOMEM; return 0; }
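/*
 * Sketch, not from the source: the unmap counterpart implied by the mapping
 * above. fw_iso_buffer_map_dma() stashes each page's bus address with
 * set_page_private(), so teardown only needs page_count_mapped plus
 * page_private(); the function name below is hypothetical.
 */
static void fw_iso_buffer_unmap_dma_sketch(struct fw_iso_buffer *buffer,
					   struct fw_card *card)
{
	int i;

	for (i = 0; i < buffer->page_count_mapped; i++)
		dma_unmap_page(card->device, page_private(buffer->pages[i]),
			       PAGE_SIZE, buffer->direction);
	buffer->page_count_mapped = 0;
}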
static int xilly_map_single_of(struct xilly_endpoint *ep, void *ptr, size_t size, int direction, dma_addr_t *ret_dma_handle ) { dma_addr_t addr; struct xilly_mapping *this; int rc; this = kzalloc(sizeof(*this), GFP_KERNEL); if (!this) return -ENOMEM; addr = dma_map_single(ep->dev, ptr, size, direction); if (dma_mapping_error(ep->dev, addr)) { kfree(this); return -ENODEV; } this->device = ep->dev; this->dma_addr = addr; this->size = size; this->direction = direction; *ret_dma_handle = addr; rc = devm_add_action(ep->dev, xilly_of_unmap, this); if (rc) { dma_unmap_single(ep->dev, addr, size, direction); kfree(this); return rc; } return 0; }
/** * i2o_dma_map_single - Map pointer to controller and fill in I2O message. * @c: I2O controller * @ptr: pointer to the data which should be mapped * @size: size of data in bytes * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE * @sg_ptr: pointer to the SG list inside the I2O message * * This function does all necessary DMA handling and also writes the I2O * SGL elements into the I2O message. For details on DMA handling see also * dma_map_single(). The pointer sg_ptr will only be set to the end of the * SG list if the allocation was successful. * * Returns DMA address which must be checked for failures using * dma_mapping_error(). */ dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, size_t size, enum dma_data_direction direction, u32 ** sg_ptr) { u32 sg_flags; u32 *mptr = *sg_ptr; dma_addr_t dma_addr; switch (direction) { case DMA_TO_DEVICE: sg_flags = 0xd4000000; break; case DMA_FROM_DEVICE: sg_flags = 0xd0000000; break; default: return 0; } dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { *mptr++ = cpu_to_le32(0x7C020002); *mptr++ = cpu_to_le32(PAGE_SIZE); } #endif *mptr++ = cpu_to_le32(sg_flags | size); *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr)); #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr)); #endif *sg_ptr = mptr; } return dma_addr; }
/* * Put a TSO header into the TX queue. * * This is special-cased because we know that it is small enough to fit in * a single fragment, and we know it doesn't cross a page boundary. It * also allows us to not worry about end-of-packet etc. */ static int efx_tso_put_header(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, u8 *header) { if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) { buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, header, buffer->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, buffer->dma_addr))) { kfree(buffer->buf); buffer->len = 0; buffer->flags = 0; return -ENOMEM; } buffer->unmap_len = buffer->len; buffer->dma_offset = 0; buffer->flags |= EFX_TX_BUF_MAP_SINGLE; } ++tx_queue->insert_count; return 0; }
int mipi_dsi_cmd_dma_tx(struct dsi_buf *tp) { int len; #ifdef DSI_HOST_DEBUG int i; char *bp; bp = tp->data; pr_debug("%s: ", __func__); for (i = 0; i < tp->len; i++) pr_debug("%x ", *bp++); pr_debug("\n"); #endif len = tp->len; len += 3; len &= ~0x03; /* round up to a multiple of 4 */ tp->dmap = dma_map_single(&dsi_dev, tp->data, len, DMA_TO_DEVICE); if (dma_mapping_error(&dsi_dev, tp->dmap)) { pr_err("%s: dma mapping failed\n", __func__); return -ENOMEM; /* do not program a failed mapping into the DMA engine */ } INIT_COMPLETION(dsi_dma_comp); MIPI_OUTP(MIPI_DSI_BASE + 0x044, tp->dmap); MIPI_OUTP(MIPI_DSI_BASE + 0x048, len); wmb(); MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01); /* trigger */ wmb(); wait_for_completion(&dsi_dma_comp); dma_unmap_single(&dsi_dev, tp->dmap, len, DMA_TO_DEVICE); tp->dmap = 0; return tp->len; }
/** * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers * * @rx_queue: Efx RX queue * * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a * struct efx_rx_buffer for each one. Return a negative error code or 0 * on success. May fail having only inserted fewer than EFX_RX_BATCH * buffers. */ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) { struct efx_nic *efx = rx_queue->efx; struct net_device *net_dev = efx->net_dev; struct efx_rx_buffer *rx_buf; struct sk_buff *skb; int skb_len = efx->rx_buffer_len; unsigned index, count; for (count = 0; count < EFX_RX_BATCH; ++count) { index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len); if (unlikely(!skb)) return -ENOMEM; /* Adjust the SKB for padding */ skb_reserve(skb, NET_IP_ALIGN); rx_buf->len = skb_len - NET_IP_ALIGN; rx_buf->flags = 0; rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev, skb->data, rx_buf->len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&efx->pci_dev->dev, rx_buf->dma_addr))) { dev_kfree_skb_any(skb); rx_buf->u.skb = NULL; return -EIO; } ++rx_queue->added_count; ++rx_queue->alloc_skb_count; } return 0; }
static int mlx4_alloc_pages(struct mlx4_en_priv *priv, struct mlx4_en_rx_alloc *page_alloc, const struct mlx4_en_frag_info *frag_info, gfp_t _gfp) { int order; struct page *page; dma_addr_t dma; for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { gfp_t gfp = _gfp; if (order) gfp |= __GFP_COMP | __GFP_NOWARN; page = alloc_pages(gfp, order); if (likely(page)) break; if (--order < 0 || ((PAGE_SIZE << order) < frag_info->frag_size)) return -ENOMEM; } dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, DMA_FROM_DEVICE); if (dma_mapping_error(priv->ddev, dma)) { put_page(page); return -ENOMEM; } page_alloc->page_size = PAGE_SIZE << order; page_alloc->page = page; page_alloc->dma = dma; page_alloc->page_offset = 0; /* Not doing get_page() for each frag is a big win * on asymmetric workloads. Note we can not use atomic_set(). */ atomic_add(page_alloc->page_size / frag_info->frag_stride - 1, &page->_count); return 0; }
/** * nfp_net_rx_alloc_one() - Allocate and map skb for RX * @rx_ring: RX ring structure of the skb * @dma_addr: Pointer to storage for DMA address (output param) * @fl_bufsz: size of freelist buffers * * This function allocates a new skb and maps it for DMA. * * Return: allocated skb or NULL on failure. */ static struct sk_buff * nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr, unsigned int fl_bufsz) { struct nfp_net *nn = rx_ring->r_vec->nfp_net; struct sk_buff *skb; skb = netdev_alloc_skb(nn->netdev, fl_bufsz); if (!skb) { nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n"); return NULL; } *dma_addr = dma_map_single(&nn->pdev->dev, skb->data, fl_bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { dev_kfree_skb_any(skb); nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); return NULL; } return skb; }
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, struct bgmac_slot_info *slot) { struct device *dma_dev = bgmac->core->dma_dev; struct sk_buff *skb; dma_addr_t dma_addr; struct bgmac_rx_header *rx; /* Alloc skb */ skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); if (!skb) return -ENOMEM; /* Poison - if everything goes fine, hardware will overwrite it */ rx = (struct bgmac_rx_header *)skb->data; rx->len = cpu_to_le16(0xdead); rx->flags = cpu_to_le16(0xbeef); /* Map skb for the DMA */ dma_addr = dma_map_single(dma_dev, skb->data, BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dma_addr)) { bgmac_err(bgmac, "DMA mapping error\n"); dev_kfree_skb(skb); return -ENOMEM; } /* Update the slot */ slot->skb = skb; slot->dma_addr = dma_addr; if (slot->dma_addr & 0xC0000000) bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); return 0; }
/** * dpaa2_io_store_create() - Create the dma memory storage for dequeue result. * @max_frames: the maximum number of dequeue results to be stored; must be <= 16. * @dev: the device to allow mapping/unmapping the DMAable region. * * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)". * The 'dpaa2_io_store' returned is a DPIO service managed object. * * Return a pointer to the dpaa2_io_store struct for the successfully created * storage memory, or NULL on error. */ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, struct device *dev) { struct dpaa2_io_store *ret; size_t size; if (!max_frames || (max_frames > 16)) return NULL; ret = kmalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; ret->max = max_frames; size = max_frames * sizeof(struct dpaa2_dq) + 64; ret->alloced_addr = kzalloc(size, GFP_KERNEL); if (!ret->alloced_addr) { kfree(ret); return NULL; } ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64); ret->paddr = dma_map_single(dev, ret->vaddr, sizeof(struct dpaa2_dq) * max_frames, DMA_FROM_DEVICE); if (dma_mapping_error(dev, ret->paddr)) { kfree(ret->alloced_addr); kfree(ret); return NULL; } ret->idx = 0; ret->dev = dev; return ret; }
static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl, gfp_t gfp) { struct wcn36xx_dxe_desc *dxe = ctl->desc; struct sk_buff *skb; skb = alloc_skb(WCN36XX_PKT_SIZE, gfp); if (skb == NULL) return -ENOMEM; dxe->dst_addr_l = dma_map_single(dev, skb_tail_pointer(skb), WCN36XX_PKT_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dxe->dst_addr_l)) { dev_err(dev, "unable to map skb\n"); kfree_skb(skb); return -ENOMEM; } ctl->skb = skb; return 0; }
/** * dma_map_sg - map a set of SG buffers for streaming mode DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @sg: list of buffers * @nents: number of buffers to map * @dir: DMA transfer direction * * Map a set of buffers described by scatterlist in streaming mode for DMA. * This is the scatter-gather version of the dma_map_single interface. * Here the scatter gather list elements are each tagged with the * appropriate dma address and length. They are obtained via * sg_dma_{address,length}. * * Device ownership issues as mentioned for dma_map_single are the same * here. */ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { dma_addr_t dma_address; struct scatterlist *s; int i, j; BUG_ON(!valid_dma_direction(dir)); for_each_sg(sg, s, nents, i) { dma_address = __dma_map_page(dev, sg_page(s), s->offset, s->length, dir); /* When the page doesn't have a valid PFN, we assume that * dma_address is already present. */ if (pfn_valid(page_to_pfn(sg_page(s)))) s->dma_address = dma_address; #ifdef CONFIG_NEED_SG_DMA_LENGTH s->dma_length = s->length; #endif if (dma_mapping_error(dev, s->dma_address)) goto bad_mapping; } return nents; bad_mapping: /* error path: unmap whatever was mapped so far and report failure */ for_each_sg(sg, s, i, j) __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); return 0; }
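/*
 * Sketch, not from the source: a typical caller of the interface documented
 * above. It maps a scatterlist, gives up if nothing was mapped, walks the
 * mapped entries via sg_dma_address()/sg_dma_len(), and unmaps with the same
 * nents it passed in. example_do_sg_dma() and its arguments are hypothetical.
 */
static int example_do_sg_dma(struct device *dev, struct scatterlist *sgl,
			     int nents)
{
	struct scatterlist *s;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;

	for_each_sg(sgl, s, mapped, i)
		/* a real driver would program these into its descriptors */
		dev_dbg(dev, "seg %d: %pad len %u\n",
			i, &sg_dma_address(s), sg_dma_len(s));

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}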
/* * DMA read/write transfers with ECC support */ static int lpc32xx_dma_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages, int read) { struct nand_chip *chip = mtd->priv; struct lpc32xx_nand_host *host = chip->priv; uint32_t config, tmpreg; dma_addr_t buf_phy; int i, timeout, dma_mapped = 0, status = 0; /* Map DMA buffer */ if (likely((void *) buf < high_memory)) { buf_phy = dma_map_single(mtd->dev.parent, buf, mtd->writesize, read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); if (unlikely(dma_mapping_error(mtd->dev.parent, buf_phy))) { dev_err(mtd->dev.parent, "Unable to map DMA buffer\n"); dma_mapped = 0; } else dma_mapped = 1; } /* If a buffer can't be mapped, use the local buffer */ if (!dma_mapped) { buf_phy = host->data_buf_dma; if (!read) memcpy(host->data_buf, buf, mtd->writesize); } if (read) config = DMAC_CHAN_ITC | DMAC_CHAN_IE | DMAC_CHAN_FLOW_D_P2M | DMAC_DEST_PERIP (0) | DMAC_SRC_PERIP(DMA_PERID_NAND1) | DMAC_CHAN_ENABLE; else config = DMAC_CHAN_ITC | DMAC_CHAN_IE | DMAC_CHAN_FLOW_D_M2P | DMAC_DEST_PERIP(DMA_PERID_NAND1) | DMAC_SRC_PERIP (0) | DMAC_CHAN_ENABLE; /* DMA mode with ECC enabled */ tmpreg = __raw_readl(SLC_CFG(host->io_base)); __raw_writel(SLCCFG_ECC_EN | SLCCFG_DMA_ECC | tmpreg, SLC_CFG(host->io_base)); /* Clear initial ECC */ __raw_writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base)); /* Prepare DMA descriptors */ lpc32xx_nand_dma_configure(mtd, buf_phy, chip->ecc.steps, read); /* Setup DMA direction and burst mode */ if (read) __raw_writel(__raw_readl(SLC_CFG(host->io_base)) | SLCCFG_DMA_DIR, SLC_CFG(host->io_base)); else __raw_writel(__raw_readl(SLC_CFG(host->io_base)) & ~SLCCFG_DMA_DIR, SLC_CFG(host->io_base)); __raw_writel(__raw_readl(SLC_CFG(host->io_base)) | SLCCFG_DMA_BURST, SLC_CFG(host->io_base)); /* Transfer size is data area only */ __raw_writel(mtd->writesize, SLC_TC(host->io_base)); /* Start transfer in the NAND controller */ __raw_writel(__raw_readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START, SLC_CTRL(host->io_base)); /* Start DMA to process NAND controller DMA FIFO */ host->dmapending = 0; lpc32xx_dma_start_xfer(host->dmach, config); /* * On some systems, the DMA transfer will be very fast, so there is no * point in waiting for the transfer to complete using the interrupt * method. It's best to just poll the transfer here to prevent several * costly context changes. This is especially true for systems that * use small page devices or NAND devices with very fast access. */ if (host->ncfg->polled_completion) { timeout = LPC32XX_DMA_SIMPLE_TIMEOUT; while ((timeout > 0) && lpc32xx_dma_is_active(host->dmach)) timeout--; if (timeout == 0) { dev_err(mtd->dev.parent, "DMA transfer timeout error\n"); status = -EIO; /* Switch to non-polled mode */ host->ncfg->polled_completion = false; } } if (!host->ncfg->polled_completion) { /* Wait till DMA transfer is done or timeout occurs */ wait_event_timeout(host->dma_waitq, host->dmapending, msecs_to_jiffies(LPC32XX_DMA_WAIT_TIMEOUT_MS)); if (host->dma_xfer_status != 0) { dev_err(mtd->dev.parent, "DMA transfer error\n"); status = -EIO; } } /* * The DMA is finished, but the NAND controller may still have * buffered data. Wait until all the data is sent. 
*/ timeout = LPC32XX_DMA_SIMPLE_TIMEOUT; while ((__raw_readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) && (timeout > 0)) timeout--; if (timeout == 0) { dev_err(mtd->dev.parent, "FIFO held data too long\n"); status = -EIO; } /* Read last calculated ECC value */ if (read) host->ecc_buf[chip->ecc.steps - 1] = __raw_readl(SLC_ECC(host->io_base)); else { for (i = 0; i < LPC32XX_DMA_ECC_REP_READ; i++) host->ecc_buf[chip->ecc.steps - 1] = __raw_readl(SLC_ECC(host->io_base)); } /* * For reads, get the OOB data. For writes, the data will be written * later */ if (read) chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); /* Flush DMA link list */ lpc32xx_dma_flush_llist(host->dmach); if (__raw_readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO || __raw_readl(SLC_TC(host->io_base))) { /* Something is left in the FIFO, something is wrong */ dev_err(mtd->dev.parent, "DMA FIFO failure\n"); status = -EIO; } if (dma_mapped) dma_unmap_single(mtd->dev.parent, buf_phy, mtd->writesize, read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); else if (read) memcpy(buf, host->data_buf, mtd->writesize); /* Stop DMA & HW ECC */ __raw_writel(__raw_readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START, SLC_CTRL(host->io_base)); __raw_writel(tmpreg, SLC_CFG(host->io_base)); return status; }
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) { struct mlx5_wq_cyc *wq = &sq->wq; u16 pi = sq->pc & wq->sz_m1; struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg; u8 opcode = MLX5_OPCODE_SEND; dma_addr_t dma_addr = 0; bool bf = false; u16 headlen; u16 ds_cnt; u16 ihs; int i; memset(wqe, 0, sizeof(*wqe)); if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; else sq->stats.csum_offload_none++; if (sq->cc != sq->prev_cc) { sq->prev_cc = sq->cc; sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0; } if (skb_is_gso(skb)) { u32 payload_len; eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); opcode = MLX5_OPCODE_LSO; ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); payload_len = skb->len - ihs; MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; sq->stats.tso_packets++; sq->stats.tso_bytes += payload_len; } else { bf = sq->bf_budget && !skb->xmit_more && !skb_shinfo(skb)->nr_frags; ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs); skb_pull_inline(skb, ihs); eseg->inline_hdr_sz = cpu_to_be16(ihs); ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start), MLX5_SEND_WQE_DS); dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; MLX5E_TX_SKB_CB(skb)->num_dma = 0; headlen = skb_headlen(skb); if (headlen) { dma_addr = dma_map_single(sq->pdev, skb->data, headlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) goto dma_unmap_wqe_err; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; dseg->byte_count = cpu_to_be32(headlen); mlx5e_dma_push(sq, dma_addr, headlen); MLX5E_TX_SKB_CB(skb)->num_dma++; dseg++; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; int fsz = skb_frag_size(frag); dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) goto dma_unmap_wqe_err; dseg->addr = cpu_to_be64(dma_addr); dseg->lkey = sq->mkey_be; dseg->byte_count = cpu_to_be32(fsz); mlx5e_dma_push(sq, dma_addr, fsz); MLX5E_TX_SKB_CB(skb)->num_dma++; dseg++; } ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); sq->skb[pi] = skb; MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes); if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) { netif_tx_stop_queue(sq->txq); sq->stats.stopped++; } if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { int bf_sz = 0; if (bf && sq->uar_bf_map) bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3; cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; mlx5e_tx_notify_hw(sq, wqe, bf_sz); } /* fill sq edge with nops to avoid wqe wrap around */ while ((sq->pc & wq->sz_m1) > sq->edge) mlx5e_send_nop(sq, false); sq->bf_budget = bf ? sq->bf_budget - 1 : 0; sq->stats.packets++; return NETDEV_TX_OK; dma_unmap_wqe_err: sq->stats.dropped++; mlx5e_dma_unmap_wqe_err(sq, skb); dev_kfree_skb_any(skb); return NETDEV_TX_OK; }
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; struct ath10k_htt_txbuf *txbuf; struct htt_data_tx_desc_frag *frags; bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; u16 freq = 0; int skb_len; u32 frags_paddr = 0; u32 txbuf_paddr; struct htt_msdu_ext_desc *ext_desc = NULL; spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) goto err; msdu_id = res; prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); txbuf = &htt->txbuf.vaddr[msdu_id]; txbuf_paddr = htt->txbuf.paddr + (sizeof(struct ath10k_htt_txbuf) * msdu_id); if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && txmode == ATH10K_HW_TXRX_RAW && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; goto err_free_msdu_id; } if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) freq = ar->scan.roc_freq; switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; /* pass through */ case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { memset(&htt->frag_desc.vaddr[msdu_id], 0, sizeof(struct htt_msdu_ext_desc)); frags = (struct htt_data_tx_desc_frag *) &htt->frag_desc.vaddr[msdu_id].frags; ext_desc = &htt->frag_desc.vaddr[msdu_id]; frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr); frags[0].tword_addr.paddr_hi = 0; frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); frags_paddr = htt->frag_desc.paddr + (sizeof(struct htt_msdu_ext_desc) * msdu_id); } else { frags = txbuf->frags; frags[0].dword_addr.paddr = __cpu_to_le32(skb_cb->paddr); frags[0].dword_addr.len = __cpu_to_le32(msdu->len); frags[1].dword_addr.paddr = 0; frags[1].dword_addr.len = 0; frags_paddr = txbuf_paddr; } flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; frags_paddr = skb_cb->paddr; break; } /* Normally all commands go through HTC which manages tx credits for * each endpoint and notifies when tx is completed. * * HTT endpoint is creditless so there's no need to care about HTC * flags. In that case it is trivial to fill the HTC header here. * * MSDU transmission is considered completed upon HTT event. This * implies no relevant resources can be freed until after the event is * received. That's why HTC tx completion handler itself is ignored by * setting NULL to transfer_context for all sg items. * * There is simply no point in pushing HTT TX_FRM through HTC tx path * as it's a waste of resources. 
By bypassing HTC it is possible to * avoid extra memory allocations, compress data structures and thus * improve performance. */ txbuf->htc_hdr.eid = htt->eid; txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) + sizeof(txbuf->cmd_tx) + prefetch_len); txbuf->htc_hdr.flags = 0; if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); if (msdu->ip_summed == CHECKSUM_PARTIAL && !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; if (ar->hw_params.continuous_frag_desc) ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; } /* Prevent firmware from sending up tx inspection requests. There's * nothing ath10k can do with frames requested for inspection so force * it to simply rely a regular tx completion with discard status. */ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; txbuf->cmd_tx.flags0 = flags0; txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); if (ath10k_mac_tx_frm_has_freq(ar)) { txbuf->cmd_tx.offchan_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); txbuf->cmd_tx.offchan_tx.freq = __cpu_to_le16(freq); } else { txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); } skb_len = msdu->len; trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", flags0, flags1, skb_len, msdu_id, frags_paddr, (u32)skb_cb->paddr, vdev_id, tid, freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, skb_len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); trace_ath10k_tx_payload(ar, msdu->data, msdu->len); sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; sg_items[0].vaddr = &txbuf->htc_hdr; sg_items[0].paddr = txbuf_paddr + sizeof(txbuf->frags); sg_items[0].len = sizeof(txbuf->htc_hdr) + sizeof(txbuf->cmd_hdr) + sizeof(txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; sg_items[1].vaddr = msdu->data; sg_items[1].paddr = skb_cb->paddr; sg_items[1].len = prefetch_len; res = ath10k_hif_tx_sg(htt->ar, htt->ar->htc.endpoint[htt->eid].ul_pipe_id, sg_items, ARRAY_SIZE(sg_items)); if (res) goto err_unmap_msdu; #ifdef CONFIG_ATH10K_DEBUGFS ar->debug.tx_bytes += skb_len; #endif return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: ath10k_htt_tx_free_msdu_id(htt, msdu_id); err: return res; }
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); int len = 0; int msdu_id = -1; int res; int skb_len; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) goto err; msdu_id = res; if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; goto err_free_msdu_id; } skb_len = msdu->len; skb_cb->paddr = dma_map_single(dev, msdu->data, skb_len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; goto err_free_txdesc; } skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; memset(cmd, 0, len); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(skb_len); cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, skb_len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; #ifdef CONFIG_ATH10K_DEBUGFS ar->debug.tx_bytes += skb_len; #endif return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_txdesc: dev_kfree_skb_any(txdesc); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err: return res; }
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) { struct page *block; dma_addr_t phys = 0; int blk_idx = 0; int order, num_of_pages; int dma_enabled; if (mvm->fw_paging_db[0].fw_paging_block) return 0; dma_enabled = is_device_dma_capable(mvm->trans->dev); /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; mvm->num_of_paging_blk = ((num_of_pages - 1) / NUM_OF_PAGE_PER_GROUP) + 1; mvm->num_of_pages_in_last_blk = num_of_pages - NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); IWL_DEBUG_FW(mvm, "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", mvm->num_of_paging_blk, mvm->num_of_pages_in_last_blk); /* allocate block of 4Kbytes for paging CSS */ order = get_order(FW_PAGING_SIZE); block = alloc_pages(GFP_KERNEL, order); if (!block) { /* free all the previous pages since we failed */ iwl_free_fw_paging(mvm); return -ENOMEM; } mvm->fw_paging_db[blk_idx].fw_paging_block = block; mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE; if (dma_enabled) { phys = dma_map_page(mvm->trans->dev, block, 0, PAGE_SIZE << order, DMA_BIDIRECTIONAL); if (dma_mapping_error(mvm->trans->dev, phys)) { /* * free the previous pages and the current one since * we failed to map_page. */ iwl_free_fw_paging(mvm); return -ENOMEM; } mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; } else { mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG | blk_idx << BLOCK_2_EXP_SIZE; } IWL_DEBUG_FW(mvm, "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", order); /* * allocate blocks in dram. * since that CSS allocated in fw_paging_db[0] loop start from index 1 */ for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { /* allocate block of PAGING_BLOCK_SIZE (32K) */ order = get_order(PAGING_BLOCK_SIZE); block = alloc_pages(GFP_KERNEL, order); if (!block) { /* free all the previous pages since we failed */ iwl_free_fw_paging(mvm); return -ENOMEM; } mvm->fw_paging_db[blk_idx].fw_paging_block = block; mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE; if (dma_enabled) { phys = dma_map_page(mvm->trans->dev, block, 0, PAGE_SIZE << order, DMA_BIDIRECTIONAL); if (dma_mapping_error(mvm->trans->dev, phys)) { /* * free the previous pages and the current one * since we failed to map_page. */ iwl_free_fw_paging(mvm); return -ENOMEM; } mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; } else { mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG | blk_idx << BLOCK_2_EXP_SIZE; } IWL_DEBUG_FW(mvm, "Paging: allocated 32K bytes (order %d) for firmware paging.\n", order); } return 0; }
/* * Init JobR independent of platform property detection */ static int caam_jr_init(struct device *dev) { struct caam_drv_private_jr *jrp; dma_addr_t inpbusaddr, outbusaddr; int i, error; jrp = dev_get_drvdata(dev); /* Connect job ring interrupt handler. */ for_each_possible_cpu(i) tasklet_init(&jrp->irqtask[i], caam_jr_dequeue, (unsigned long)dev); error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, "caam-jr", dev); if (error) { dev_err(dev, "can't connect JobR %d interrupt (%d)\n", jrp->ridx, jrp->irq); irq_dispose_mapping(jrp->irq); jrp->irq = 0; return -EINVAL; } error = caam_reset_hw_jr(dev); if (error) return error; jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH, GFP_KERNEL | GFP_DMA); jrp->outring = kzalloc(sizeof(struct jr_outentry) * JOBR_DEPTH, GFP_KERNEL | GFP_DMA); jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH, GFP_KERNEL); if ((jrp->inpring == NULL) || (jrp->outring == NULL) || (jrp->entinfo == NULL)) { dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx); return -ENOMEM; } for (i = 0; i < JOBR_DEPTH; i++) jrp->entinfo[i].desc_addr_dma = !0; /* Setup rings */ inpbusaddr = dma_map_single(dev, jrp->inpring, sizeof(u32 *) * JOBR_DEPTH, DMA_TO_DEVICE); if (dma_mapping_error(dev, inpbusaddr)) { dev_err(dev, "caam_jr_init(): can't map input ring\n"); kfree(jrp->inpring); kfree(jrp->outring); kfree(jrp->entinfo); return -EIO; } outbusaddr = dma_map_single(dev, jrp->outring, sizeof(struct jr_outentry) * JOBR_DEPTH, DMA_FROM_DEVICE); if (dma_mapping_error(dev, outbusaddr)) { dev_err(dev, "caam_jr_init(): can't map output ring\n"); dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH, DMA_TO_DEVICE); kfree(jrp->inpring); kfree(jrp->outring); kfree(jrp->entinfo); return -EIO; } jrp->inp_ring_write_index = 0; jrp->out_ring_read_index = 0; jrp->head = 0; jrp->tail = 0; wr_reg64(&jrp->rregs->inpring_base, inpbusaddr); wr_reg64(&jrp->rregs->outring_base, outbusaddr); wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH); jrp->ringsize = JOBR_DEPTH; spin_lock_init(&jrp->inplock); spin_lock_init(&jrp->outlock); /* Select interrupt coalescing parameters */ setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC | (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); jrp->assign = JOBR_UNASSIGNED; return 0; }
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); struct greth_bd *bdp; u32 status, dma_addr; int curr_tx, nr_frags, i, err = NETDEV_TX_OK; unsigned long flags; u16 tx_last; nr_frags = skb_shinfo(skb)->nr_frags; tx_last = greth->tx_last; rmb(); /* tx_last is updated by the poll task */ if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) { netif_stop_queue(dev); err = NETDEV_TX_BUSY; goto out; } if (netif_msg_pktdata(greth)) greth_print_tx_packet(skb); if (unlikely(skb->len > MAX_FRAME_SIZE)) { dev->stats.tx_errors++; goto out; } /* Save skb pointer. */ greth->tx_skbuff[greth->tx_next] = skb; /* Linear buf */ if (nr_frags != 0) status = GRETH_TXBD_MORE; else status = GRETH_BD_IE; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= GRETH_TXBD_CSALL; status |= skb_headlen(skb) & GRETH_BD_LEN; if (greth->tx_next == GRETH_TXBD_NUM_MASK) status |= GRETH_BD_WR; bdp = greth->tx_bd_base + greth->tx_next; greth_write_bd(&bdp->stat, status); dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(greth->dev, dma_addr))) goto map_error; greth_write_bd(&bdp->addr, dma_addr); curr_tx = NEXT_TX(greth->tx_next); /* Frags */ for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; greth->tx_skbuff[curr_tx] = NULL; bdp = greth->tx_bd_base + curr_tx; status = GRETH_BD_EN; if (skb->ip_summed == CHECKSUM_PARTIAL) status |= GRETH_TXBD_CSALL; status |= skb_frag_size(frag) & GRETH_BD_LEN; /* Wrap around descriptor ring */ if (curr_tx == GRETH_TXBD_NUM_MASK) status |= GRETH_BD_WR; /* More fragments left */ if (i < nr_frags - 1) status |= GRETH_TXBD_MORE; else status |= GRETH_BD_IE; /* enable IRQ on last fragment */ greth_write_bd(&bdp->stat, status); dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(greth->dev, dma_addr))) goto frag_map_error; greth_write_bd(&bdp->addr, dma_addr); curr_tx = NEXT_TX(curr_tx); } wmb(); /* Enable the descriptor chain by enabling the first descriptor */ bdp = greth->tx_bd_base + greth->tx_next; greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ greth->tx_next = curr_tx; greth_enable_tx_and_irq(greth); spin_unlock_irqrestore(&greth->devlock, flags); return NETDEV_TX_OK; frag_map_error: /* Unmap SKB mappings that succeeded and disable descriptor */ for (i = 0; greth->tx_next + i != curr_tx; i++) { bdp = greth->tx_bd_base + greth->tx_next + i; dma_unmap_single(greth->dev, greth_read_bd(&bdp->addr), greth_read_bd(&bdp->stat) & GRETH_BD_LEN, DMA_TO_DEVICE); greth_write_bd(&bdp->stat, 0); } map_error: if (net_ratelimit()) dev_warn(greth->dev, "Could not create TX DMA mapping\n"); dev_kfree_skb(skb); out: return err; }
/* get a split ipad/opad key Split key generation----------------------------------------------- [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 [01] 0x04000014 key: class2->keyreg len=20 @0xffe01000 [03] 0x84410014 operation: cls2-op sha1 hmac init dec [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm [05] 0xa4000001 jump: class2 local all ->1 [06] [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 @0xffe04000 */ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, int split_key_pad_len, const u8 *key_in, u32 keylen, u32 alg_op) { u32 *desc; struct split_key_result result; dma_addr_t dma_addr_in, dma_addr_out; int ret = -ENOMEM; desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); if (!desc) { dev_err(jrdev, "unable to allocate key input memory\n"); return ret; } dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, dma_addr_in)) { dev_err(jrdev, "unable to map key input memory\n"); goto out_free; } dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, dma_addr_out)) { dev_err(jrdev, "unable to map key output memory\n"); goto out_unmap_in; } init_job_desc(desc, 0); append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); /* Sets MDHA up into an HMAC-INIT */ append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT); /* * do a FIFO_LOAD of zero, this will trigger the internal key expansion * into both pads inside MDHA */ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); /* * FIFO_STORE with the explicit split-key content store * (0x26 output type) */ append_fifo_store(desc, dma_addr_out, split_key_len, LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif result.err = 0; init_completion(&result.completion); ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ wait_for_completion_interruptible(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_out, split_key_pad_len, 1); #endif } dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, DMA_FROM_DEVICE); out_unmap_in: dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); out_free: kfree(desc); return ret; }
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev) { struct port *port = dev_to_port(dev); unsigned int txreadyq = port->plat->txreadyq; int len, offset, bytes, n; void *mem; u32 phys; struct desc *desc; #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name); #endif if (unlikely(skb->len > HDLC_MAX_MRU)) { dev_kfree_skb(skb); dev->stats.tx_errors++; return NETDEV_TX_OK; } debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len); len = skb->len; #ifdef __ARMEB__ offset = 0; bytes = len; mem = skb->data; #else offset = (int)skb->data & 3; bytes = ALIGN(offset + len, 4); if (!(mem = kmalloc(bytes, GFP_ATOMIC))) { dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); dev_kfree_skb(skb); #endif phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); if (dma_mapping_error(&dev->dev, phys)) { #ifdef __ARMEB__ dev_kfree_skb(skb); #else kfree(mem); #endif dev->stats.tx_dropped++; return NETDEV_TX_OK; } n = queue_get_desc(txreadyq, port, 1); BUG_ON(n < 0); desc = tx_desc_ptr(port, n); #ifdef __ARMEB__ port->tx_buff_tab[n] = skb; #else port->tx_buff_tab[n] = mem; #endif desc->data = phys + offset; desc->buf_len = desc->pkt_len = len; wmb(); queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); if (qmgr_stat_below_low_watermark(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name); #endif netif_stop_queue(dev); if (!qmgr_stat_below_low_watermark(txreadyq)) { #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n", dev->name); #endif netif_wake_queue(dev); } } #if DEBUG_TX printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name); #endif return NETDEV_TX_OK; }
static int hss_hdlc_poll(struct napi_struct *napi, int budget) { struct port *port = container_of(napi, struct port, napi); struct net_device *dev = port->netdev; unsigned int rxq = queue_ids[port->id].rx; unsigned int rxfreeq = queue_ids[port->id].rxfree; int received = 0; #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name); #endif while (received < budget) { struct sk_buff *skb; struct desc *desc; int n; #ifdef __ARMEB__ struct sk_buff *temp; u32 phys; #endif if ((n = queue_get_desc(rxq, port, 0)) < 0) { #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_poll" " napi_complete\n", dev->name); #endif napi_complete(napi); qmgr_enable_irq(rxq); if (!qmgr_stat_empty(rxq) && napi_reschedule(napi)) { #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_poll" " napi_reschedule succeeded\n", dev->name); #endif qmgr_disable_irq(rxq); continue; } #if DEBUG_RX printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n", dev->name); #endif return received; } desc = rx_desc_ptr(port, n); #if 0 if (desc->error_count) printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X" " errors %u\n", dev->name, desc->status, desc->error_count); #endif skb = NULL; switch (desc->status) { case 0: #ifdef __ARMEB__ if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) { phys = dma_map_single(&dev->dev, skb->data, RX_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(&dev->dev, phys)) { dev_kfree_skb(skb); skb = NULL; } } #else skb = netdev_alloc_skb(dev, desc->pkt_len); #endif if (!skb) dev->stats.rx_dropped++; break; case ERR_HDLC_ALIGN: case ERR_HDLC_ABORT: dev->stats.rx_frame_errors++; dev->stats.rx_errors++; break; case ERR_HDLC_FCS: dev->stats.rx_crc_errors++; dev->stats.rx_errors++; break; case ERR_HDLC_TOO_LONG: dev->stats.rx_length_errors++; dev->stats.rx_errors++; break; default: netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n", desc->status, desc->error_count); dev->stats.rx_errors++; } if (!skb) { desc->buf_len = RX_SIZE; desc->pkt_len = desc->status = 0; queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); continue; } #ifdef __ARMEB__ temp = skb; skb = port->rx_buff_tab[n]; dma_unmap_single(&dev->dev, desc->data, RX_SIZE, DMA_FROM_DEVICE); #else dma_sync_single_for_cpu(&dev->dev, desc->data, RX_SIZE, DMA_FROM_DEVICE); memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], ALIGN(desc->pkt_len, 4) / 4); #endif skb_put(skb, desc->pkt_len); debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len); skb->protocol = hdlc_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; netif_receive_skb(skb); #ifdef __ARMEB__ port->rx_buff_tab[n] = temp; desc->data = phys; #endif desc->buf_len = RX_SIZE; desc->pkt_len = 0; queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); received++; } #if DEBUG_RX printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n"); #endif return received; }
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev_priv(netdev); union ibmveth_buf_desc desc; unsigned long lpar_rc; unsigned long correlator; unsigned long flags; unsigned int retry_count; unsigned int tx_dropped = 0; unsigned int tx_bytes = 0; unsigned int tx_packets = 0; unsigned int tx_send_failed = 0; unsigned int tx_map_failed = 0; int used_bounce = 0; unsigned long data_dma_addr; desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; if (skb->ip_summed == CHECKSUM_PARTIAL && ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { ibmveth_error_printk("tx: failed to checksum packet\n"); tx_dropped++; goto out; } if (skb->ip_summed == CHECKSUM_PARTIAL) { unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); buf[0] = 0; buf[1] = 0; } data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { if (!firmware_has_feature(FW_FEATURE_CMO)) ibmveth_error_printk("tx: unable to map xmit buffer\n"); skb_copy_from_linear_data(skb, adapter->bounce_buffer, skb->len); desc.fields.address = adapter->bounce_buffer_dma; tx_map_failed++; used_bounce = 1; wmb(); } else desc.fields.address = data_dma_addr; correlator = 0; retry_count = 1024; do { lpar_rc = h_send_logical_lan(adapter->vdev->unit_address, desc.desc, 0, 0, 0, 0, 0, correlator, &correlator); } while ((lpar_rc == H_BUSY) && (retry_count--)); if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n", (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0, skb->len, desc.fields.address); tx_send_failed++; tx_dropped++; } else { tx_packets++; tx_bytes += skb->len; netdev->trans_start = jiffies; } if (!used_bounce) dma_unmap_single(&adapter->vdev->dev, data_dma_addr, skb->len, DMA_TO_DEVICE); out: spin_lock_irqsave(&adapter->stats_lock, flags); netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; netdev->stats.tx_packets += tx_packets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; spin_unlock_irqrestore(&adapter->stats_lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; }
/** * nfp_net_tx() - Main transmit entry point * @skb: SKB to transmit * @netdev: netdev structure * * Return: NETDEV_TX_OK on success. */ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); const struct skb_frag_struct *frag; struct nfp_net_r_vector *r_vec; struct nfp_net_tx_desc *txd, txdg; struct nfp_net_tx_buf *txbuf; struct nfp_net_tx_ring *tx_ring; struct netdev_queue *nd_q; dma_addr_t dma_addr; unsigned int fsize; int f, nr_frags; int wr_idx; u16 qidx; qidx = skb_get_queue_mapping(skb); tx_ring = &nn->tx_rings[qidx]; r_vec = tx_ring->r_vec; nd_q = netdev_get_tx_queue(nn->netdev, qidx); nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n", qidx, tx_ring->wr_p, tx_ring->rd_p); netif_tx_stop_queue(nd_q); u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_busy++; u64_stats_update_end(&r_vec->tx_sync); return NETDEV_TX_BUSY; } /* Start with the head skbuf */ dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (dma_mapping_error(&nn->pdev->dev, dma_addr)) goto err_free; wr_idx = tx_ring->wr_p % tx_ring->cnt; /* Stash the soft descriptor of the head then initialize it */ txbuf = &tx_ring->txbufs[wr_idx]; txbuf->skb = skb; txbuf->dma_addr = dma_addr; txbuf->fidx = -1; txbuf->pkt_cnt = 1; txbuf->real_len = skb->len; /* Build TX descriptor */ txd = &tx_ring->txds[wr_idx]; txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0; txd->dma_len = cpu_to_le16(skb_headlen(skb)); nfp_desc_set_dma_addr(txd, dma_addr); txd->data_len = cpu_to_le16(skb->len); txd->flags = 0; txd->mss = 0; txd->l4_offset = 0; nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb); nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb); if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { txd->flags |= PCIE_DESC_TX_VLAN; txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); } /* Gather DMA */ if (nr_frags > 0) { /* all descs must match except for in addr, length and eop */ txdg = *txd; for (f = 0; f < nr_frags; f++) { frag = &skb_shinfo(skb)->frags[f]; fsize = skb_frag_size(frag); dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0, fsize, DMA_TO_DEVICE); if (dma_mapping_error(&nn->pdev->dev, dma_addr)) goto err_unmap; wr_idx = (wr_idx + 1) % tx_ring->cnt; tx_ring->txbufs[wr_idx].skb = skb; tx_ring->txbufs[wr_idx].dma_addr = dma_addr; tx_ring->txbufs[wr_idx].fidx = f; txd = &tx_ring->txds[wr_idx]; *txd = txdg; txd->dma_len = cpu_to_le16(fsize); nfp_desc_set_dma_addr(txd, dma_addr); txd->offset_eop = (f == nr_frags - 1) ? 
PCIE_DESC_TX_EOP : 0; } u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_gather++; u64_stats_update_end(&r_vec->tx_sync); } netdev_tx_sent_queue(nd_q, txbuf->real_len); tx_ring->wr_p += nr_frags + 1; if (nfp_net_tx_ring_should_stop(tx_ring)) nfp_net_tx_ring_stop(nd_q, tx_ring); tx_ring->wr_ptr_add += nr_frags + 1; if (!skb->xmit_more || netif_xmit_stopped(nd_q)) { /* force memory write before we let HW know */ wmb(); nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add); tx_ring->wr_ptr_add = 0; } skb_tx_timestamp(skb); return NETDEV_TX_OK; err_unmap: --f; while (f >= 0) { frag = &skb_shinfo(skb)->frags[f]; dma_unmap_page(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); tx_ring->txbufs[wr_idx].skb = NULL; tx_ring->txbufs[wr_idx].dma_addr = 0; tx_ring->txbufs[wr_idx].fidx = -2; wr_idx = wr_idx - 1; if (wr_idx < 0) wr_idx += tx_ring->cnt; } dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_headlen(skb), DMA_TO_DEVICE); tx_ring->txbufs[wr_idx].skb = NULL; tx_ring->txbufs[wr_idx].dma_addr = 0; tx_ring->txbufs[wr_idx].fidx = -2; err_free: nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n"); u64_stats_update_begin(&r_vec->tx_sync); r_vec->tx_errors++; u64_stats_update_end(&r_vec->tx_sync); dev_kfree_skb_any(skb); return NETDEV_TX_OK; }
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) { u32 i; u32 count = pool->size - atomic_read(&pool->available); u32 buffers_added = 0; struct sk_buff *skb; unsigned int free_index, index; u64 correlator; unsigned long lpar_rc; dma_addr_t dma_addr; mb(); for(i = 0; i < count; ++i) { union ibmveth_buf_desc desc; skb = alloc_skb(pool->buff_size, GFP_ATOMIC); if(!skb) { ibmveth_debug_printk("replenish: unable to allocate skb\n"); adapter->replenish_no_mem++; break; } free_index = pool->consumer_index; pool->consumer_index = (pool->consumer_index + 1) % pool->size; index = pool->free_map[free_index]; ibmveth_assert(index != IBM_VETH_INVALID_MAP); ibmveth_assert(pool->skbuff[index] == NULL); dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, pool->buff_size, DMA_FROM_DEVICE); if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) goto failure; pool->free_map[free_index] = IBM_VETH_INVALID_MAP; pool->dma_addr[index] = dma_addr; pool->skbuff[index] = skb; correlator = ((u64)pool->index << 32) | index; *(u64*)skb->data = correlator; desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; desc.fields.address = dma_addr; lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); if (lpar_rc != H_SUCCESS) goto failure; else { buffers_added++; adapter->replenish_add_buff_success++; } } mb(); atomic_add(buffers_added, &(pool->available)); return; failure: pool->free_map[free_index] = index; pool->skbuff[index] = NULL; if (pool->consumer_index == 0) pool->consumer_index = pool->size - 1; else pool->consumer_index--; if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) dma_unmap_single(&adapter->vdev->dev, pool->dma_addr[index], pool->buff_size, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); adapter->replenish_add_buff_failure++; mb(); atomic_add(buffers_added, &(pool->available)); }
/** * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, * -EBUSY if the queue is full, -EIO if it cannot map the caller's * descriptor. * @dev: device of the job ring to be used. This device should have * been assigned prior by caam_jr_register(). * @desc: points to a job descriptor that execute our request. All * descriptors (and all referenced data) must be in a DMAable * region, and all data references must be physical addresses * accessible to CAAM (i.e. within a PAMU window granted * to it). * @cbk: pointer to a callback function to be invoked upon completion * of this request. This has the form: * callback(struct device *dev, u32 *desc, u32 stat, void *arg) * where: * @dev: contains the job ring device that processed this * response. * @desc: descriptor that initiated the request, same as * "desc" being argued to caam_jr_enqueue(). * @status: untranslated status received from CAAM. See the * reference manual for a detailed description of * error meaning, or see the JRSTA definitions in the * register header file * @areq: optional pointer to an argument passed with the * original request * @areq: optional pointer to a user argument for use at callback * time. **/ int caam_jr_enqueue(struct device *dev, u32 *desc, void (*cbk)(struct device *dev, u32 *desc, u32 status, void *areq), void *areq) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); struct caam_jrentry_info *head_entry; unsigned long flags; int head, tail, desc_size; dma_addr_t desc_dma, inpbusaddr; desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32); desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, desc_dma)) { dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n"); return -EIO; } dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE); inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); dma_sync_single_for_device(dev, inpbusaddr, sizeof(dma_addr_t) * JOBR_DEPTH, DMA_TO_DEVICE); spin_lock_irqsave(&jrp->inplock, flags); head = jrp->head; tail = ACCESS_ONCE(jrp->tail); if (!rd_reg32(&jrp->rregs->inpring_avail) || CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { spin_unlock_irqrestore(&jrp->inplock, flags); dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); return -EBUSY; } head_entry = &jrp->entinfo[head]; head_entry->desc_addr_virt = desc; head_entry->desc_size = desc_size; head_entry->callbk = (void *)cbk; head_entry->cbkarg = areq; head_entry->desc_addr_dma = desc_dma; jrp->inpring[jrp->inp_ring_write_index] = desc_dma; dma_sync_single_for_device(dev, inpbusaddr, sizeof(dma_addr_t) * JOBR_DEPTH, DMA_TO_DEVICE); smp_wmb(); jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) & (JOBR_DEPTH - 1); jrp->head = (head + 1) & (JOBR_DEPTH - 1); wmb(); wr_reg32(&jrp->rregs->inpring_jobadd, 1); spin_unlock_irqrestore(&jrp->inplock, flags); return 0; }
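/*
 * Sketch, not from the source: the callback contract described in the
 * kernel-doc above, with a completion-backed context passed as areq. The
 * "example_" names are hypothetical; gen_split_key() further up uses the same
 * pattern via split_key_done().
 */
struct example_jr_result {
	struct completion done;
	int err;
};

static void example_jr_done(struct device *dev, u32 *desc, u32 status,
			    void *areq)
{
	struct example_jr_result *res = areq;

	/* status is the untranslated CAAM status word; 0 means success */
	res->err = status ? -EIO : 0;
	complete(&res->done);
}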
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	ibmveth_debug_printk("open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
				      rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
					       GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
				    adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n",
				     lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for (i = 0; i < IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if ((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0,
			      netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n",
				     netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		ibmveth_error_printk("unable to allocate bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		ibmveth_error_printk("unable to map bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
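ibmveth_open() above maps several regions back to back and only afterwards checks all of them for mapping errors, leaving any unwinding to ibmveth_cleanup(). As a generic pattern (a sketch with hypothetical names, not part of this driver), unwinding consecutive dma_map_single() calls on failure usually looks like this:

/* Hypothetical sketch: map two buffers, unmapping the first if the
 * second mapping fails, so no mapping leaks on the error path. */
static int map_two_buffers(struct device *dev, void *a, void *b, size_t len,
			   dma_addr_t *da, dma_addr_t *db)
{
	*da = dma_map_single(dev, a, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *da))
		return -ENOMEM;

	*db = dma_map_single(dev, b, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *db)) {
		dma_unmap_single(dev, *da, len, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	return 0;
}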
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE |
				    GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current, not needed if the
		 * current skb can be reused */
		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						  newskb->data,
						  MAX_FRAME_SIZE + NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)),
							      pkt_len);

				skb_put(skb, pkt_len);

				if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				/* reusing current skb, so it is a drop */
				dev->stats.rx_dropped++;
			}
		} else if (bad) {
			/* Bad Frame transfer, the skb is reused */
			dev->stats.rx_dropped++;
		} else {
			/* Failed Allocating a new skb. This is rather stupid
			 * but the current "filled" skb is reused, as if
			 * transfer failure. One could argue that RX descriptor
			 * table handling should be divided into cleaning and
			 * filling as the TX part of the driver
			 */
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			/* reusing current skb, so it is a drop */
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		wmb();
		greth_write_bd(&bdp->stat, status);
		spin_lock_irqsave(&greth->devlock, flags);
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
static int single_step_get_dev_desc(struct usb_hcd *hcd, u8 port)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct urb *urb;
	struct usb_device *hdev;
	struct usb_device *udev = NULL;
	struct usb_hub *hub = NULL;
	struct usb_ctrlrequest setup_packet;
	char data_buffer[USB_DT_DEVICE_SIZE];
	int ret = 0;

	xhci_info(xhci, "Testing SINGLE_STEP_GET_DEV_DESC\n");

	hdev = hcd->self.root_hub;
	if (!hdev) {
		xhci_err(xhci, "EHSET: root_hub pointer is NULL\n");
		ret = -EPIPE;
		goto error;
	}

	hub = usb_hub_to_struct_hub(hdev);
	if (hub == NULL) {
		xhci_err(xhci, "EHSET: hub pointer is NULL\n");
		ret = -EPIPE;
		goto error;
	}

	if (hub->ports[port]->child != NULL)
		udev = hub->ports[port]->child;

	if (!udev) {
		xhci_err(xhci, "EHSET: device available is NOT found\n");
		ret = -EPIPE;
		goto error;
	}

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		xhci_err(xhci, "urb : get alloc failed\n");
		ret = -ENOMEM;
		goto error;
	}

	setup_packet.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD |
				    USB_RECIP_DEVICE;
	setup_packet.bRequest = USB_REQ_GET_DESCRIPTOR;
	setup_packet.wValue = (USB_DT_DEVICE << 8);
	setup_packet.wIndex = 0;
	setup_packet.wLength = USB_DT_DEVICE_SIZE;

	urb->dev = udev;
	urb->hcpriv = udev->ep0.hcpriv;
	urb->setup_packet = (unsigned char *)&setup_packet;
	urb->transfer_buffer = data_buffer;
	urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
	urb->actual_length = 0;
	urb->transfer_flags = URB_DIR_IN | URB_HCD_DRIVER_TEST;
	urb->pipe = usb_rcvctrlpipe(udev, 0);
	urb->ep = usb_pipe_endpoint(udev, urb->pipe);
	if (!urb->ep) {
		xhci_err(xhci, "urb->ep is NULL\n");
		ret = -ENOENT;
		goto error_urb_ep;
	}

	urb->setup_dma = dma_map_single(hcd->self.controller,
					urb->setup_packet,
					sizeof(struct usb_ctrlrequest),
					DMA_TO_DEVICE);
	if (dma_mapping_error(hcd->self.controller, urb->setup_dma)) {
		xhci_err(xhci, "setup : dma_map_single failed\n");
		ret = -EBUSY;
		goto error_setup_dma;
	}

	/* The transfer buffer receives the descriptor from the device. */
	urb->transfer_dma = dma_map_single(hcd->self.controller,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   DMA_FROM_DEVICE);
	if (dma_mapping_error(hcd->self.controller, urb->transfer_dma)) {
		xhci_err(xhci, "xfer : dma_map_single failed\n");
		ret = -EBUSY;
		goto error_xfer_dma;
	}

	ret = xhci_urb_enqueue_single_step(hcd, urb, GFP_ATOMIC, 1);

	dma_unmap_single(hcd->self.controller, urb->transfer_dma,
			 urb->transfer_buffer_length, DMA_FROM_DEVICE);
error_xfer_dma:
	dma_unmap_single(hcd->self.controller, urb->setup_dma,
			 sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE);
error_setup_dma:
error_urb_ep:
	usb_free_urb(urb);
error:
	return ret;
}
static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	struct ath_vif *avp;
	struct sk_buff *skb;
	struct ath_txq *cabq;
	struct ieee80211_tx_info *info;
	int cabq_depth;

	ath9k_reset_beacon_status(sc);

	avp = (void *)vif->drv_priv;
	cabq = sc->beacon.cabq;

	if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
		return NULL;

	/* Release the old beacon first */

	bf = avp->av_bcbuf;
	skb = bf->bf_mpdu;
	if (skb) {
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		bf->bf_buf_addr = 0;
	}

	/* Get a new beacon from mac80211 */

	skb = ieee80211_beacon_get(hw, vif);
	bf->bf_mpdu = skb;
	if (skb == NULL)
		return NULL;
	((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
		avp->tsf_adjust;

	info = IEEE80211_SKB_CB(skb);
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		/*
		 * TODO: make sure the seq# gets assigned properly (vs. other
		 * TX frames)
		 */
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		dev_kfree_skb_any(skb);
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(common, "dma_mapping_error on beaconing\n");
		return NULL;
	}

	skb = ieee80211_get_buffered_bc(hw, vif);

	/*
	 * If the CABQ traffic from the previous DTIM is pending and the
	 * current beacon is also a DTIM:
	 * 1) if there is only one vif, let the cab traffic continue.
	 * 2) if there are more than one vif and we are using staggered
	 *    beacons, then drain the cabq by dropping all the frames in
	 *    the cabq so that the current vif's cab traffic can be scheduled.
	 */
	spin_lock_bh(&cabq->axq_lock);
	cabq_depth = cabq->axq_depth;
	spin_unlock_bh(&cabq->axq_lock);

	if (skb && cabq_depth) {
		if (sc->nvifs > 1) {
			ath_dbg(common, BEACON,
				"Flushing previous cabq traffic\n");
			ath_draintxq(sc, cabq, false);
		}
	}

	ath_beacon_setup(sc, vif, bf, info->control.rates[0].idx);

	while (skb) {
		ath_tx_cabq(hw, skb);
		skb = ieee80211_get_buffered_bc(hw, vif);
	}

	return bf;
}