static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
			   size_t q_size, size_t msg_size)
{
	u8 *pool_start;

	if (q_size > SIZE_MAX / msg_size)
		return -EINVAL;

	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
					&mq->host_dma, GFP_KERNEL);
	if (!pool_start)
		return -ENOMEM;

	c2_mq_rep_init(mq,
		       0,		/* index (currently unknown) */
		       q_size,
		       msg_size,
		       pool_start,
		       NULL,		/* peer (currently unknown) */
		       C2_MQ_HOST_TARGET);

	dma_unmap_addr_set(mq, mapping, mq->host_dma);

	return 0;
}
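/*
 * Why the SIZE_MAX guard above matters: q_size * msg_size is computed in
 * size_t, so an unchecked product can wrap and make dma_alloc_coherent()
 * allocate far less memory than the queue needs. A self-contained userspace
 * illustration of the same check follows; checked_queue_bytes() is a
 * hypothetical helper for this sketch, not part of the driver.
 */
#include <stdint.h>
#include <stddef.h>

/* Returns 0 and stores the product on success, -1 if it would overflow. */
static int checked_queue_bytes(size_t q_size, size_t msg_size, size_t *out)
{
	if (msg_size != 0 && q_size > SIZE_MAX / msg_size)
		return -1;
	*out = q_size * msg_size;
	return 0;
}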
int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
{
	struct rdma_cq_setup setup;
	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);

	size += 1; /* one extra page for storing cq-in-err state */
	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
	if (!cq->cqid)
		return -ENOMEM;

	if (kernel) {
		cq->sw_queue = kzalloc(size, GFP_KERNEL);
		if (!cq->sw_queue)
			return -ENOMEM;
	}

	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
				       &(cq->dma_addr), GFP_KERNEL);
	if (!cq->queue) {
		kfree(cq->sw_queue);
		return -ENOMEM;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, size);

	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = 65535;
	setup.credit_thres = 1;
	if (rdev_p->t3cdev_p->type != T3A)
		setup.ovfl_mode = 0;
	else
		setup.ovfl_mode = 1;

	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
			       struct sp_chunk **head)
{
	int i;
	struct sp_chunk *new_head;
	dma_addr_t dma_addr;

	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
				      &dma_addr, gfp_mask);
	if (new_head == NULL)
		return -ENOMEM;

	new_head->dma_addr = dma_addr;
	dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);

	new_head->next = NULL;
	new_head->head = 0;

	/* build list where each index is the next free slot */
	for (i = 0;
	     i < (PAGE_SIZE - sizeof(struct sp_chunk) -
		  sizeof(u16)) / sizeof(u16) - 1;
	     i++) {
		new_head->shared_ptr[i] = i + 1;
	}
	/* terminate list */
	new_head->shared_ptr[i] = 0xFFFF;

	*head = new_head;
	return 0;
}
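/*
 * Illustration only (userspace C, hypothetical fl_* names): how an embedded
 * "each slot stores the index of the next free slot" list like the one built
 * above is typically consumed. 0xFFFF terminates the chain exactly as in
 * c2_alloc_mqsp_chunk(); allocation pops the head, freeing pushes it back.
 */
#include <stdint.h>

#define SLOT_EOL 0xFFFFu

struct free_list {
	uint16_t head;		/* index of first free slot */
	uint16_t slot[64];	/* slot[i] holds index of next free slot */
};

static int fl_alloc(struct free_list *fl)
{
	uint16_t idx = fl->head;

	if (idx == SLOT_EOL)
		return -1;		/* chunk exhausted */
	fl->head = fl->slot[idx];	/* pop: head = next free */
	return idx;
}

static void fl_free(struct free_list *fl, uint16_t idx)
{
	fl->slot[idx] = fl->head;	/* push back onto free list */
	fl->head = idx;
}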
/**
 * arc_emac_tx - Starts the data transmission.
 * @skb: sk_buff pointer that contains data to be Transmitted.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *		NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &priv->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	/* EMAC still holds this buffer in its possession.
	 * CPU must not modify this buffer descriptor
	 */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->tx_buff[*txbd_curr].skb = skb;
	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	skb_tx_timestamp(skb);

	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Get "info" of the next BD */
	info = &priv->txbd[*txbd_curr].info;

	/* Check if Tx BD ring is full - next BD is still owned by EMAC */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
		netif_stop_queue(ndev);

	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	return NETDEV_TX_OK;
}
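/*
 * Sketch of the publish order arc_emac_tx() relies on, modeled in
 * self-contained userspace C with a C11 release store standing in for the
 * driver's wmb(). Hypothetical types and names, not driver code: the
 * producer fills the descriptor first and flips ownership last, so the
 * device never observes OWN set on a half-written descriptor.
 */
#include <stdatomic.h>
#include <stdint.h>

#define OWN 0x80000000u		/* top bit = device owns the descriptor */

struct bd {
	uint32_t data;		/* DMA address of the payload */
	_Atomic uint32_t info;	/* ownership flag + length */
};

static void publish(struct bd *bd, uint32_t addr, uint32_t len)
{
	bd->data = addr;	/* 1. fill in the payload pointer */
	/* 2. release ordering plays the role of the driver's wmb() */
	atomic_store_explicit(&bd->info, OWN | len, memory_order_release);
}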
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
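/*
 * The cookie handed to the firmware above is just the kernel address of a
 * local c4iw_wr_wait; the reply handler finds it in the completion message
 * and wakes the sleeper. A minimal sketch of that pattern using the generic
 * completion primitives instead of the c4iw_wr_wait helpers; the demo_*
 * names are hypothetical, not c4iw code.
 */
#include <linux/completion.h>

struct demo_wr_wait {
	struct completion comp;
	int ret;
};

/* issuer side: post the request with (unsigned long)w as the cookie */
static int demo_issue_and_wait(struct demo_wr_wait *w)
{
	init_completion(&w->comp);
	/* ... post the work request carrying the cookie here ... */
	wait_for_completion(&w->comp);
	return w->ret;
}

/* reply-handler side, running from the firmware event path */
static void demo_reply(unsigned long cookie, int status)
{
	struct demo_wr_wait *w = (struct demo_wr_wait *)cookie;

	w->ret = status;
	complete(&w->comp);
}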
int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
		    int hca_write, struct mthca_mr *mr)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= max_direct) {
		*is_direct = 1;
		npages     = 1;
		shift      = get_order(size) + PAGE_SHIFT;

		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		dma_unmap_addr_set(&buf->direct, mapping, t);

		memset(buf->direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		*is_direct = 0;
		npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift      = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		buf->page_list = kmalloc(npages * sizeof *buf->page_list,
					 GFP_KERNEL);
		if (!buf->page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			buf->page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			dma_list[i] = t;
			dma_unmap_addr_set(&buf->page_list[i], mapping, t);

			clear_page(buf->page_list[i].buf);
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_READ |
				  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
				  mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
	kfree(dma_list);

	return err;
}
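/*
 * What the shift-reduction loop in the direct case above computes: the
 * region is described to the HCA as npages blocks of 1 << shift bytes, and
 * shift is lowered (doubling the block count) until the bus address is
 * aligned to the block size. Self-contained userspace illustration;
 * block_shift_for() is a hypothetical helper, not driver code.
 */
#include <stdint.h>

static unsigned int block_shift_for(uint64_t dma_addr, unsigned int shift,
				    unsigned int *npages)
{
	*npages = 1;
	while (dma_addr & ((1ull << shift) - 1)) {	/* not aligned yet */
		--shift;				/* halve block size */
		*npages *= 2;				/* twice as many blocks */
	}
	return shift;
}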
/**
 * arc_emac_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the EMAC device and starts the Tx queue.
 * It also connects to the phy device.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	linkmode_and(phy_dev->advertising, phy_dev->advertising,
		     phy_dev->supported);

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		    (RX_BD_NUM << 24) |	/* RX BD table length */
		    (TX_BD_NUM << 16) |	/* TX BD table length */
		    TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}
/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev: Pointer to the network device.
 * @budget: How many BDs to process on 1 call.
 *
 * returns: Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for next cycle. netif_receive_skb()
		 * only if new skb was allocated and mapped to avoid holes
		 * in the RX fifo.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* unmap previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}
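/*
 * The refill discipline arc_emac_rx() follows, in outline: a received
 * buffer is only handed to the stack once its replacement is allocated and
 * DMA-mapped, so the RX ring never develops a hole; on any failure the old
 * buffer is given back to the device and the packet is dropped. Sketch with
 * hypothetical hooks standing in for the driver's skb/DMA operations, not
 * driver code.
 */
#include <stdbool.h>

struct buf;				/* opaque receive buffer */

struct buf *alloc_and_map(void);	/* assumed hook: new mapped buffer */
void give_back_to_device(struct buf *b);
void unmap_and_deliver(struct buf *b);
void install_in_ring(struct buf *b);

/* Refill-before-deliver: the ring slot is never left empty. */
static bool rx_one(struct buf *old)
{
	struct buf *new_buf = alloc_and_map();

	if (!new_buf) {
		give_back_to_device(old);	/* drop packet, keep ring full */
		return false;
	}
	unmap_and_deliver(old);			/* safe: replacement exists */
	install_in_ring(new_buf);
	return true;
}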
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	if (user) {
		ret = alloc_oc_sq(rdev, &wq->sq);
		if (ret)
			goto free_hwaddr;

		ret = alloc_host_sq(rdev, &wq->sq);
		if (ret)
			goto free_sq;
	} else {
		ret = alloc_host_sq(rdev, &wq->sq);
		if (ret)
			goto free_hwaddr;
	}
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_WARN(priv, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->cmd_queue);

	phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(priv->bus->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(priv->bus->dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
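/*
 * Illustration of the QUEUE_TO_SEQ()/INDEX_TO_SEQ() packing used when the
 * command header's sequence field is built above. The exact bit layout here
 * (queue number in bits 8..12, write index in bits 0..7) is an assumption
 * for this sketch, matching the shape of the iwlwifi macros of this era
 * rather than quoting them; make_seq() is a hypothetical helper.
 */
#include <stdint.h>

static uint16_t make_seq(unsigned int queue, unsigned int index)
{
	return (uint16_t)(((queue & 0x1f) << 8) |	/* assumed queue field */
			  (index & 0xff));		/* assumed index field */
}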
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	int ret;
	struct sk_buff *skb;
	struct c4iw_ucontext *ucontext = NULL;

	if (user)
		ucontext = container_of(uctx, struct c4iw_ucontext, uctx);

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	if (user && ucontext->is_32b_cqe) {
		cq->qp_errp = &((struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) *
		 (sizeof(*cq->queue) / 2)))->qp_err;
	} else {
		cq->qp_errp = &((struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) *
		 sizeof(*cq->queue)))->qp_err;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			((user && ucontext->is_32b_cqe) ?
			 FW_RI_RES_WR_IQESIZE_V(1) :
			 FW_RI_RES_WR_IQESIZE_V(2)));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn("%s: cqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
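/*
 * The qp_errp arithmetic above locates the t4_status_page that lives in the
 * final slot of the CQ ring: with full-size CQEs the slot stride is
 * sizeof(*cq->queue), and in 32b-CQE mode each slot is half that, so the
 * same "last slot" rule uses half the stride. Generic illustration of the
 * address computation; last_slot() is a hypothetical helper.
 */
#include <stddef.h>

static void *last_slot(void *base, size_t nslots, size_t slot_size)
{
	/* address of slot nslots - 1, counting from base in slot_size steps */
	return (char *)base + (nslots - 1) * slot_size;
}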
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: pointer to the transport layer data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_unmap_tfd(trans, out_meta,
				      &txq->tfds[q->write_ptr],
				      DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* start timer if queue currently empty */
	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

out:
	spin_unlock_bh(&txq->lock);
	return idx;
}
/**
 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
 * @priv: pointer to the device private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries
	 * Also, check to see if command buffer should not exceed the size
	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "Restarting adapter due to command queue full\n");
		queue_work(priv->workqueue, &priv->restart);
		return -ENOSPC;
	}

	idx = iwl_legacy_get_cmd_index(q, q->write_ptr,
				       cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags | CMD_MAPPED;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv,
				"Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
		break;
	default:
		IWL_DEBUG_HC(priv,
				"Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
						fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
int __devinit c2_rnic_init(struct c2_dev *c2dev)
{
	int err;
	u32 qsize, msgsize;
	void *q1_pages;
	void *q2_pages;
	void __iomem *mmio_regs;

	/* Device capabilities */
	c2dev->device_cap_flags =
	    (IB_DEVICE_RESIZE_MAX_WR |
	     IB_DEVICE_CURR_QP_STATE_MOD |
	     IB_DEVICE_SYS_IMAGE_GUID |
	     IB_DEVICE_LOCAL_DMA_LKEY |
	     IB_DEVICE_MEM_WINDOW);

	/* Allocate the qptr_array */
	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
	if (!c2dev->qptr_array) {
		return -ENOMEM;
	}

	/* Initialize the qptr_array */
	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
	c2dev->qptr_array[2] = (void *) &c2dev->aeq;

	/* Initialize data structures */
	init_waitqueue_head(&c2dev->req_vq_wo);
	spin_lock_init(&c2dev->vqlock);
	spin_lock_init(&c2dev->lock);

	/* Allocate MQ shared pointer pool for kernel clients. User
	 * mode client pools are hung off the user context
	 */
	err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
	if (err) {
		goto bail0;
	}

	/* Allocate shared pointers for Q0, Q1, and Q2 from
	 * the shared pointer pool.
	 */
	c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					  &c2dev->hint_count_dma,
					  GFP_KERNEL);
	c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					     &c2dev->req_vq.shared_dma,
					     GFP_KERNEL);
	c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					     &c2dev->rep_vq.shared_dma,
					     GFP_KERNEL);
	c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					  &c2dev->aeq.shared_dma, GFP_KERNEL);
	if (!c2dev->hint_count || !c2dev->req_vq.shared ||
	    !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
		err = -ENOMEM;
		goto bail1;
	}

	mmio_regs = c2dev->kva;
	/* Initialize the Verbs Request Queue */
	c2_mq_req_init(&c2dev->req_vq, 0,
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
		       C2_MQ_ADAPTER_TARGET);

	/* Initialize the Verbs Reply Queue */
	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
	if (!q1_pages) {
		err = -ENOMEM;
		goto bail1;
	}
	dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
		 (unsigned long long) c2dev->rep_vq.host_dma);
	c2_mq_rep_init(&c2dev->rep_vq,
		       1,
		       qsize,
		       msgsize,
		       q1_pages,
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
		       C2_MQ_HOST_TARGET);

	/* Initialize the Asynchronous Event Queue */
	qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
	msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
				      &c2dev->aeq.host_dma, GFP_KERNEL);
	if (!q2_pages) {
		err = -ENOMEM;
		goto bail2;
	}
	dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
		 (unsigned long long) c2dev->aeq.host_dma);
	c2_mq_rep_init(&c2dev->aeq,
		       2,
		       qsize,
		       msgsize,
		       q2_pages,
		       mmio_regs +
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
		       C2_MQ_HOST_TARGET);

	/* Initialize the verbs request allocator */
	err = vq_init(c2dev);
	if (err)
		goto bail3;

	/* Enable interrupts on the adapter */
	writel(0, c2dev->regs + C2_IDIS);

	/* create the WR init message */
	err = c2_adapter_init(c2dev);
	if (err)
		goto bail4;
	c2dev->init++;

	/* open an adapter instance */
	err = c2_rnic_open(c2dev);
	if (err)
		goto bail4;

	/* Initialize the cached adapter limits */
	err = c2_rnic_query(c2dev, &c2dev->props);
	if (err)
		goto bail5;

	/* Initialize the PD pool */
	err = c2_init_pd_table(c2dev);
	if (err)
		goto bail5;

	/* Initialize the QP pool */
	c2_init_qp_table(c2dev);
	return 0;

bail5:
	c2_rnic_close(c2dev);
bail4:
	vq_term(c2dev);
bail3:
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
			  q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
bail2:
	dma_free_coherent(&c2dev->pcidev->dev,
			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
			  q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
bail1:
	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
bail0:
	vfree(c2dev->qptr_array);

	return err;
}
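/*
 * The bail0..bail5 ladder above is the standard kernel unwind idiom:
 * resources are released in reverse order of acquisition, and each failure
 * jumps to the label that frees everything acquired so far. Minimal
 * self-contained shape of the idiom; the acquire_*/release_* and demo_init
 * names are hypothetical, not driver code.
 */
int acquire_a(void);
int acquire_b(void);
int acquire_c(void);
void release_a(void);
void release_b(void);

static int demo_init(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto undo_a;
	err = acquire_c();
	if (err)
		goto undo_b;
	return 0;		/* success: keep everything */

undo_b:
	release_b();		/* undo in reverse order */
undo_a:
	release_a();
out:
	return err;
}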
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: pointer to the transport layer data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size, dma_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
	const u8 *cmddata[IWL_MAX_CMD_TFDS];
	u16 cmdlen[IWL_MAX_CMD_TFDS];
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
			int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_dest = out_cmd->payload;
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		int copy = 0;

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
			copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
		}

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			copy = cmd->len[i];

		if (copy) {
			memcpy(cmd_dest, cmd->data[i], copy);
			cmd_dest += copy;
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans_pcie->cmd_queue);

	/*
	 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
	 * still map at least that many bytes for the hardware to write back to.
	 * We have enough space, so that's not a problem.
	 */
	dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, dma_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev, (void *)cmddata[i],
					   cmdlen[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmdlen[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmddata[i];
		trace_lens[trace_idx] = cmdlen[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

out:
	spin_unlock_bh(&txq->lock);
	return idx;
}
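/*
 * What the IWL_HCMD_MIN_COPY_SIZE pass above computes, as a standalone
 * userspace function: walk the fragments and decide how many leading bytes
 * of each one get peeled off into the contiguous copy area, so that at
 * least min_copy bytes (header included) end up there. For NOCOPY fragments
 * the remainder is DMA-mapped in place; for normal fragments the remainder
 * is copied as well. split_copy() is a hypothetical helper for this sketch.
 */
#include <stddef.h>

static void split_copy(size_t hdr, size_t min_copy,
		       const size_t *len, size_t *copied, int n)
{
	size_t copy_size = hdr;		/* the header is always copied */
	int i;

	for (i = 0; i < n; i++) {
		size_t copy = 0;

		copied[i] = 0;
		if (!len[i])
			continue;
		if (copy_size < min_copy) {
			copy = min_copy - copy_size;	/* bytes still owed */
			if (copy > len[i])
				copy = len[i];		/* cap at fragment */
		}
		copied[i] = copy;	/* prefix that goes into the copy area */
		copy_size += copy;
	}
}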
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(q->write_ptr));

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans_pcie->cmd_queue);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

out:
	spin_unlock_bh(&txq->lock);
	return idx;
}