/**
 * Allocate one skb for Rx VRING
 *
 * Safe to call from IRQ
 */
static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
			       u32 i, int headroom)
{
	struct device *dev = wil_to_dev(wil);
	unsigned int sz = RX_BUF_LEN;
	struct vring_rx_desc dd, *d = &dd;
	volatile struct vring_rx_desc *_d = &(vring->va[i].rx);
	dma_addr_t pa;

	/* TODO align */
	struct sk_buff *skb = dev_alloc_skb(sz + headroom);

	if (unlikely(!skb))
		return -ENOMEM;

	skb_reserve(skb, headroom);
	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
	wil_desc_addr_set(&d->dma.addr, pa);
	/* ip_length don't care */
	/* b11 don't care */
	/* error don't care */
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16(sz);
	*_d = *d;
	vring->ctx[i] = skb;

	return 0;
}
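/* A minimal illustrative sketch (not taken from the code above) of an Rx
 * refill loop built on wil_vring_alloc_skb(): allocate one skb per free slot
 * and advance the software tail. The function name, the caller-supplied
 * refill count and the zero headroom are assumptions for the example only.
 */
static int wil_rx_refill_sketch(struct wil6210_priv *wil, struct vring *v,
				u32 count)
{
	u32 n;
	int rc = 0;

	for (n = 0; n < count; n++) {
		rc = wil_vring_alloc_skb(wil, v, v->swtail, 0 /* headroom */);
		if (rc)
			break; /* leave remaining slots empty; retry later */
		v->swtail = wil_vring_next_tail(v);
	}

	return rc;
}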
void wil_pm_runtime_put(struct wil6210_priv *wil)
{
	struct device *dev = wil_to_dev(wil);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
void wil_pm_runtime_forbid(struct wil6210_priv *wil)
{
	struct device *dev = wil_to_dev(wil);

	pm_runtime_forbid(dev);
	pm_runtime_get_noresume(dev);
}
void wil_pm_runtime_allow(struct wil6210_priv *wil)
{
	struct device *dev = wil_to_dev(wil);

	pm_runtime_put_noidle(dev);
	pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
}
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	if (tx) {
		int vring_index = vring - wil->vring_tx;

		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
			     vring_index, vring->size, vring->va,
			     &vring->pa, vring->ctx);
	} else {
		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
			     vring->size, vring->va,
			     &vring->pa, vring->ctx);
	}

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			wil_txdesc_unmap(dev, d, ctx);
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
void wil_wdev_free(struct wil6210_priv *wil)
{
	struct wireless_dev *wdev = wil_to_wdev(wil);

	dev_dbg(wil_to_dev(wil), "%s()\n", __func__);

	if (!wdev)
		return;

	wiphy_unregister(wdev->wiphy);
	wiphy_free(wdev->wiphy);
	kfree(wdev);
}
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		struct sk_buff *skb;
		u16 dmalen;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			skb = vring->ctx[vring->swtail];
			if (skb) {
				dma_unmap_single(dev, pa, dmalen,
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(skb);
				vring->ctx[vring->swtail] = NULL;
			} else {
				dma_unmap_page(dev, pa, dmalen,
					       DMA_TO_DEVICE);
			}
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			/* Rx entries are consumed from swhead; descriptor and
			 * skb must come from the same slot
			 */
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			skb = vring->ctx[vring->swhead];
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
int wil_pm_runtime_get(struct wil6210_priv *wil)
{
	int rc;
	struct device *dev = wil_to_dev(wil);

	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
		pm_runtime_put_noidle(dev);
		return rc;
	}

	return 0;
}
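/* A minimal illustrative sketch of the calling convention implied by the
 * runtime PM helpers above: take a reference before touching the device,
 * drop it (arming autosuspend) when done. wil_do_ioreq() is a hypothetical
 * device-access operation used only for this example.
 */
static int wil_pm_protected_op_sketch(struct wil6210_priv *wil)
{
	int rc = wil_pm_runtime_get(wil);

	if (rc < 0)
		return rc;

	rc = wil_do_ioreq(wil); /* hypothetical device access */

	wil_pm_runtime_put(wil);

	return rc;
}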
static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	wil_dbg_misc(wil, "%s()\n", __func__);

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/*
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &vring->va[i].tx;

		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
		     vring->va, &vring->pa, vring->ctx);

	return 0;
}
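/* A minimal illustrative sketch of how wil_vring_alloc() pairs with
 * wil_vring_free(): allocate the ring, use it, then release it. The ring
 * size of 128 is an arbitrary power of 2 chosen for the example; programming
 * the ring base address into HW is elided.
 */
static int wil_vring_lifecycle_sketch(struct wil6210_priv *wil)
{
	struct vring v = { .size = 128 };
	int rc = wil_vring_alloc(wil, &v);

	if (rc)
		return rc;

	/* ... hand v.pa to HW and operate on the ring ... */

	wil_vring_free(wil, &v, 0 /* rx */);

	return 0;
}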
static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	while (!wil_vring_is_empty(vring)) {
		if (tx) {
			volatile struct vring_tx_desc *d =
					&vring->va[vring->swtail].tx;
			dma_addr_t pa = d->dma.addr_low |
					((u64)d->dma.addr_high << 32);
			struct sk_buff *skb = vring->ctx[vring->swtail];

			if (skb) {
				dma_unmap_single(dev, pa, d->dma.length,
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(skb);
				vring->ctx[vring->swtail] = NULL;
			} else {
				dma_unmap_page(dev, pa, d->dma.length,
					       DMA_TO_DEVICE);
			}
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			/* Rx entries are consumed from swhead; descriptor and
			 * skb must come from the same slot
			 */
			volatile struct vring_rx_desc *d =
					&vring->va[vring->swhead].rx;
			dma_addr_t pa = d->dma.addr_low |
					((u64)d->dma.addr_high << 32);
			struct sk_buff *skb = vring->ctx[vring->swhead];

			dma_unmap_single(dev, pa, d->dma.length,
					 DMA_FROM_DEVICE);
			kfree_skb(skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
/**
 * Allocate the physical ring (p-ring) and the required
 * number of descriptors of required size.
 * Initialize the descriptors as required by pmc dma.
 * The descriptors' buffers dwords are initialized to hold
 * dword's serial number in the lsw and reserved value
 * PCM_DATA_INVALID_DW_VAL in the msw.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
		     __func__, num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context */
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
		goto no_release_err;
	}

	wil_dbg_misc(wil, "%s: allocated descriptors info list %p\n",
		     __func__, pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent
	 */
	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	wil_dbg_misc(wil,
		     "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     __func__,
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
		goto release_pmc_skb_list;
	}

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, ownership bit is at the same location, thus
	 * we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil,
				"%s: ERROR allocating pmc descriptor %d",
				__func__, i);
			goto release_pmc_skbs;
		}

		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d;
	}

	wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);

	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
			__func__, pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

release_pmc_skbs:
	wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
	/* stop at the first unallocated entry; check the index before
	 * dereferencing to avoid reading past the end of the array
	 */
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil,
		"%s: exit on error: Releasing descriptors info list...\n",
		__func__);
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = -ENOMEM;
	mutex_unlock(&pmc->lock);
}
/**
 * Traverse the p-ring and release all buffers.
 * At the end release the p-ring memory
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
			     __func__);
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
			     __func__);
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
				wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
					 sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"%s WMI_PMC_CMD with RELEASE op failed, status %d",
				__func__, pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "%s: free pring va %p\n",
			     __func__, pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		/* check the index before dereferencing to avoid reading past
		 * the end of the descriptors array
		 */
		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va;
		     i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
			     __func__, i, pmc->num_descriptors);
		wil_dbg_misc(wil, "%s: free pmc descriptors info list %p\n",
			     __func__, pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}
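/* A minimal illustrative sketch of a PMC session built from the two helpers
 * above: allocate the p-ring and its buffers, let HW collect samples, then
 * release everything. The descriptor count and size are arbitrary example
 * values; errors from the alloc path are reported via pmc.last_cmd_status,
 * as in wil_pmc_alloc() above.
 */
static void wil_pmc_session_sketch(struct wil6210_priv *wil)
{
	wil_pmc_alloc(wil, 256 /* descriptors */, 2048 /* bytes each */);

	if (wil->pmc.last_cmd_status)
		return;

	/* ... HW fills the descriptor buffers with PMC data ... */

	wil_pmc_free(wil, 1 /* send WMI_PMC_RELEASE to FW */);
}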