void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}
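/*
 * iwl_pcie_rxq_free_rbs() is called under rxq->lock above but its body is
 * not part of this section. The sketch below is a minimal reconstruction of
 * the single-queue era helper, assuming an rxq->pool[] of
 * RX_FREE_BUFFERS + RX_QUEUE_SIZE entries and the trans_pcie->rx_page_order
 * field; it is illustrative, not the authoritative driver code.
 */
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	/* Return every buffer to rx_used, unmapping and freeing any page
	 * still attached from a previous run (e.g. across a reset). */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}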
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	if (!rba->alloc_wq)
		rba->alloc_wq = alloc_workqueue("rb_allocator",
						WQ_HIGHPRI | WQ_UNBOUND, 1);
	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rx_free_rba(trans);
	iwl_pcie_rx_init_rba(rba);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans);
	spin_unlock(&rxq->lock);

	return 0;
}
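/*
 * The allocator variant above resets the rb_allocator state before use.
 * A minimal sketch of iwl_pcie_rx_init_rba(), assuming an rba->pool[] of
 * RX_POOL_SIZE entries and the rbd_allocated/rbd_empty lists consumed by
 * the background allocator work; details may differ from the real driver.
 */
static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
{
	int i;

	lockdep_assert_held(&rba->lock);

	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);

	/* Start with the whole pool queued as empty, waiting for pages */
	for (i = 0; i < RX_POOL_SIZE; i++)
		list_add(&rba->pool[i].list, &rba->rbd_empty);
}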
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	iwl_pcie_rxq_free_rbs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
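/*
 * The INIT_WORK() above wires the replenish path to a work item. A minimal
 * sketch of iwl_pcie_rx_replenish_work(), assuming trans_pcie->trans points
 * back at the transport; it only defers iwl_pcie_rx_replenish() to process
 * context so buffer allocation can sleep.
 */
static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
		container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}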
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);
	if (rba->alloc_wq) {
		destroy_workqueue(rba->alloc_wq);
		rba->alloc_wq = NULL;
	}

	spin_lock(&rba->lock);
	iwl_pcie_rx_free_rba(trans);
	spin_unlock(&rba->lock);

	spin_lock(&rxq->lock);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock(&rxq->lock);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}
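/*
 * A minimal sketch of iwl_pcie_rx_free_rba(), the allocator-side
 * counterpart called above. It assumes the same rba->pool[] layout as the
 * init sketch earlier in this section and frees any page still held by an
 * allocator buffer; not authoritative driver code.
 */
static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rba->lock);

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!rba->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
		rba->pool[i].page = NULL;
	}
}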
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock(&rxq->lock);

	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock(&rxq->lock);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock(&trans_pcie->irq_lock);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock(&trans_pcie->irq_lock);

	return 0;
}
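/*
 * Both init variants that call iwl_pcie_rx_init_rxb_lists() rely on it to
 * reset the software lists under rxq->lock. A minimal sketch, assuming the
 * rx_free/rx_used lists and the free_count/used_count counters used in
 * this era of the driver.
 */
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}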