/**
 * \brief Delete gather lists
 * @param lio per-network private data
 *
 * Frees the per-IQ gather entries, the per-IQ DMA-coherent gather
 * regions, and the bookkeeping arrays; all freed pointers that remain
 * reachable through @lio are reset to NULL.
 */
void lio_delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->oct_dev->num_iqs; i++) {
		/* Drain and free every gather entry queued on this list. */
		for (;;) {
			g = (struct octnic_gather *)
			    lio_list_delete_head(&lio->glist[i]);
			if (!g)
				break;
			kfree(g);
		}

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i])
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}
/**
 * \brief Delete an instruction queue and release its resources.
 * @param oct   the octeon device
 * @param iq_no the instruction queue number to tear down
 *
 * Stops the doorbell-check delayed work, frees the posted-request list
 * and the DMA descriptor ring.
 *
 * Return: 0 on success, 1 if the queue had no descriptor ring allocated.
 */
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Stop the periodic doorbell check before freeing queue memory.
	 * destroy_workqueue() drains pending work itself, so a separate
	 * flush_workqueue() call is not needed.
	 */
	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));

	/* vfree(NULL) is a no-op, so no guard is needed; clear the
	 * pointer to defend against a double free on a later teardown.
	 */
	vfree(iq->request_list);
	iq->request_list = NULL;

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}
/**
 * \brief Initialize an instruction queue.
 * @param oct       the octeon device
 * @param iq_no     the instruction queue number to set up
 * @param num_descs number of descriptors; must be a power of 2
 *
 * Allocates the DMA descriptor ring and the posted-request list,
 * initializes the queue bookkeeping fields, programs the IQ registers
 * and starts the doorbell-check workqueue.
 *
 * Return: 0 on success, 1 on failure (all allocations made here are
 * released on every failure path).
 */
int octeon_init_instr_queue(struct octeon_device *oct, u32 iq_no,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 q_size;
	struct cavium_wq *db_wq;

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	/* The ring indices rely on power-of-2 wrap-around. */
	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev,
			"Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to holds requests that have been posted to Octeon
	 * but has yet to be fetched by octeon
	 */
	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev,
			"Alloc failed for IQ[%d] nr free list\n", iq_no);
		return 1;
	}
	memset(iq->request_list, 0,
	       sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev,
		"IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->iq_no = iq_no;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);

	oct->io_qmask.iq |= (1 << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
	if (!oct->check_db_wq[iq_no].wq) {
		/* Fix: the request list was previously leaked here —
		 * release it along with the descriptor ring.
		 */
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev,
			"check db wq create failed for iq %d\n", iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}