/**
 * \brief Setup gather lists
 * @param oct octeon device
 * @param lio per-network private data
 * @param num_iqs number of input queues to set up gather lists for
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i, j;

	lio->glist_lock =
	    kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		lio_delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL, numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}
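/* For reference, a minimal sketch of the teardown that mirrors the
 * allocations above in reverse order. The driver's real lio_delete_glists()
 * plays this role; the name, signature, and body below are an illustrative
 * approximation built only from the fields touched in lio_setup_glists().
 * Error-path subtleties (partially initialized state) are handled by the
 * real function and are omitted here.
 */
static void lio_delete_glists_sketch(struct octeon_device *oct,
				     struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i;

	if (!lio->glist)
		return;

	for (i = 0; i < num_iqs; i++) {
		/* Free every per-packet gather struct queued on this list */
		while (!list_empty(&lio->glist[i])) {
			g = list_first_entry(&lio->glist[i],
					     struct octnic_gather, list);
			list_del(&g->list);
			kfree(g);
		}

		/* Release the per-queue DMA-consistent scatter/gather area */
		if (lio->glists_virt_base && lio->glists_virt_base[i])
			lio_dma_free(oct,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
	}

	kfree(lio->glists_virt_base);
	kfree(lio->glists_dma_base);
	kfree(lio->glist_lock);
	kfree(lio->glist);
}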
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    u32 iq_no, u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 q_size;
	struct cavium_wq *db_wq;

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon
	 */
	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->iq_no = iq_no;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);

	oct->io_qmask.iq |= (1 << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
	if (!oct->check_db_wq[iq_no].wq) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}
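/* For reference, a sketch of the reverse path that undoes the allocations
 * made in octeon_init_instr_queue(): stop the doorbell-check work, free the
 * request list, release the DMA ring, and clear the queue-mask bit. The
 * driver's real teardown (octeon_delete_instr_queue()) plays this role; the
 * name, signature, and q_size recomputation below are an illustrative
 * approximation, not the driver's exact code.
 */
static void octeon_delete_instr_queue_sketch(struct octeon_device *oct,
					     u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
	/* Recover q_size from the command width chosen at init time */
	u32 q_size = (iq->iqcmd_64B ? 64 : 32) * iq->max_count;

	/* Stop and destroy the per-queue doorbell-check workqueue first */
	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	flush_workqueue(oct->check_db_wq[iq_no].wq);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	/* Free the host-side pending-request bookkeeping */
	vfree(iq->request_list);

	/* Release the DMA-coherent descriptor ring */
	lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);

	/* Mark the queue as no longer in use */
	oct->io_qmask.iq &= ~(1 << iq_no);
}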