int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
	u16 card_type = LIO_410NV;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
	oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	/* Determine variant of card */
	if (lio_is_210nv(oct))
		card_type = LIO_210NV;

	cn68xx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, card_type);
	if (!cn68xx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
			__func__,
			(card_type == LIO_410NV) ? LIO_410NV_NAME :
						   LIO_210NV_NAME);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	lio_cn68xx_vendor_message_fix(oct);

	return 0;
}
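/*
 * Illustrative sketch only, not code from this driver: the routine above
 * fills oct->fn_list with CN68XX-specific callbacks so that the common
 * queue and interrupt code can stay chip-agnostic.  The helper below is
 * hypothetical; it assumes setup_iq_regs() takes (device, queue number)
 * and that setup_device_regs() returns nonzero on failure.  See
 * struct octeon_fn_list in octeon_device.h for the real signatures.
 */
static int example_use_fn_list(struct octeon_device *oct, u32 num_iqs)
{
	u32 q_no;

	/* Chip-wide register setup through the per-chip callback. */
	if (oct->fn_list.setup_device_regs(oct))
		return 1;

	/* Per-queue input-queue register setup. */
	for (q_no = 0; q_no < num_iqs; q_no++)
		oct->fn_list.setup_iq_regs(oct, q_no);

	oct->fn_list.enable_io_queues(oct);

	return 0;
}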
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
{
	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
	u32 rings_per_vf;
	u64 reg_val;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	/* INPUT_CONTROL[RPVF] gives the VF IOq count */
	reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));

	oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
		      CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
	oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
		      CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;

	reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
	rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;

	cn23xx->conf = oct_get_config_info(oct, LIO_23XX);
	if (!cn23xx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	/* Clamp the requested ring count to what the PF exposes and to the
	 * number of CPUs present.
	 */
	if (oct->sriov_info.rings_per_vf > rings_per_vf) {
		dev_warn(&oct->pci_dev->dev,
			 "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n",
			 oct->sriov_info.rings_per_vf, rings_per_vf,
			 rings_per_vf);
		oct->sriov_info.rings_per_vf = rings_per_vf;
	} else {
		if (rings_per_vf > num_present_cpus()) {
			dev_warn(&oct->pci_dev->dev,
				 "PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n",
				 rings_per_vf,
				 num_present_cpus(),
				 num_present_cpus());
			oct->sriov_info.rings_per_vf = num_present_cpus();
		} else {
			oct->sriov_info.rings_per_vf = rings_per_vf;
		}
	}

	oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs;
	oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs;
	oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox;
	oct->fn_list.free_mbox = cn23xx_free_vf_mbox;

	oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler;

	oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs;
	oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;

	oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt;
	oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt;

	oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues;
	oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues;

	return 0;
}
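/*
 * Illustrative sketch only, not code from this driver: setup routines such
 * as lio_setup_cn68xx_octeon_device() and cn23xx_setup_octeon_vf_device()
 * above are normally chosen once at probe time, based on the PCI device ID,
 * so the remainder of the driver never needs chip-specific branches.  The
 * dispatcher name and the EXAMPLE_CHIP_* identifiers are assumptions made
 * for illustration; both setup routines are assumed to be declared in their
 * chip headers.
 */
#define EXAMPLE_CHIP_CN68XX	1
#define EXAMPLE_CHIP_CN23XX_VF	2

static int example_chip_setup_dispatch(struct octeon_device *oct, int chip)
{
	switch (chip) {
	case EXAMPLE_CHIP_CN68XX:
		return lio_setup_cn68xx_octeon_device(oct);
	case EXAMPLE_CHIP_CN23XX_VF:
		return cn23xx_setup_octeon_vf_device(oct);
	default:
		dev_err(&oct->pci_dev->dev, "unsupported chip type %d\n",
			chip);
		return 1;
	}
}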