/*
 * Put all PF-owned IQ/OQ rings into reset: set the RST bit, wait for the
 * queue to go quiet, then clear the RST bit again.
 */
static int
lio_cn23xx_pf_reset_io_queues(struct octeon_device *oct)
{
	uint64_t	d64;
	uint32_t	ern, loop = BUSY_READING_REG_PF_LOOP_COUNT;
	uint32_t	q_no, srn;
	int		ret_val = 0;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	/*
	 * As per the HRM register description, s/w can't write 0 to ENB.
	 * To turn a queue off, the RST bit must be set instead.
	 * Reset the Enable bit for all the 64 IQs.
	 */
	for (q_no = srn; q_no < ern; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = lio_read_csr64(oct,
				     LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 = d64 | LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);
	}

	/* wait until the RST bit is clear or the RST and quiet bits are set */
	for (q_no = srn; q_no < ern; q_no++) {
		volatile uint64_t reg_val =
			lio_read_csr64(oct,
				       LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

		while ((reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) &&
		       !(reg_val & LIO_CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop--;
		}

		if (!loop) {
			lio_dev_err(oct,
				    "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
				    q_no);
			return (-1);
		}

		reg_val &= ~LIO_CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(oct, LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(oct,
					 LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(oct,
				    "clearing the reset failed for qno: %u\n",
				    q_no);
			ret_val = -1;
		}
	}

	return (ret_val);
}
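/**
 * cn23xx_vf_reset_io_queues:
 * @lio_dev: Pointer to LIO device
 * @num_queues: Number of IOQs to reset
 *
 * Reset the first @num_queues VF IOQs: set the RST bit, wait for the queue
 * to go quiet, then clear the RST bit again.
 */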
static int
cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)
{
	uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
	uint64_t d64, q_no;
	int ret_val = 0;

	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < num_queues; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = lio_read_csr64(lio_dev,
				     CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				d64);
	}

	/* wait until the RST bit is clear or the RST and QUIET bits are set */
	for (q_no = 0; q_no < num_queues; q_no++) {
		volatile uint64_t reg_val;

		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
		       !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop = loop - 1;
		}

		if (loop == 0) {
			lio_dev_err(lio_dev,
				    "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
				    (unsigned long)q_no);
			return -1;
		}

		reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(lio_dev,
				    "clearing the reset failed for qno: %lu\n",
				    (unsigned long)q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}
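/**
 * cn23xx_vf_set_io_queues_off:
 * @lio_dev: Pointer to LIO device
 *
 * Leave every VF IOQ disabled: for any queue still held in reset, wait for
 * the QUIET bit and then clear the RST bit.
 */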
int
cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev)
{
	uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
	uint64_t q_no;

	/* Disable the i/p and o/p queues for this Octeon.
	 * IOQs will already be in reset.
	 * If RST bit is set, wait for Quiet bit to be set.
	 * Once Quiet bit is set, clear the RST bit.
	 */
	PMD_INIT_FUNC_TRACE();

	for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
		volatile uint64_t reg_val;

		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
		       !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
			reg_val = lio_read_csr64(
					lio_dev,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			loop = loop - 1;
		}

		if (loop == 0) {
			lio_dev_err(lio_dev,
				    "clearing the reset reg failed or setting the quiet reg failed for qno %lu\n",
				    (unsigned long)q_no);
			return -1;
		}

		reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
		lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				reg_val);

		reg_val = lio_read_csr64(lio_dev,
					 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
			lio_dev_err(lio_dev, "unable to reset qno %lu\n",
				    (unsigned long)q_no);
			return -1;
		}
	}

	return 0;
}
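/*
 * Enable PCI-E error reporting in the Device Control register. If a fatal
 * error is already flagged, log the uncorrectable error mask and correctable
 * error status registers first.
 */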
static void
lio_cn23xx_pf_enable_error_reporting(struct octeon_device *oct)
{
	uint32_t	corrtable_err_status, uncorrectable_err_mask, regval;

	regval = lio_read_pci_cfg(oct, LIO_CN23XX_CFG_PCIE_DEVCTL);
	if (regval & LIO_CN23XX_CFG_PCIE_DEVCTL_MASK) {
		uncorrectable_err_mask = 0;
		corrtable_err_status = 0;
		uncorrectable_err_mask =
			lio_read_pci_cfg(oct,
					 LIO_CN23XX_CFG_PCIE_UNCORRECT_ERR_MASK);
		corrtable_err_status =
			lio_read_pci_cfg(oct,
					 LIO_CN23XX_CFG_PCIE_CORRECT_ERR_STATUS);
		lio_dev_err(oct, "PCI-E Fatal error detected;\n"
			    "\tdev_ctl_status_reg = 0x%08x\n"
			    "\tuncorrectable_error_mask_reg = 0x%08x\n"
			    "\tcorrectable_error_status_reg = 0x%08x\n",
			    regval, uncorrectable_err_mask,
			    corrtable_err_status);
	}

	regval |= 0xf;	/* Enable Link error reporting */

	lio_dev_dbg(oct, "Enabling PCI-E error reporting..\n");
	lio_write_pci_cfg(oct, LIO_CN23XX_CFG_PCIE_DEVCTL, regval);
}
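/*
 * Legacy/MSI interrupt handler: read the interrupt summary register, record
 * the pending causes in oct->int_status and clear them in hardware.
 */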
static void
lio_cn23xx_pf_interrupt_handler(void *dev)
{
	struct octeon_device	*oct = (struct octeon_device *)dev;
	struct lio_cn23xx_pf	*cn23xx = (struct lio_cn23xx_pf *)oct->chip;
	uint64_t		intr64;

	lio_dev_dbg(oct, "In %s octeon_dev @ %p\n", __func__, oct);
	intr64 = lio_read_csr64(oct, cn23xx->intr_sum_reg64);

	oct->int_status = 0;

	if (intr64 & LIO_CN23XX_INTR_ERR)
		lio_dev_err(oct, "Error Intr: 0x%016llx\n",
			    LIO_CAST64(intr64));

	if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
		if (intr64 & LIO_CN23XX_INTR_PKT_DATA)
			oct->int_status |= LIO_DEV_INTR_PKT_DATA;
	}

	if (intr64 & (LIO_CN23XX_INTR_DMA0_FORCE))
		oct->int_status |= LIO_DEV_INTR_DMA0_FORCE;

	if (intr64 & (LIO_CN23XX_INTR_DMA1_FORCE))
		oct->int_status |= LIO_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	lio_write_csr64(oct, cn23xx->intr_sum_reg64, intr64);
}
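/*
 * Issue a chip-wide soft reset and verify completion by checking that the
 * scratch register written before the reset has been cleared.
 */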
static int
lio_cn23xx_pf_soft_reset(struct octeon_device *oct)
{

	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	lio_dev_dbg(oct, "BIST enabled for CN23XX soft reset\n");

	lio_write_csr64(oct, LIO_CN23XX_SLI_SCRATCH1, 0x1234ULL);

	/* Initiate chip-wide soft reset */
	lio_pci_readq(oct, LIO_CN23XX_RST_SOFT_RST);
	lio_pci_writeq(oct, 1, LIO_CN23XX_RST_SOFT_RST);

	/* Wait for 100ms as Octeon resets. */
	lio_mdelay(100);

	if (lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH1)) {
		lio_dev_err(oct, "Soft reset failed\n");
		return (1);
	}

	lio_dev_dbg(oct, "Reset completed\n");

	/* restore the reset value */
	lio_write_csr64(oct, LIO_CN23XX_SLI_WIN_WR_MASK_REG, 0xFF);

	return (0);
}
/**
 * lio_mbox_process_cmd:
 * @mbox: Pointer to mailbox
 * @mbox_cmd: Pointer to command received
 *
 * Process the cmd received in mbox
 */
static int
lio_mbox_process_cmd(struct lio_mbox *mbox, struct lio_mbox_cmd *mbox_cmd)
{
	struct lio_device *lio_dev = mbox->lio_dev;

	if (mbox_cmd->msg.s.cmd == LIO_CORES_CRASHED)
		lio_dev_err(lio_dev, "Octeon core(s) crashed or got stuck!\n");

	return 0;
}
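/*
 * MSI-X interrupt handler for an IOQ vector: read the DROQ packets-sent
 * register and report which of the PO/PI interrupt causes are pending.
 */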
static uint64_t
lio_cn23xx_pf_msix_interrupt_handler(void *dev)
{
	struct lio_ioq_vector	*ioq_vector = (struct lio_ioq_vector *)dev;
	struct octeon_device	*oct = ioq_vector->oct_dev;
	struct lio_droq		*droq = oct->droq[ioq_vector->droq_index];
	uint64_t		pkts_sent;
	uint64_t		ret = 0;

	if (droq == NULL) {
		lio_dev_err(oct, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
			    oct->pf_num, ioq_vector->ioq_num);
		return (0);
	}

	pkts_sent = lio_read_csr64(oct, droq->pkts_sent_reg);

	/*
	 * If our device has interrupted, then proceed. Also check
	 * for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
		return (ret);

	/* Write count reg in sli_pkt_cnts to clear these int. */
	if (pkts_sent & LIO_CN23XX_INTR_PO_INT)
		ret |= LIO_MSIX_PO_INT;

	if (pkts_sent & LIO_CN23XX_INTR_PI_INT)
		/* We will clear the count when we update the read_index. */
		ret |= LIO_MSIX_PI_INT;

	/*
	 * Never need to handle msix mbox intr for pf. They arrive on the last
	 * msix.
	 */
	return (ret);
}
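/**
 * cn23xx_pfvf_handshake:
 * @lio_dev: Pointer to LIO device
 *
 * Send a VF_ACTIVE mailbox request to the PF driver, wait for its response,
 * apply the pkind returned by the PF and check PF/VF major version
 * compatibility.
 */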
int
cn23xx_pfvf_handshake(struct lio_device *lio_dev)
{
	struct lio_mbox_cmd mbox_cmd;
	struct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];
	uint32_t q_no, count = 0;
	rte_atomic64_t status;
	uint32_t pfmajor;
	uint32_t vfmajor;
	uint32_t ret;

	PMD_INIT_FUNC_TRACE();

	/* Sending VF_ACTIVE indication to the PF driver */
	lio_dev_dbg(lio_dev, "requesting info from PF\n");

	mbox_cmd.msg.mbox_msg64 = 0;
	mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 1;
	mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;
	mbox_cmd.msg.s.len = 2;
	mbox_cmd.data[0] = 0;
	lio_ver->major = LIO_BASE_MAJOR_VERSION;
	lio_ver->minor = LIO_BASE_MINOR_VERSION;
	lio_ver->micro = LIO_BASE_MICRO_VERSION;
	mbox_cmd.q_no = 0;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;
	mbox_cmd.fn_arg = (void *)&status;

	if (lio_mbox_write(lio_dev, &mbox_cmd)) {
		lio_dev_err(lio_dev, "Write to mailbox failed\n");
		return -1;
	}

	rte_atomic64_set(&status, 0);

	do {
		rte_delay_ms(1);
	} while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));

	ret = rte_atomic64_read(&status);
	if (ret == 0) {
		lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n");
		return -1;
	}

	for (q_no = 0; q_no < lio_dev->num_iqs; q_no++)
		lio_dev->instr_queue[q_no]->txpciq.s.pkind =
				lio_dev->pfvf_hsword.pkind;

	vfmajor = LIO_BASE_MAJOR_VERSION;
	pfmajor = ret >> 16;
	if (pfmajor != vfmajor) {
		lio_dev_err(lio_dev,
			    "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = -EPERM;
	} else {
		lio_dev_dbg(lio_dev,
			    "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
			    vfmajor, pfmajor);
		ret = 0;
	}

	lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n",
		    lio_dev->pfvf_hsword.pkind);

	return ret;
}
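/*
 * PF device setup: validate and map BAR0/BAR1, read the PF number, apply the
 * SR-IOV ring configuration and install the CN23XX PF function pointers and
 * register addresses.
 */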
int
lio_cn23xx_pf_setup_device(struct octeon_device *oct)
{
	uint64_t	BAR0, BAR1;
	uint32_t	data32;

	data32 = lio_read_pci_cfg(oct, 0x10);
	BAR0 = (uint64_t)(data32 & ~0xf);
	data32 = lio_read_pci_cfg(oct, 0x14);
	BAR0 |= ((uint64_t)data32 << 32);
	data32 = lio_read_pci_cfg(oct, 0x18);
	BAR1 = (uint64_t)(data32 & ~0xf);
	data32 = lio_read_pci_cfg(oct, 0x1c);
	BAR1 |= ((uint64_t)data32 << 32);

	if (!BAR0 || !BAR1) {
		if (!BAR0)
			lio_dev_err(oct, "Device BAR0 unassigned\n");

		if (!BAR1)
			lio_dev_err(oct, "Device BAR1 unassigned\n");

		return (1);
	}

	if (lio_map_pci_barx(oct, 0))
		return (1);

	if (lio_map_pci_barx(oct, 1)) {
		lio_dev_err(oct, "%s CN23XX BAR1 map failed\n", __func__);
		lio_unmap_pci_barx(oct, 0);
		return (1);
	}

	lio_cn23xx_pf_get_pf_num(oct);

	if (lio_cn23xx_pf_sriov_config(oct)) {
		lio_unmap_pci_barx(oct, 0);
		lio_unmap_pci_barx(oct, 1);
		return (1);
	}

	lio_write_csr64(oct, LIO_CN23XX_SLI_MAC_CREDIT_CNT,
			0x3F802080802080ULL);

	oct->fn_list.setup_iq_regs = lio_cn23xx_pf_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn23xx_pf_setup_oq_regs;
	oct->fn_list.process_interrupt_regs = lio_cn23xx_pf_interrupt_handler;
	oct->fn_list.msix_interrupt_handler =
		lio_cn23xx_pf_msix_interrupt_handler;

	oct->fn_list.soft_reset = lio_cn23xx_pf_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn23xx_pf_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn23xx_pf_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn23xx_pf_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn23xx_pf_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn23xx_pf_bar1_idx_read;

	oct->fn_list.enable_interrupt = lio_cn23xx_pf_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn23xx_pf_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn23xx_pf_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn23xx_pf_disable_io_queues;

	lio_cn23xx_pf_setup_reg_address(oct);

	oct->coproc_clock_rate = 1000000ULL *
		lio_cn23xx_pf_coprocessor_clock(oct);

	return (0);
}
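/*
 * Enable the PF IQs and OQs: program the IS_64B mode where requested, take
 * any queue still held in reset out of reset, then set the ring enable bits.
 */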
static int
lio_cn23xx_pf_enable_io_queues(struct octeon_device *oct)
{
	uint64_t	reg_val;
	uint32_t	ern, loop = BUSY_READING_REG_PF_LOOP_COUNT;
	uint32_t	q_no, srn;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	for (q_no = srn; q_no < ern; q_no++) {
		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_INPUT_CTL_IS_64B;
			lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
			/*
			 * IOQs are in reset by default in PEM2 mode,
			 * clearing reset bit
			 */
			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

			if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
				while ((reg_val &
					LIO_CN23XX_PKT_INPUT_CTL_RST) &&
				       !(reg_val &
					 LIO_CN23XX_PKT_INPUT_CTL_QUIET) &&
				       loop) {
					reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
					loop--;
				}

				if (!loop) {
					lio_dev_err(oct,
						    "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
						    q_no);
					return (-1);
				}

				reg_val = reg_val &
					~LIO_CN23XX_PKT_INPUT_CTL_RST;
				lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
						reg_val);

				reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				if (reg_val & LIO_CN23XX_PKT_INPUT_CTL_RST) {
					lio_dev_err(oct,
						    "clearing the reset failed for qno: %u\n",
						    q_no);
					return (-1);
				}
			}

			reg_val = lio_read_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_INPUT_CTL_RING_ENB;
			lio_write_csr64(oct,
					LIO_CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);
		}
	}

	for (q_no = srn; q_no < ern; q_no++) {
		uint32_t	reg_val;

		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
			reg_val = lio_read_csr32(oct,
					LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val = reg_val | LIO_CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			lio_write_csr32(oct,
					LIO_CN23XX_SLI_OQ_PKT_CONTROL(q_no),
					reg_val);
		}
	}

	return (0);
}