/**
 * @brief Test if the device is ready
 *
 * Polls the READY field of the MHISTATUS MMIO register, sleeping
 * MHI_READY_STATUS_TIMEOUT_MS between attempts, until the device reports
 * ready or the retry budget (30 attempts) is exhausted.
 *
 * @param mhi_dev_ctxt[IN ] device context
 *
 * @return MHI_STATUS_SUCCESS when READY is observed,
 *         MHI_STATUS_LINK_DOWN on an all-ones (link down) read,
 *         MHI_STATUS_DEVICE_NOT_READY on timeout
 */
MHI_STATUS mhi_test_for_device_ready(mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 pcie_word_val = 0;
	u32 expiry_counter = 0;

	mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");

	/* Read MMIO and poll for READY bit to be set */
	pcie_read(mhi_dev_ctxt->mmio_addr, MHISTATUS, pcie_word_val);
	/* An all-ones read means the PCIe link is down.  This must be
	 * tested on the RAW register value before MHI_READ_FIELD masks and
	 * shifts it: the extracted READY field can never equal 0xFFFFFFFF,
	 * so the old post-extraction check could never fire. */
	if (0xFFFFFFFF == pcie_word_val)
		return MHI_STATUS_LINK_DOWN;
	MHI_READ_FIELD(pcie_word_val,
		       MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);

	while (MHI_STATE_READY != pcie_word_val && expiry_counter < 30) {
		expiry_counter++;
		mhi_log(MHI_MSG_ERROR,
			"Device is not ready, sleeping and retrying.\n");
		msleep(MHI_READY_STATUS_TIMEOUT_MS);
		pcie_read(mhi_dev_ctxt->mmio_addr, MHISTATUS, pcie_word_val);
		/* Detect a link drop during the poll as well. */
		if (0xFFFFFFFF == pcie_word_val)
			return MHI_STATUS_LINK_DOWN;
		MHI_READ_FIELD(pcie_word_val,
			       MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
	}

	if (MHI_STATE_READY != pcie_word_val)
		return MHI_STATUS_DEVICE_NOT_READY;

	return MHI_STATUS_SUCCESS;
}
int wmxssd_pcie_read(char *buf, int size, unsigned int *lba) { struct ssd_header headerh; struct ssd_header *header = &headerh; int pci_size; /* pci_size = size; int i; //create the command header // pci_size = size; // if(pci_size % 8) // pci_size += (8 - size %8); // if(pci_size % 8) // printk("Will is Retarted \r\n"); //size */ /*if((size%8) != 0) { pci_size += (8-(size%8)); // next largest modulo-of-8 size } if((size/4) <= MAX_TLP_SIZE){ } else { pci_size += (128-(size%128)); }*/ if(size>0x2000) printk("WHAT???! ERROR - size %d\n", size); else if(size>0x1000) { wmxssd_pcie_read(buf, 0x1000, lba); wmxssd_pcie_read(buf+0x1000, size-0x1000, (unsigned int *)((char *)lba+0x1000)); } else { pci_size = 0x1000; header->lba = htonl((int)lba); header->command = htonl(SSD_HEADER_READ); header->size = htonl(size); //printk("--sending read command: %d bytes to addr %x\n", ntohl(header->size), ntohl(header->lba)); //send the read command to the PCIE device pcie_write((char *)header, sizeof(struct ssd_header)); //read the response(data) back pcie_read(temp_buf, pci_size); memcpy(buf, temp_buf, size); //for(i=0; i<size; i++) buf[i] = temp_buf[i]; //memcpy( buf, (void *)(((unsigned int )bram_disk | (unsigned int )header->lba)), header->size); } return 0; }
/**
 * Wait for the MDM to become reachable over the link.
 *
 * Re-reads MHIREGLEN until it stops reading as all-ones (the value seen
 * when the link is inaccessible), sleeping MHI_LINK_STABILITY_WAIT_MS
 * between attempts, up to MHI_MAX_LINK_RETRIES retries.
 *
 * @mhi_dev_ctxt: device context
 *
 * Return: MHI_STATUS_SUCCESS once a valid read is seen,
 *         MHI_STATUS_ERROR if the retry budget is exhausted.
 */
MHI_STATUS mhi_wait_for_mdm(mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 reg_val;
	u32 attempt;

	pcie_read(mhi_dev_ctxt->mmio_addr, MHIREGLEN, reg_val);
	for (attempt = 0; 0xFFFFffff == reg_val; attempt++) {
		mhi_log(MHI_MSG_CRITICAL,
			"Could not access MDM retry %d\n", attempt);
		msleep(MHI_LINK_STABILITY_WAIT_MS);
		if (MHI_MAX_LINK_RETRIES == attempt) {
			mhi_log(MHI_MSG_CRITICAL,
				"Could not access MDM, FAILING!\n");
			return MHI_STATUS_ERROR;
		}
		pcie_read(mhi_dev_ctxt->mmio_addr, MHIREGLEN, reg_val);
	}
	return MHI_STATUS_SUCCESS;
}
/**
 * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
 * @port: PCIe port information
 */
static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
{
	unsigned long rpefr = pcie_read(port, XILINX_PCIE_REG_RPEFR);

	if (!(rpefr & XILINX_PCIE_RPEFR_ERR_VALID))
		return;

	dev_dbg(port->dev, "Requester ID %lu\n",
		rpefr & XILINX_PCIE_RPEFR_REQ_ID);
	pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, XILINX_PCIE_REG_RPEFR);
}
/** * xilinx_pcie_intr_handler - Interrupt Service Handler * @irq: IRQ number * @data: PCIe port information * * Return: IRQ_HANDLED on success and IRQ_NONE on failure */ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) { struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data; struct device *dev = port->dev; u32 val, mask, status; /* Read interrupt decode and mask registers */ val = pcie_read(port, XILINX_PCIE_REG_IDR); mask = pcie_read(port, XILINX_PCIE_REG_IMR); status = val & mask; if (!status) return IRQ_NONE; if (status & XILINX_PCIE_INTR_LINK_DOWN) dev_warn(dev, "Link Down\n"); if (status & XILINX_PCIE_INTR_ECRC_ERR) dev_warn(dev, "ECRC failed\n"); if (status & XILINX_PCIE_INTR_STR_ERR) dev_warn(dev, "Streaming error\n"); if (status & XILINX_PCIE_INTR_HOT_RESET) dev_info(dev, "Hot reset\n"); if (status & XILINX_PCIE_INTR_CFG_TIMEOUT) dev_warn(dev, "ECAM access timeout\n"); if (status & XILINX_PCIE_INTR_CORRECTABLE) { dev_warn(dev, "Correctable error message\n"); xilinx_pcie_clear_err_interrupts(port); } if (status & XILINX_PCIE_INTR_NONFATAL) { dev_warn(dev, "Non fatal error message\n"); xilinx_pcie_clear_err_interrupts(port); } if (status & XILINX_PCIE_INTR_FATAL) { dev_warn(dev, "Fatal error message\n"); xilinx_pcie_clear_err_interrupts(port); } if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) { val = pcie_read(port, XILINX_PCIE_REG_RPIFR1); /* Check whether interrupt valid */ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) { dev_warn(dev, "RP Intr FIFO1 read error\n"); goto error; } /* Decode the IRQ number */ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) & XILINX_PCIE_RPIFR2_MSG_DATA; } else { val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> XILINX_PCIE_RPIFR1_INTR_SHIFT; val = irq_find_mapping(port->leg_domain, val); } /* Clear interrupt FIFO register 1 */ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, XILINX_PCIE_REG_RPIFR1); /* Handle the interrupt */ if (IS_ENABLED(CONFIG_PCI_MSI) 
|| !(val & XILINX_PCIE_RPIFR1_MSI_INTR)) generic_handle_irq(val); }
/* Report whether the PCIe link is up, per the PSCR link-up status bit. */
static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port)
{
	return !!(pcie_read(port, XILINX_PCIE_REG_PSCR) &
		  XILINX_PCIE_REG_PSCR_LNKUP);
}
/**
 * @brief Initialize the device MMIO register space.
 *
 * Validates the MMIO length and MHI version, enables the hardware
 * channels, then programs the channel/event/command context base
 * addresses and the control and data segment windows into the device.
 *
 * @param mhi_dev_ctxt[IN ] device context
 *
 * @return MHI_STATUS_SUCCESS on success, MHI_STATUS_ERROR otherwise
 */
MHI_STATUS mhi_init_mmio(mhi_device_ctxt *mhi_dev_ctxt)
{
	u64 pcie_bar0_addr = 0;
	u64 pcie_dword_val = 0;
	u32 pcie_word_val = 0;
	u32 i = 0;
	/* Must be initialized: it was previously read below even on version
	 * mismatches where mhi_wait_for_mdm() was never called (UB). */
	MHI_STATUS ret_val = MHI_STATUS_SUCCESS;

	mhi_log(MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n");
	pcie_bar0_addr = mhi_dev_ctxt->dev_props->bar0_base;
	mhi_dev_ctxt->mmio_addr = pcie_bar0_addr;
	mhi_log(MHI_MSG_INFO, "Bar 0 address is at: 0x%lX\n",
		(mhi_dev_ctxt->mmio_addr));

	pcie_read(mhi_dev_ctxt->mmio_addr, MHIREGLEN, mhi_dev_ctxt->mmio_len);
	if (0 == mhi_dev_ctxt->mmio_len) {
		mhi_log(MHI_MSG_ERROR, "Received mmio length as zero\n");
		return MHI_STATUS_ERROR;
	}

	mhi_log(MHI_MSG_INFO, "Testing MHI Ver\n");
	pcie_read(mhi_dev_ctxt->mmio_addr, MHIVER,
		  mhi_dev_ctxt->dev_props->mhi_ver);
	if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
		mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
			mhi_dev_ctxt->dev_props->mhi_ver);
		/* All-ones suggests the link is down; wait for the MDM and
		 * only consult ret_val on that path. */
		if (0xFFFFffff == mhi_dev_ctxt->dev_props->mhi_ver) {
			ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
			if (ret_val)
				return MHI_STATUS_ERROR;
		}
	}

	/* Enable the channels */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		mhi_chan_ctxt *chan_ctxt =
			&mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i];
		if (VALID_CHAN_NR(i))
			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
		else
			chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
	}

	/* NOTE: this literal was previously split across a raw newline,
	 * which is not valid C; it is now a single string. */
	mhi_log(MHI_MSG_INFO,
		"Read back MMIO Ready bit successfully. Moving on..\n");

	/* Doorbell registers live at device-reported offsets from the MMIO
	 * base; convert the offsets to absolute addresses. */
	mhi_log(MHI_MSG_INFO, "Reading channel doorbell offset\n");
	MHI_REG_READ_FIELD(mhi_dev_ctxt->mmio_addr, CHDBOFF,
			   CHDBOFF_CHDBOFF_MASK, CHDBOFF_CHDBOFF_SHIFT,
			   mhi_dev_ctxt->channel_db_addr);
	mhi_log(MHI_MSG_INFO, "Reading event doorbell offset\n");
	MHI_REG_READ_FIELD(mhi_dev_ctxt->mmio_addr, ERDBOFF,
			   ERDBOFF_ERDBOFF_MASK, ERDBOFF_ERDBOFF_SHIFT,
			   mhi_dev_ctxt->event_db_addr);
	mhi_dev_ctxt->channel_db_addr += mhi_dev_ctxt->mmio_addr;
	mhi_dev_ctxt->event_db_addr += mhi_dev_ctxt->mmio_addr;

	mhi_log(MHI_MSG_INFO, "Setting all MMIO values.\n");

	/* Write the Channel Context Array Base Address (high/low parts),
	 * translated to the device-visible physical address. */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, CCABAP_HIGHER,
			    CCABAP_HIGHER_CCABAP_HIGHER_MASK,
			    CCABAP_HIGHER_CCABAP_HIGHER_SHIFT, pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, CCABAP_LOWER,
			    CCABAP_LOWER_CCABAP_LOWER_MASK,
			    CCABAP_LOWER_CCABAP_LOWER_SHIFT, pcie_word_val);

	/* Write the Event Context Base Address Register High and Low parts */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, ECABAP_HIGHER,
			    ECABAP_HIGHER_ECABAP_HIGHER_MASK,
			    ECABAP_HIGHER_ECABAP_HIGHER_SHIFT, pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, ECABAP_LOWER,
			    ECABAP_LOWER_ECABAP_LOWER_MASK,
			    ECABAP_LOWER_ECABAP_LOWER_SHIFT, pcie_word_val);

	/* Write the Command Ring Control Register High and Low parts */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
		(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, CRCBAP_HIGHER,
			    CRCBAP_HIGHER_CRCBAP_HIGHER_MASK,
			    CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT, pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, CRCBAP_LOWER,
			    CRCBAP_LOWER_CRCBAP_LOWER_MASK,
			    CRCBAP_LOWER_CRCBAP_LOWER_SHIFT, pcie_word_val);

	mhi_dev_ctxt->cmd_db_addr = mhi_dev_ctxt->mmio_addr + CRDB_LOWER;

	/* Set the control segment in the MMIO */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg);
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRLBASE_HIGHER,
			    MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK,
			    MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT,
			    pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRLBASE_LOWER,
			    MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK,
			    MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT,
			    pcie_word_val);

	/* Control segment limit = base + length - 1 (inclusive bound). */
	pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
			(uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg) +
		mhi_get_memregion_len(mhi_dev_ctxt->mhi_ctrl_seg_info) - 1;
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_HIGHER,
			    MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK,
			    MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT,
			    pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_LOWER,
			    MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK,
			    MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT,
			    pcie_word_val);

	/* Set the data segment in the MMIO */
	pcie_dword_val = MHI_DATA_SEG_WINDOW_START_ADDR;
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHIDATABASE_HIGHER,
			    MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK,
			    MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT,
			    pcie_word_val);
	pcie_word_val = LOW_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHIDATABASE_LOWER,
			    MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK,
			    MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT,
			    pcie_word_val);

	pcie_dword_val = MHI_DATA_SEG_WINDOW_END_ADDR;
	pcie_word_val = HIGH_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_HIGHER,
			    MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK,
			    MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT,
			    (pcie_word_val));
	pcie_word_val = LOW_WORD(pcie_dword_val);
	MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_LOWER,
			    MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK,
			    MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT,
			    (pcie_word_val));

	mhi_log(MHI_MSG_INFO, "Done..\n");
	return MHI_STATUS_SUCCESS;
}