static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
			       u64 pci_addr, size_t size)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
	if (ret) {
		dev_err(pci->dev, "failed to enable address\n");
		return ret;
	}

	return 0;
}
static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
				  phys_addr_t addr)
{
	int ret;
	u32 atu_index;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	ret = dw_pcie_find_index(ep, addr, &atu_index);
	if (ret < 0)
		return;

	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
	clear_bit(atu_index, ep->ob_window_map);
}
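/*
 * From a function driver's point of view, the map_addr/unmap_addr callbacks
 * above sit behind pci_epc_map_addr()/pci_epc_unmap_addr(), usually combined
 * with a window allocated from the EPC's outbound memory pool. The sketch
 * below is illustrative only: it assumes the EPC API variant without a
 * per-function argument (newer kernels add a func_no parameter), and
 * epf_peek_host_memory() is a hypothetical helper, not part of any driver.
 */
static int epf_peek_host_memory(struct pci_epc *epc, u64 pci_addr, size_t size)
{
	phys_addr_t phys_addr;
	void __iomem *addr;
	int ret;

	/* Allocate a CPU-visible window from the EPC's outbound address space. */
	addr = pci_epc_mem_alloc_addr(epc, &phys_addr, size);
	if (!addr)
		return -ENOMEM;

	/* Program an outbound region/ATU entry: CPU phys_addr -> host pci_addr. */
	ret = pci_epc_map_addr(epc, phys_addr, pci_addr, size);
	if (ret)
		goto out_free;

	/* The window can now be accessed with readl()/memcpy_fromio() etc. */

	pci_epc_unmap_addr(epc, phys_addr);
out_free:
	pci_epc_mem_free_addr(epc, phys_addr, addr, size);
	return ret;
}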
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type, u8 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}
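/*
 * An endpoint function (pci_epf) driver never calls this callback directly;
 * it goes through the EPC core with pci_epc_raise_irq(). A minimal sketch,
 * assuming the func_no-aware pci_epc_raise_irq() prototype that matches the
 * callback above; epf_notify_host() is a hypothetical helper with error
 * handling trimmed.
 */
static int epf_notify_host(struct pci_epf *epf, bool use_msi, u8 msi_vector)
{
	struct pci_epc *epc = epf->epc;

	if (use_msi)
		/* Ends up in cdns_pcie_ep_raise_irq(..., PCI_EPC_IRQ_MSI, ...). */
		return pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
					 msi_vector);

	/* Legacy INTx; the interrupt number is ignored by the callback. */
	return pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
}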
static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}
static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	/* Only the first (max_regions - 1) regions are handed out for mappings. */
	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}
static int dw_pcie_ep_get_msi(struct pci_epc *epc)
{
	int val;
	u32 lower_addr;
	u32 upper_addr;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL);
	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;

	/* Treat an all-zero MSI message address as MSI not configured by the host. */
	lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
	upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);

	if (!(lower_addr || upper_addr))
		return -EINVAL;

	return val;
}
static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}
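/*
 * Note that the value returned above is the raw Multiple Message Enable (MME)
 * encoding from the Message Control register, not a vector count: the host
 * grants 2^MME vectors. The EPC core (pci_epc_get_msi()) performs roughly the
 * conversion sketched below before handing a count back to function drivers;
 * msi_vectors_from_mme() is illustrative only.
 */
static inline int msi_vectors_from_mme(int mme)
{
	if (mme < 0)
		return 0;	/* MSI not enabled by the host */

	return 1 << mme;	/* MME = 0..5 -> 1, 2, 4, 8, 16, 32 vectors */
}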
static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
				 u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	/*
	 * ob_region_map is a single unsigned long, so at most BITS_PER_LONG
	 * regions can be tracked; only the first max_regions - 1 of them are
	 * handed out for mappings.
	 */
	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}
static int dw_pcie_ep_write_header(struct pci_epc *epc,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
			   hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE, hdr->cache_line_size);
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
			   hdr->subsys_vendor_id);
	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	return 0;
}
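/*
 * The pci_epf_header consumed above is filled in by whichever endpoint
 * function driver binds to this controller and passed down via
 * pci_epc_write_header(). A minimal sketch with illustrative placeholder IDs
 * (the field names are the ones dereferenced in dw_pcie_ep_write_header();
 * unlisted fields default to zero):
 */
static struct pci_epf_header epf_header_sketch = {
	.vendorid	= 0x104c,		/* placeholder vendor ID */
	.deviceid	= 0xb500,		/* placeholder device ID */
	.baseclass_code	= PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};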
static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
			      dma_addr_t bar_phys, size_t size, int flags)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum dw_pcie_as_type as_type;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		as_type = DW_PCIE_AS_MEM;
	else
		as_type = DW_PCIE_AS_IO;

	ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
	if (ret)
		return ret;

	/*
	 * Writing size - 1 through the DBI2 (shadow) space programs the BAR
	 * mask, which defines the BAR size seen by the host; the regular DBI
	 * write then sets the BAR flags.
	 */
	dw_pcie_writel_dbi2(pci, reg, size - 1);
	dw_pcie_writel_dbi(pci, reg, flags);

	return 0;
}
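/*
 * The DesignWare callbacks in this listing are meant to be collected in a
 * struct pci_epc_ops table and registered with the endpoint core, e.g. via
 * devm_pci_epc_create(dev, &ops) during endpoint init. A minimal sketch using
 * only the ops shown above (the real driver also wires clear_bar, set_msi,
 * raise_irq, start and stop); exact op prototypes vary between kernel
 * versions, so each callback must match the pci_epc_ops of the target kernel.
 */
static const struct pci_epc_ops dw_pcie_epc_ops_sketch = {
	.write_header	= dw_pcie_ep_write_header,
	.set_bar	= dw_pcie_ep_set_bar,
	.map_addr	= dw_pcie_ep_map_addr,
	.unmap_addr	= dw_pcie_ep_unmap_addr,
	.get_msi	= dw_pcie_ep_get_msi,
};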
static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (bar < BAR_4) {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	cfg = cdns_pcie_readl(pcie, reg);
	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
	cdns_pcie_writel(pcie, reg, cfg);

	return 0;
}
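/*
 * Worked example of the aperture encoding used above: the controller
 * advertises a BAR of 2^(aperture + 7) bytes, so the requested size is first
 * rounded up to a power of two (with a 128-byte minimum, matching the
 * "128B -> 0" comment) and then aperture = log2(size) - 7. The standalone
 * sketch below reproduces that arithmetic outside the kernel; it is
 * illustrative only and shares no code with the driver.
 */
#include <stdint.h>
#include <stdio.h>

/* Round up to a power of two (mirrors the 1ULL << fls64(sz - 1) step). */
static uint64_t roundup_pow2_u64(uint64_t sz)
{
	uint64_t p = 1;

	while (p < sz)
		p <<= 1;
	return p;
}

/* BAR size is 2^(aperture + 7): 128B -> 0, 256B -> 1, 512B -> 2, ... */
static uint32_t bar_aperture(uint64_t size)
{
	uint64_t sz = roundup_pow2_u64(size < 128 ? 128 : size);
	uint32_t aperture = 0;

	while ((128ULL << aperture) < sz)
		aperture++;
	return aperture;
}

int main(void)
{
	printf("4 KiB -> aperture %u\n", bar_aperture(4096));	/* 5 */
	printf("1 MiB -> aperture %u\n", bar_aperture(1 << 20));	/* 13 */
	printf("3 MiB -> aperture %u\n", bar_aperture(3 << 20));	/* 15, rounded up to 4 MiB */
	return 0;
}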